repo_name
stringclasses
6 values
pr_number
int64
512
78.9k
pr_title
stringlengths
3
144
pr_description
stringlengths
0
30.3k
author
stringlengths
2
21
date_created
timestamp[ns, tz=UTC]
date_merged
timestamp[ns, tz=UTC]
previous_commit
stringlengths
40
40
pr_commit
stringlengths
40
40
query
stringlengths
17
30.4k
filepath
stringlengths
9
210
before_content
stringlengths
0
112M
after_content
stringlengths
0
112M
label
int64
-1
1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./src/libraries/Common/tests/TestUtilities/System/RetryHelper.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.Threading; using System.Threading.Tasks; namespace System { public static partial class RetryHelper { private static readonly Func<int, int> s_defaultBackoffFunc = i => Math.Min(i * 100, 60_000); private static readonly Predicate<Exception> s_defaultRetryWhenFunc = _ => true; /// <summary>Executes the <paramref name="test"/> action up to a maximum of <paramref name="maxAttempts"/> times.</summary> /// <param name="maxAttempts">The maximum number of times to invoke <paramref name="test"/>.</param> /// <param name="test">The test to invoke.</param> /// <param name="backoffFunc">After a failure, invoked to determine how many milliseconds to wait before the next attempt. It's passed the number of iterations attempted.</param> /// <param name="retryWhen">Invoked to select the exceptions to retry on. If not set, any exception will trigger a retry.</param> public static void Execute(Action test, int maxAttempts = 5, Func<int, int> backoffFunc = null, Predicate<Exception> retryWhen = null) { // Validate arguments if (maxAttempts < 1) { throw new ArgumentOutOfRangeException(nameof(maxAttempts)); } if (test == null) { throw new ArgumentNullException(nameof(test)); } retryWhen ??= s_defaultRetryWhenFunc; // Execute the test until it either passes or we run it maxAttempts times var exceptions = new List<Exception>(); for (int i = 1; i <= maxAttempts; i++) { try { test(); return; } catch (Exception e) when (retryWhen(e)) { exceptions.Add(e); if (i == maxAttempts) { throw new AggregateException(exceptions); } } Thread.Sleep((backoffFunc ?? 
s_defaultBackoffFunc)(i)); } } /// <summary>Executes the <paramref name="test"/> action up to a maximum of <paramref name="maxAttempts"/> times.</summary> /// <param name="maxAttempts">The maximum number of times to invoke <paramref name="test"/>.</param> /// <param name="test">The test to invoke.</param> /// <param name="backoffFunc">After a failure, invoked to determine how many milliseconds to wait before the next attempt. It's passed the number of iterations attempted.</param> /// <param name="retryWhen">Invoked to select the exceptions to retry on. If not set, any exception will trigger a retry.</param> public static async Task ExecuteAsync(Func<Task> test, int maxAttempts = 5, Func<int, int> backoffFunc = null, Predicate<Exception> retryWhen = null) { // Validate arguments if (maxAttempts < 1) { throw new ArgumentOutOfRangeException(nameof(maxAttempts)); } if (test == null) { throw new ArgumentNullException(nameof(test)); } retryWhen ??= s_defaultRetryWhenFunc; // Execute the test until it either passes or we run it maxAttempts times var exceptions = new List<Exception>(); for (int i = 1; i <= maxAttempts; i++) { try { await test().ConfigureAwait(false); return; } catch (Exception e) when (retryWhen(e)) { exceptions.Add(e); if (i == maxAttempts) { throw new AggregateException(exceptions); } } await Task.Delay((backoffFunc ?? s_defaultBackoffFunc)(i)).ConfigureAwait(false); } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.Threading; using System.Threading.Tasks; namespace System { public static partial class RetryHelper { private static readonly Func<int, int> s_defaultBackoffFunc = i => Math.Min(i * 100, 60_000); private static readonly Predicate<Exception> s_defaultRetryWhenFunc = _ => true; /// <summary>Executes the <paramref name="test"/> action up to a maximum of <paramref name="maxAttempts"/> times.</summary> /// <param name="maxAttempts">The maximum number of times to invoke <paramref name="test"/>.</param> /// <param name="test">The test to invoke.</param> /// <param name="backoffFunc">After a failure, invoked to determine how many milliseconds to wait before the next attempt. It's passed the number of iterations attempted.</param> /// <param name="retryWhen">Invoked to select the exceptions to retry on. If not set, any exception will trigger a retry.</param> public static void Execute(Action test, int maxAttempts = 5, Func<int, int> backoffFunc = null, Predicate<Exception> retryWhen = null) { // Validate arguments if (maxAttempts < 1) { throw new ArgumentOutOfRangeException(nameof(maxAttempts)); } if (test == null) { throw new ArgumentNullException(nameof(test)); } retryWhen ??= s_defaultRetryWhenFunc; // Execute the test until it either passes or we run it maxAttempts times var exceptions = new List<Exception>(); for (int i = 1; i <= maxAttempts; i++) { try { test(); return; } catch (Exception e) when (retryWhen(e)) { exceptions.Add(e); if (i == maxAttempts) { throw new AggregateException(exceptions); } } Thread.Sleep((backoffFunc ?? 
s_defaultBackoffFunc)(i)); } } /// <summary>Executes the <paramref name="test"/> action up to a maximum of <paramref name="maxAttempts"/> times.</summary> /// <param name="maxAttempts">The maximum number of times to invoke <paramref name="test"/>.</param> /// <param name="test">The test to invoke.</param> /// <param name="backoffFunc">After a failure, invoked to determine how many milliseconds to wait before the next attempt. It's passed the number of iterations attempted.</param> /// <param name="retryWhen">Invoked to select the exceptions to retry on. If not set, any exception will trigger a retry.</param> public static async Task ExecuteAsync(Func<Task> test, int maxAttempts = 5, Func<int, int> backoffFunc = null, Predicate<Exception> retryWhen = null) { // Validate arguments if (maxAttempts < 1) { throw new ArgumentOutOfRangeException(nameof(maxAttempts)); } if (test == null) { throw new ArgumentNullException(nameof(test)); } retryWhen ??= s_defaultRetryWhenFunc; // Execute the test until it either passes or we run it maxAttempts times var exceptions = new List<Exception>(); for (int i = 1; i <= maxAttempts; i++) { try { await test().ConfigureAwait(false); return; } catch (Exception e) when (retryWhen(e)) { exceptions.Add(e); if (i == maxAttempts) { throw new AggregateException(exceptions); } } await Task.Delay((backoffFunc ?? s_defaultBackoffFunc)(i)).ConfigureAwait(false); } } } }
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./src/libraries/System.Net.Sockets/tests/FunctionalTests/ArgumentValidationTests.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Diagnostics; using System.Collections; using System.Collections.Generic; using System.Threading; using System.Threading.Tasks; using Xunit; namespace System.Net.Sockets.Tests { public class ArgumentValidation { // This type is used to test Socket.Select's argument validation. private sealed class LargeList : IList { private const int MaxSelect = 65536; public int Count { get { return MaxSelect + 1; } } public bool IsFixedSize { get { return true; } } public bool IsReadOnly { get { return true; } } public bool IsSynchronized { get { return false; } } public object SyncRoot { get { return null; } } public object this[int index] { get { return null; } set { } } public int Add(object value) { return -1; } public void Clear() { } public bool Contains(object value) { return false; } public void CopyTo(Array array, int index) { } public IEnumerator GetEnumerator() { return null; } public int IndexOf(object value) { return -1; } public void Insert(int index, object value) { } public void Remove(object value) { } public void RemoveAt(int index) { } } private static readonly byte[] s_buffer = new byte[1]; private static readonly IList<ArraySegment<byte>> s_buffers = new List<ArraySegment<byte>> { new ArraySegment<byte>(s_buffer) }; private static readonly SocketAsyncEventArgs s_eventArgs = new SocketAsyncEventArgs(); private static readonly Socket s_ipv4Socket = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp); private static readonly Socket s_ipv6Socket = new Socket(AddressFamily.InterNetworkV6, SocketType.Stream, ProtocolType.Tcp); private static void TheAsyncCallback(IAsyncResult ar) { } private static Socket GetSocket(AddressFamily addressFamily = AddressFamily.InterNetwork) { Debug.Assert(addressFamily == AddressFamily.InterNetwork || addressFamily == AddressFamily.InterNetworkV6); return 
addressFamily == AddressFamily.InterNetwork ? s_ipv4Socket : s_ipv6Socket; } [Fact] public void SetExclusiveAddressUse_BoundSocket_Throws_InvalidOperation() { using (var socket = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp)) { socket.Bind(new IPEndPoint(IPAddress.Loopback, 0)); Assert.Throws<InvalidOperationException>(() => { socket.ExclusiveAddressUse = true; }); } } [Fact] public void SetReceiveBufferSize_Negative_ArgumentOutOfRange() { Assert.Throws<ArgumentOutOfRangeException>(() => { GetSocket().ReceiveBufferSize = -1; }); } [Fact] public void SetSendBufferSize_Negative_ArgumentOutOfRange() { Assert.Throws<ArgumentOutOfRangeException>(() => { GetSocket().SendBufferSize = -1; }); } [Fact] public void SetReceiveTimeout_LessThanNegativeOne_Throws_ArgumentOutOfRange() { Assert.Throws<ArgumentOutOfRangeException>(() => { GetSocket().ReceiveTimeout = int.MinValue; }); } [Fact] public void SetSendTimeout_LessThanNegativeOne_Throws_ArgumentOutOfRange() { Assert.Throws<ArgumentOutOfRangeException>(() => { GetSocket().SendTimeout = int.MinValue; }); } [Fact] public void SetTtl_OutOfRange_Throws_ArgumentOutOfRange() { Assert.Throws<ArgumentOutOfRangeException>(() => { GetSocket().Ttl = -1; }); Assert.Throws<ArgumentOutOfRangeException>(() => { GetSocket().Ttl = 256; }); } [Fact] public void DontFragment_IPv6_Throws_NotSupported() { Assert.Throws<NotSupportedException>(() => GetSocket(AddressFamily.InterNetworkV6).DontFragment); } [Fact] public void SetDontFragment_Throws_NotSupported() { Assert.Throws<NotSupportedException>(() => { GetSocket(AddressFamily.InterNetworkV6).DontFragment = true; }); } [Fact] public void Bind_Throws_NullEndPoint_Throws_ArgumentNull() { Assert.Throws<ArgumentNullException>(() => GetSocket().Bind(null)); } [Fact] public void Connect_EndPoint_NullEndPoint_Throws_ArgumentNull() { Assert.Throws<ArgumentNullException>(() => GetSocket().Connect(null)); } [Fact] public void 
Connect_EndPoint_ListeningSocket_Throws_InvalidOperation() { using (var socket = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp)) { socket.Bind(new IPEndPoint(IPAddress.Loopback, 0)); socket.Listen(1); Assert.Throws<InvalidOperationException>(() => socket.Connect(new IPEndPoint(IPAddress.Loopback, 1))); } } [Fact] public void Connect_IPAddress_NullIPAddress_Throws_ArgumentNull() { Assert.Throws<ArgumentNullException>(() => GetSocket().Connect((IPAddress)null, 1)); } [Fact] public void Connect_IPAddress_InvalidPort_Throws_ArgumentOutOfRange() { Assert.Throws<ArgumentOutOfRangeException>(() => GetSocket().Connect(IPAddress.Loopback, -1)); Assert.Throws<ArgumentOutOfRangeException>(() => GetSocket().Connect(IPAddress.Loopback, 65536)); } [Fact] public void Connect_IPAddress_InvalidAddressFamily_Throws_NotSupported() { Assert.Throws<NotSupportedException>(() => GetSocket(AddressFamily.InterNetwork).Connect(IPAddress.IPv6Loopback, 1)); Assert.Throws<NotSupportedException>(() => GetSocket(AddressFamily.InterNetworkV6).Connect(IPAddress.Loopback, 1)); } [Fact] public void Connect_Host_NullHost_Throws_ArgumentNull() { Assert.Throws<ArgumentNullException>(() => GetSocket().Connect((string)null, 1)); } [Fact] public void Connect_Host_InvalidPort_Throws_ArgumentOutOfRange() { Assert.Throws<ArgumentOutOfRangeException>(() => GetSocket().Connect("localhost", -1)); Assert.Throws<ArgumentOutOfRangeException>(() => GetSocket().Connect("localhost", 65536)); } [Fact] public void Connect_IPAddresses_NullArray_Throws_ArgumentNull() { Assert.Throws<ArgumentNullException>(() => GetSocket().Connect((IPAddress[])null, 1)); } [Fact] public void Connect_IPAddresses_EmptyArray_Throws_Argument() { AssertExtensions.Throws<ArgumentException>("addresses", () => GetSocket().Connect(new IPAddress[0], 1)); } [Fact] public void Connect_IPAddresses_InvalidPort_Throws_ArgumentOutOfRange() { Assert.Throws<ArgumentOutOfRangeException>(() => GetSocket().Connect(new[] { 
IPAddress.Loopback }, -1)); Assert.Throws<ArgumentOutOfRangeException>(() => GetSocket().Connect(new[] { IPAddress.Loopback }, 65536)); } [Fact] public void Accept_NotBound_Throws_InvalidOperation() { Assert.Throws<InvalidOperationException>(() => GetSocket().Accept()); } [Fact] public void Accept_NotListening_Throws_InvalidOperation() { using (var socket = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp)) { socket.Bind(new IPEndPoint(IPAddress.Loopback, 0)); Assert.Throws<InvalidOperationException>(() => socket.Accept()); } } [Fact] public void Send_Buffer_NullBuffer_Throws_ArgumentNull() { SocketError errorCode; Assert.Throws<ArgumentNullException>(() => GetSocket().Send(null, 0, 0, SocketFlags.None, out errorCode)); } [Fact] public void Send_Buffer_InvalidOffset_Throws_ArgumentOutOfRange() { SocketError errorCode; Assert.Throws<ArgumentOutOfRangeException>(() => GetSocket().Send(s_buffer, -1, 0, SocketFlags.None, out errorCode)); Assert.Throws<ArgumentOutOfRangeException>(() => GetSocket().Send(s_buffer, s_buffer.Length + 1, 0, SocketFlags.None, out errorCode)); } [Fact] public void Send_Buffer_InvalidCount_Throws_ArgumentOutOfRange() { SocketError errorCode; Assert.Throws<ArgumentOutOfRangeException>(() => GetSocket().Send(s_buffer, 0, -1, SocketFlags.None, out errorCode)); Assert.Throws<ArgumentOutOfRangeException>(() => GetSocket().Send(s_buffer, 0, s_buffer.Length + 1, SocketFlags.None, out errorCode)); Assert.Throws<ArgumentOutOfRangeException>(() => GetSocket().Send(s_buffer, s_buffer.Length, 1, SocketFlags.None, out errorCode)); } [Fact] public void Send_Buffers_NullBuffers_Throws_ArgumentNull() { SocketError errorCode; Assert.Throws<ArgumentNullException>(() => GetSocket().Send((IList<ArraySegment<byte>>)null, SocketFlags.None, out errorCode)); } [Fact] public void Send_Buffers_EmptyBuffers_Throws_Argument() { SocketError errorCode; AssertExtensions.Throws<ArgumentException>("buffers", () => GetSocket().Send(new 
List<ArraySegment<byte>>(), SocketFlags.None, out errorCode)); } [Fact] public void Receive_Buffer_NullBuffer_Throws_ArgumentNull() { SocketError errorCode; Assert.Throws<ArgumentNullException>(() => GetSocket().Receive(null, 0, 0, SocketFlags.None, out errorCode)); } [Fact] public void Receive_Buffer_InvalidOffset_Throws_ArgumentOutOfRange() { SocketError errorCode; Assert.Throws<ArgumentOutOfRangeException>(() => GetSocket().Receive(s_buffer, -1, 0, SocketFlags.None, out errorCode)); Assert.Throws<ArgumentOutOfRangeException>(() => GetSocket().Receive(s_buffer, s_buffer.Length + 1, 0, SocketFlags.None, out errorCode)); } [Fact] public void Receive_Buffer_InvalidCount_Throws_ArgumentOutOfRange() { SocketError errorCode; Assert.Throws<ArgumentOutOfRangeException>(() => GetSocket().Receive(s_buffer, 0, -1, SocketFlags.None, out errorCode)); Assert.Throws<ArgumentOutOfRangeException>(() => GetSocket().Receive(s_buffer, 0, s_buffer.Length + 1, SocketFlags.None, out errorCode)); Assert.Throws<ArgumentOutOfRangeException>(() => GetSocket().Receive(s_buffer, s_buffer.Length, 1, SocketFlags.None, out errorCode)); } [Fact] public void Receive_Buffers_NullBuffers_Throws_ArgumentNull() { SocketError errorCode; Assert.Throws<ArgumentNullException>(() => GetSocket().Receive((IList<ArraySegment<byte>>)null, SocketFlags.None, out errorCode)); } [Fact] public void Receive_Buffers_EmptyBuffers_Throws_Argument() { SocketError errorCode; AssertExtensions.Throws<ArgumentException>("buffers", () => GetSocket().Receive(new List<ArraySegment<byte>>(), SocketFlags.None, out errorCode)); } [Fact] public void SetSocketOption_Object_ObjectNull_Throws_ArgumentNull() { Assert.Throws<ArgumentNullException>(() => GetSocket().SetSocketOption(SocketOptionLevel.Socket, SocketOptionName.Linger, (object)null)); } [Fact] public void SetSocketOption_Linger_NotLingerOption_Throws_Argument() { AssertExtensions.Throws<ArgumentException>("optionValue", () => 
GetSocket().SetSocketOption(SocketOptionLevel.Socket, SocketOptionName.Linger, new object())); } [Fact] public void SetSocketOption_Linger_InvalidLingerTime_Throws_Argument() { AssertExtensions.Throws<ArgumentException>("optionValue", () => GetSocket().SetSocketOption(SocketOptionLevel.Socket, SocketOptionName.Linger, new LingerOption(true, -1))); AssertExtensions.Throws<ArgumentException>("optionValue", () => GetSocket().SetSocketOption(SocketOptionLevel.Socket, SocketOptionName.Linger, new LingerOption(true, (int)ushort.MaxValue + 1))); } [Fact] public void SetSocketOption_IPMulticast_NotIPMulticastOption_Throws_Argument() { AssertExtensions.Throws<ArgumentException>("optionValue", () => GetSocket().SetSocketOption(SocketOptionLevel.IP, SocketOptionName.AddMembership, new object())); AssertExtensions.Throws<ArgumentException>("optionValue", () => GetSocket().SetSocketOption(SocketOptionLevel.IP, SocketOptionName.DropMembership, new object())); } [Fact] public void SetSocketOption_IPv6Multicast_NotIPMulticastOption_Throws_Argument() { AssertExtensions.Throws<ArgumentException>("optionValue", () => GetSocket().SetSocketOption(SocketOptionLevel.IPv6, SocketOptionName.AddMembership, new object())); AssertExtensions.Throws<ArgumentException>("optionValue", () => GetSocket().SetSocketOption(SocketOptionLevel.IPv6, SocketOptionName.DropMembership, new object())); } [Fact] public void SetSocketOption_Object_InvalidOptionName_Throws_Argument() { AssertExtensions.Throws<ArgumentException>("optionValue", () => GetSocket().SetSocketOption(SocketOptionLevel.Socket, SocketOptionName.NoDelay, new object())); } [Fact] public void Select_NullOrEmptyLists_Throws_ArgumentNull() { var emptyList = new List<Socket>(); Assert.Throws<ArgumentNullException>(() => Socket.Select(null, null, null, -1)); Assert.Throws<ArgumentNullException>(() => Socket.Select(emptyList, null, null, -1)); Assert.Throws<ArgumentNullException>(() => Socket.Select(null, emptyList, null, -1)); 
Assert.Throws<ArgumentNullException>(() => Socket.Select(emptyList, emptyList, null, -1)); Assert.Throws<ArgumentNullException>(() => Socket.Select(null, null, emptyList, -1)); Assert.Throws<ArgumentNullException>(() => Socket.Select(emptyList, null, emptyList, -1)); Assert.Throws<ArgumentNullException>(() => Socket.Select(null, emptyList, emptyList, -1)); Assert.Throws<ArgumentNullException>(() => Socket.Select(emptyList, emptyList, emptyList, -1)); } [Fact] public void Select_LargeList_Throws_ArgumentOutOfRange() { var largeList = new LargeList(); Assert.Throws<ArgumentOutOfRangeException>(() => Socket.Select(largeList, null, null, -1)); Assert.Throws<ArgumentOutOfRangeException>(() => Socket.Select(null, largeList, null, -1)); Assert.Throws<ArgumentOutOfRangeException>(() => Socket.Select(null, null, largeList, -1)); } [Fact] public void AcceptAsync_NullAsyncEventArgs_Throws_ArgumentNull() { Assert.Throws<ArgumentNullException>(() => GetSocket().AcceptAsync((SocketAsyncEventArgs)null)); } [Fact] public void AcceptAsync_BufferList_Throws_Argument() { var eventArgs = new SocketAsyncEventArgs { BufferList = s_buffers }; AssertExtensions.Throws<ArgumentException>("e", () => GetSocket().AcceptAsync(eventArgs)); } [Fact] public void AcceptAsync_NotBound_Throws_InvalidOperation() { Assert.Throws<InvalidOperationException>(() => GetSocket().AcceptAsync(s_eventArgs)); } [Fact] public void AcceptAsync_NotListening_Throws_InvalidOperation() { using (var socket = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp)) { socket.Bind(new IPEndPoint(IPAddress.Loopback, 0)); Assert.Throws<InvalidOperationException>(() => socket.AcceptAsync(s_eventArgs)); } } [Fact] public void ConnectAsync_NullAsyncEventArgs_Throws_ArgumentNull() { Assert.Throws<ArgumentNullException>(() => GetSocket().ConnectAsync((SocketAsyncEventArgs)null)); } [Fact] public void ConnectAsync_BufferList_Throws_Argument() { var eventArgs = new SocketAsyncEventArgs { BufferList = s_buffers 
}; AssertExtensions.Throws<ArgumentException>("BufferList", () => GetSocket().ConnectAsync(eventArgs)); } [Fact] public void ConnectAsync_NullRemoteEndPoint_Throws_ArgumentNull() { Assert.Throws<ArgumentNullException>(() => GetSocket().ConnectAsync(s_eventArgs)); } [Fact] public void ConnectAsync_ListeningSocket_Throws_InvalidOperation() { var eventArgs = new SocketAsyncEventArgs { RemoteEndPoint = new IPEndPoint(IPAddress.Loopback, 1) }; using (var socket = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp)) { socket.Bind(new IPEndPoint(IPAddress.Loopback, 0)); socket.Listen(1); Assert.Throws<InvalidOperationException>(() => socket.ConnectAsync(eventArgs)); } } [Fact] public void ConnectAsync_AddressFamily_Throws_NotSupported() { var eventArgs = new SocketAsyncEventArgs { RemoteEndPoint = new DnsEndPoint("localhost", 1, AddressFamily.InterNetworkV6) }; Assert.Throws<NotSupportedException>(() => GetSocket(AddressFamily.InterNetwork).ConnectAsync(eventArgs)); eventArgs.RemoteEndPoint = new IPEndPoint(IPAddress.IPv6Loopback, 1); Assert.Throws<NotSupportedException>(() => GetSocket(AddressFamily.InterNetwork).ConnectAsync(eventArgs)); } [Fact] public void ConnectAsync_Static_NullAsyncEventArgs_Throws_ArgumentNull() { Assert.Throws<ArgumentNullException>(() => Socket.ConnectAsync(SocketType.Stream, ProtocolType.Tcp, null)); } [Fact] public void ConnectAsync_Static_BufferList_Throws_Argument() { var eventArgs = new SocketAsyncEventArgs { BufferList = s_buffers }; AssertExtensions.Throws<ArgumentException>("e", () => Socket.ConnectAsync(SocketType.Stream, ProtocolType.Tcp, eventArgs)); } [Fact] public void ConnectAsync_Static_NullRemoteEndPoint_Throws_ArgumentException() { Assert.Throws<ArgumentException>("e", () => Socket.ConnectAsync(SocketType.Stream, ProtocolType.Tcp, s_eventArgs)); } [Fact] public void ReceiveAsync_NullAsyncEventArgs_Throws_ArgumentNull() { Assert.Throws<ArgumentNullException>(() => 
GetSocket().ReceiveAsync((SocketAsyncEventArgs)null)); } [Fact] public void SendAsync_NullAsyncEventArgs_Throws_ArgumentNull() { Assert.Throws<ArgumentNullException>(() => GetSocket().SendAsync((SocketAsyncEventArgs)null)); } [Fact] public void SendPacketsAsync_NullAsyncEventArgs_Throws_ArgumentNull() { Assert.Throws<ArgumentNullException>(() => GetSocket().SendPacketsAsync(null)); } [Fact] public void SendPacketsAsync_NullSendPacketsElements_Throws_ArgumentException() { Assert.Throws<ArgumentException>("e", () => GetSocket().SendPacketsAsync(s_eventArgs)); } [Fact] public void SendPacketsAsync_NotConnected_Throws_NotSupported() { var eventArgs = new SocketAsyncEventArgs { SendPacketsElements = new SendPacketsElement[0] }; Assert.Throws<NotSupportedException>(() => GetSocket().SendPacketsAsync(eventArgs)); } [Theory] [InlineData(true)] [InlineData(false)] [PlatformSpecific(TestPlatforms.AnyUnix)] // API throws PNSE on Unix public void Socket_Connect_DnsEndPoint_ExposedHandle_NotSupported(bool useSafeHandle) { using (Socket s = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp)) { if (useSafeHandle) { _ = s.SafeHandle; } else { _ = s.Handle; } Assert.Throws<PlatformNotSupportedException>(() => s.Connect(new DnsEndPoint("localhost", 12345))); } } [Fact] public async Task Socket_Connect_DnsEndPointWithIPAddressString_Supported() { using (Socket host = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp)) { host.Bind(new IPEndPoint(IPAddress.Loopback, 0)); host.Listen(1); Task accept = host.AcceptAsync(); using (Socket s = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp)) { s.Connect(new DnsEndPoint(IPAddress.Loopback.ToString(), ((IPEndPoint)host.LocalEndPoint).Port)); } await accept; } } [Theory] [InlineData(true)] [InlineData(false)] [PlatformSpecific(TestPlatforms.AnyUnix)] // API throws PNSE on Unix public void Socket_Connect_StringHost_ExposedHandle_NotSupported(bool useSafeHandle) { using 
(Socket s = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp)) { if (useSafeHandle) { _ = s.SafeHandle; } else { _ = s.Handle; } Assert.Throws<PlatformNotSupportedException>(() => s.Connect("localhost", 12345)); } } [Fact] public async Task Socket_Connect_IPv4AddressAsStringHost_Supported() { using (Socket host = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp)) { host.Bind(new IPEndPoint(IPAddress.Loopback, 0)); host.Listen(1); Task accept = host.AcceptAsync(); using (Socket s = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp)) { s.Connect(IPAddress.Loopback.ToString(), ((IPEndPoint)host.LocalEndPoint).Port); } await accept; } } [Fact] public async Task Socket_Connect_IPv6AddressAsStringHost_Supported() { using (Socket host = new Socket(AddressFamily.InterNetworkV6, SocketType.Stream, ProtocolType.Tcp)) { host.Bind(new IPEndPoint(IPAddress.IPv6Loopback, 0)); host.Listen(1); Task accept = host.AcceptAsync(); using (Socket s = new Socket(AddressFamily.InterNetworkV6, SocketType.Stream, ProtocolType.Tcp)) { s.Connect(IPAddress.IPv6Loopback.ToString(), ((IPEndPoint)host.LocalEndPoint).Port); } await accept; } } [Theory] [InlineData(true)] [InlineData(false)] [PlatformSpecific(TestPlatforms.AnyUnix)] // API throws PNSE on Unix public void Socket_Connect_MultipleAddresses_ExposedHandle_NotSupported(bool useSafeHandle) { using (Socket s = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp)) { if (useSafeHandle) { _ = s.SafeHandle; } else { _ = s.Handle; } Assert.Throws<PlatformNotSupportedException>(() => s.Connect(new[] { IPAddress.Loopback }, 12345)); } } [Theory] [InlineData(true)] [InlineData(false)] [PlatformSpecific(TestPlatforms.AnyUnix)] // API throws PNSE on Unix public void Socket_ConnectAsync_DnsEndPoint_ExposedHandle_NotSupported(bool useSafeHandle) { using (Socket s = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp)) { if 
(useSafeHandle) { _ = s.SafeHandle; } else { _ = s.Handle; } Assert.Throws<PlatformNotSupportedException>(() => { s.ConnectAsync(new DnsEndPoint("localhost", 12345)); }); } } [Fact] public async Task Socket_ConnectAsync_DnsEndPointWithIPAddressString_Supported() { using (Socket host = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp)) { host.Bind(new IPEndPoint(IPAddress.Loopback, 0)); host.Listen(1); using (Socket s = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp)) { await Task.WhenAll( host.AcceptAsync(), s.ConnectAsync(new DnsEndPoint(IPAddress.Loopback.ToString(), ((IPEndPoint)host.LocalEndPoint).Port))); } } } [Theory] [InlineData(true)] [InlineData(false)] [PlatformSpecific(TestPlatforms.AnyUnix)] // API throws PNSE on Unix public void Socket_ConnectAsync_StringHost_ExposedHandle_NotSupported(bool useSafeHandle) { using (Socket s = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp)) { if (useSafeHandle) { _ = s.SafeHandle; } else { _ = s.Handle; } Assert.Throws<PlatformNotSupportedException>(() => { s.ConnectAsync("localhost", 12345); }); } } [Fact] public async Task Socket_ConnectAsync_IPv4AddressAsStringHost_Supported() { using (Socket host = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp)) { host.Bind(new IPEndPoint(IPAddress.Loopback, 0)); host.Listen(1); using (Socket s = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp)) { await Task.WhenAll( host.AcceptAsync(), s.ConnectAsync(IPAddress.Loopback.ToString(), ((IPEndPoint)host.LocalEndPoint).Port)); } } } [Fact] public async Task Socket_ConnectAsync_IPv6AddressAsStringHost_Supported() { using (Socket host = new Socket(AddressFamily.InterNetworkV6, SocketType.Stream, ProtocolType.Tcp)) { host.Bind(new IPEndPoint(IPAddress.IPv6Loopback, 0)); host.Listen(1); using (Socket s = new Socket(AddressFamily.InterNetworkV6, SocketType.Stream, ProtocolType.Tcp)) { await Task.WhenAll( 
host.AcceptAsync(), s.ConnectAsync(IPAddress.IPv6Loopback.ToString(), ((IPEndPoint)host.LocalEndPoint).Port)); } } } [Theory] [PlatformSpecific(TestPlatforms.AnyUnix)] // API throws PNSE on Unix [InlineData(0)] [InlineData(1)] public void Connect_ConnectTwice_NotSupported(int invalidatingAction) { using (Socket client = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp)) { switch (invalidatingAction) { case 0: IntPtr handle = client.Handle; // exposing the underlying handle break; case 1: client.SetSocketOption(SocketOptionLevel.IP, SocketOptionName.Debug, 1); // untracked socket option break; } // // Connect once, to an invalid address, expecting failure // EndPoint ep = new IPEndPoint(IPAddress.Broadcast, 1234); Assert.ThrowsAny<SocketException>(() => client.Connect(ep)); // // Connect again, expecting PlatformNotSupportedException // Assert.Throws<PlatformNotSupportedException>(() => client.Connect(ep)); } } [Theory] [PlatformSpecific(TestPlatforms.AnyUnix)] // API throws PNSE on Unix [InlineData(0)] [InlineData(1)] public void ConnectAsync_ConnectTwice_NotSupported(int invalidatingAction) { AutoResetEvent completed = new AutoResetEvent(false); using (Socket client = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp)) { switch (invalidatingAction) { case 0: IntPtr handle = client.Handle; // exposing the underlying handle break; case 1: client.SetSocketOption(SocketOptionLevel.IP, SocketOptionName.Debug, 1); // untracked socket option break; } // // Connect once, to an invalid address, expecting failure // SocketAsyncEventArgs args = new SocketAsyncEventArgs(); args.RemoteEndPoint = new IPEndPoint(IPAddress.Broadcast, 1234); args.Completed += delegate { completed.Set(); }; if (client.ConnectAsync(args)) { Assert.True(completed.WaitOne(5000), "IPv4: Timed out while waiting for connection"); } Assert.NotEqual(SocketError.Success, args.SocketError); // // Connect again, expecting PlatformNotSupportedException // 
Assert.Throws<PlatformNotSupportedException>(() => client.ConnectAsync(args)); } } [Fact] public void BeginAccept_NotBound_Throws_InvalidOperation() { Assert.Throws<InvalidOperationException>(() => GetSocket().BeginAccept(TheAsyncCallback, null)); Assert.Throws<InvalidOperationException>(() => { GetSocket().AcceptAsync(); }); } [Fact] public void BeginAccept_NotListening_Throws_InvalidOperation() { using (var socket = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp)) { socket.Bind(new IPEndPoint(IPAddress.Loopback, 0)); Assert.Throws<InvalidOperationException>(() => socket.BeginAccept(TheAsyncCallback, null)); Assert.Throws<InvalidOperationException>(() => { socket.AcceptAsync(); }); } } [Fact] public void EndAccept_NullAsyncResult_Throws_ArgumentNull() { Assert.Throws<ArgumentNullException>(() => GetSocket().EndAccept(null)); } [Fact] public void BeginConnect_EndPoint_NullEndPoint_Throws_ArgumentNull() { Assert.Throws<ArgumentNullException>(() => GetSocket().BeginConnect((EndPoint)null, TheAsyncCallback, null)); Assert.Throws<ArgumentNullException>(() => { GetSocket().ConnectAsync((EndPoint)null); }); } [Fact] public void BeginConnect_EndPoint_ListeningSocket_Throws_InvalidOperation() { using (var socket = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp)) { socket.Bind(new IPEndPoint(IPAddress.Loopback, 0)); socket.Listen(1); Assert.Throws<InvalidOperationException>(() => socket.BeginConnect(new IPEndPoint(IPAddress.Loopback, 1), TheAsyncCallback, null)); Assert.Throws<InvalidOperationException>(() => { socket.ConnectAsync(new IPEndPoint(IPAddress.Loopback, 1)); }); } } [Fact] public void BeginConnect_EndPoint_AddressFamily_Throws_NotSupported() { // Unlike other tests that reuse a static Socket instance, this test avoids doing so // to work around a behavior of .NET 4.7.2. See https://github.com/dotnet/runtime/issues/26062 // for more details. 
using (var s = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp)) { Assert.Throws<NotSupportedException>(() => s.BeginConnect( new DnsEndPoint("localhost", 1, AddressFamily.InterNetworkV6), TheAsyncCallback, null)); } using (var s = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp)) { Assert.Throws<NotSupportedException>(() => { s.ConnectAsync( new DnsEndPoint("localhost", 1, AddressFamily.InterNetworkV6)); }); } } [Fact] public void BeginConnect_Host_NullHost_Throws_ArgumentNull() { Assert.Throws<ArgumentNullException>(() => GetSocket().BeginConnect((string)null, 1, TheAsyncCallback, null)); Assert.Throws<ArgumentNullException>(() => { GetSocket().ConnectAsync((string)null, 1); }); } [Theory] [InlineData(-1)] [InlineData(65536)] public void BeginConnect_Host_InvalidPort_Throws_ArgumentOutOfRange(int port) { Assert.Throws<ArgumentOutOfRangeException>(() => GetSocket().BeginConnect("localhost", port, TheAsyncCallback, null)); Assert.Throws<ArgumentOutOfRangeException>(() => { GetSocket().ConnectAsync("localhost", port); }); } [Fact] public void BeginConnect_Host_ListeningSocket_Throws_InvalidOperation() { using (var socket = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp)) { socket.Bind(new IPEndPoint(IPAddress.Loopback, 0)); socket.Listen(1); Assert.Throws<InvalidOperationException>(() => socket.BeginConnect("localhost", 1, TheAsyncCallback, null)); Assert.Throws<InvalidOperationException>(() => { socket.ConnectAsync("localhost", 1); }); } } [Fact] public void BeginConnect_IPAddress_NullIPAddress_Throws_ArgumentNull() { Assert.Throws<ArgumentNullException>(() => GetSocket().BeginConnect((IPAddress)null, 1, TheAsyncCallback, null)); Assert.Throws<ArgumentNullException>(() => { GetSocket().ConnectAsync((IPAddress)null, 1); }); } [Theory] [InlineData(-1)] [InlineData(65536)] public void BeginConnect_IPAddress_InvalidPort_Throws_ArgumentOutOfRange(int port) { 
Assert.Throws<ArgumentOutOfRangeException>(() => GetSocket().BeginConnect(IPAddress.Loopback, port, TheAsyncCallback, null)); Assert.Throws<ArgumentOutOfRangeException>(() => { GetSocket().ConnectAsync(IPAddress.Loopback, 65536); }); } [Fact] public void BeginConnect_IPAddress_AddressFamily_Throws_NotSupported() { Assert.Throws<NotSupportedException>(() => GetSocket(AddressFamily.InterNetwork).BeginConnect(IPAddress.IPv6Loopback, 1, TheAsyncCallback, null)); Assert.Throws<NotSupportedException>(() => { GetSocket(AddressFamily.InterNetwork).ConnectAsync(IPAddress.IPv6Loopback, 1); }); } [Fact] public void BeginConnect_IPAddresses_NullIPAddresses_Throws_ArgumentNull() { Assert.Throws<ArgumentNullException>(() => GetSocket().BeginConnect((IPAddress[])null, 1, TheAsyncCallback, null)); Assert.Throws<ArgumentNullException>(() => { GetSocket().ConnectAsync((IPAddress[])null, 1); }); } [Fact] public void BeginConnect_IPAddresses_EmptyIPAddresses_Throws_Argument() { AssertExtensions.Throws<ArgumentException>("addresses", () => GetSocket().BeginConnect(new IPAddress[0], 1, TheAsyncCallback, null)); AssertExtensions.Throws<ArgumentException>("addresses", () => { GetSocket().ConnectAsync(new IPAddress[0], 1); }); } [Theory] [InlineData(-1)] [InlineData(65536)] public void BeginConnect_IPAddresses_InvalidPort_Throws_ArgumentOutOfRange(int port) { Assert.Throws<ArgumentOutOfRangeException>(() => GetSocket().BeginConnect(new[] { IPAddress.Loopback }, port, TheAsyncCallback, null)); } [Theory] [InlineData(-1)] [InlineData(65536)] public async Task ConnectAsync_IPAddresses_InvalidPort_Throws_ArgumentOutOfRange(int port) { await Assert.ThrowsAsync<ArgumentOutOfRangeException>(() => GetSocket().ConnectAsync(new[] { IPAddress.Loopback }, port)); } [Fact] public void BeginConnect_IPAddresses_ListeningSocket_Throws_InvalidOperation() { using (var socket = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp)) { socket.Bind(new IPEndPoint(IPAddress.Loopback, 0)); 
socket.Listen(1); Assert.Throws<InvalidOperationException>(() => socket.BeginConnect(new[] { IPAddress.Loopback }, 1, TheAsyncCallback, null)); } } [Fact] public async Task ConnectAsync_IPAddresses_ListeningSocket_Throws_InvalidOperation() { using (var socket = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp)) { socket.Bind(new IPEndPoint(IPAddress.Loopback, 0)); socket.Listen(1); await Assert.ThrowsAsync<InvalidOperationException>(() => socket.ConnectAsync(new[] { IPAddress.Loopback }, 1)); } } [Fact] public void EndConnect_NullAsyncResult_Throws_ArgumentNull() { Assert.Throws<ArgumentNullException>(() => GetSocket().EndConnect(null)); } [Fact] public void EndConnect_UnrelatedAsyncResult_Throws_Argument() { AssertExtensions.Throws<ArgumentException>("asyncResult", () => GetSocket().EndConnect(Task.CompletedTask)); } [Fact] public void BeginSend_Buffer_NullBuffer_Throws_ArgumentNull() { Assert.Throws<ArgumentNullException>(() => GetSocket().BeginSend(null, 0, 0, SocketFlags.None, TheAsyncCallback, null)); Assert.Throws<ArgumentNullException>(() => { GetSocket().SendAsync(new ArraySegment<byte>(null, 0, 0), SocketFlags.None); }); } [Fact] public void BeginSend_Buffer_InvalidOffset_Throws_ArgumentOutOfRange() { Assert.Throws<ArgumentOutOfRangeException>(() => GetSocket().BeginSend(s_buffer, -1, 0, SocketFlags.None, TheAsyncCallback, null)); Assert.Throws<ArgumentOutOfRangeException>(() => GetSocket().BeginSend(s_buffer, s_buffer.Length + 1, 0, SocketFlags.None, TheAsyncCallback, null)); Assert.Throws<ArgumentOutOfRangeException>(() => { GetSocket().SendAsync(new ArraySegment<byte>(s_buffer, -1, 0), SocketFlags.None); }); Assert.ThrowsAny<ArgumentException>(() => { GetSocket().SendAsync(new ArraySegment<byte>(s_buffer, s_buffer.Length + 1, 0), SocketFlags.None); }); } [Fact] public void BeginSend_Buffer_InvalidCount_Throws_ArgumentOutOfRange() { Assert.Throws<ArgumentOutOfRangeException>(() => GetSocket().BeginSend(s_buffer, 0, -1, 
SocketFlags.None, TheAsyncCallback, null)); Assert.Throws<ArgumentOutOfRangeException>(() => GetSocket().BeginSend(s_buffer, 0, s_buffer.Length + 1, SocketFlags.None, TheAsyncCallback, null)); Assert.Throws<ArgumentOutOfRangeException>(() => GetSocket().BeginSend(s_buffer, s_buffer.Length, 1, SocketFlags.None, TheAsyncCallback, null)); Assert.Throws<ArgumentOutOfRangeException>(() => { GetSocket().SendAsync(new ArraySegment<byte>(s_buffer, 0, -1), SocketFlags.None); }); Assert.ThrowsAny<ArgumentException>(() => { GetSocket().SendAsync(new ArraySegment<byte>(s_buffer, 0, s_buffer.Length + 1), SocketFlags.None); }); Assert.ThrowsAny<ArgumentException>(() => { GetSocket().SendAsync(new ArraySegment<byte>(s_buffer, s_buffer.Length, 1), SocketFlags.None); }); } [Fact] public void BeginSend_Buffers_NullBuffers_Throws_ArgumentNull() { Assert.Throws<ArgumentNullException>(() => GetSocket().BeginSend(null, SocketFlags.None, TheAsyncCallback, null)); Assert.Throws<ArgumentNullException>(() => { GetSocket().SendAsync((IList<ArraySegment<byte>>)null, SocketFlags.None); }); } [Fact] public void BeginSend_Buffers_EmptyBuffers_Throws_Argument() { AssertExtensions.Throws<ArgumentException>("buffers", () => GetSocket().BeginSend(new List<ArraySegment<byte>>(), SocketFlags.None, TheAsyncCallback, null)); AssertExtensions.Throws<ArgumentException>("buffers", () => { GetSocket().SendAsync(new List<ArraySegment<byte>>(), SocketFlags.None); }); } [Fact] public void EndSend_NullAsyncResult_Throws_ArgumentNull() { Assert.Throws<ArgumentNullException>(() => GetSocket().EndSend(null)); } [Fact] public void EndSend_UnrelatedAsyncResult_Throws_Argument() { AssertExtensions.Throws<ArgumentException>("asyncResult", () => GetSocket().EndSend(Task.CompletedTask)); } [Fact] public void BeginReceive_Buffer_NullBuffer_Throws_ArgumentNull() { Assert.Throws<ArgumentNullException>(() => GetSocket().BeginReceive(null, 0, 0, SocketFlags.None, TheAsyncCallback, null)); 
Assert.Throws<ArgumentNullException>(() => { GetSocket().ReceiveAsync(new ArraySegment<byte>(null, 0, 0), SocketFlags.None); }); } [Fact] public void BeginReceive_Buffer_InvalidOffset_Throws_ArgumentOutOfRange() { Assert.Throws<ArgumentOutOfRangeException>(() => GetSocket().BeginReceive(s_buffer, -1, 0, SocketFlags.None, TheAsyncCallback, null)); Assert.Throws<ArgumentOutOfRangeException>(() => GetSocket().BeginReceive(s_buffer, s_buffer.Length + 1, 0, SocketFlags.None, TheAsyncCallback, null)); Assert.Throws<ArgumentOutOfRangeException>(() => { GetSocket().ReceiveAsync(new ArraySegment<byte>(s_buffer, -1, 0), SocketFlags.None); }); Assert.ThrowsAny<ArgumentException>(() => { GetSocket().ReceiveAsync(new ArraySegment<byte>(s_buffer, s_buffer.Length + 1, 0), SocketFlags.None); }); } [Fact] public void BeginReceive_Buffer_InvalidCount_Throws_ArgumentOutOfRange() { Assert.Throws<ArgumentOutOfRangeException>(() => GetSocket().BeginReceive(s_buffer, 0, -1, SocketFlags.None, TheAsyncCallback, null)); Assert.Throws<ArgumentOutOfRangeException>(() => GetSocket().BeginReceive(s_buffer, 0, s_buffer.Length + 1, SocketFlags.None, TheAsyncCallback, null)); Assert.Throws<ArgumentOutOfRangeException>(() => GetSocket().BeginReceive(s_buffer, s_buffer.Length, 1, SocketFlags.None, TheAsyncCallback, null)); Assert.Throws<ArgumentOutOfRangeException>(() => { GetSocket().ReceiveAsync(new ArraySegment<byte>(s_buffer, 0, -1), SocketFlags.None); }); Assert.ThrowsAny<ArgumentException>(() => { GetSocket().ReceiveAsync(new ArraySegment<byte>(s_buffer, 0, s_buffer.Length + 1), SocketFlags.None); }); Assert.ThrowsAny<ArgumentException>(() => { GetSocket().ReceiveAsync(new ArraySegment<byte>(s_buffer, s_buffer.Length, 1), SocketFlags.None); }); } [Fact] public void BeginReceive_Buffers_NullBuffers_Throws_ArgumentNull() { Assert.Throws<ArgumentNullException>(() => GetSocket().BeginReceive(null, SocketFlags.None, TheAsyncCallback, null)); Assert.Throws<ArgumentNullException>(() => { 
GetSocket().ReceiveAsync((IList<ArraySegment<byte>>)null, SocketFlags.None); }); } [Fact] public void BeginReceive_Buffers_EmptyBuffers_Throws_Argument() { AssertExtensions.Throws<ArgumentException>("buffers", () => GetSocket().BeginReceive(new List<ArraySegment<byte>>(), SocketFlags.None, TheAsyncCallback, null)); AssertExtensions.Throws<ArgumentException>("buffers", () => { GetSocket().ReceiveAsync(new List<ArraySegment<byte>>(), SocketFlags.None); }); } [Fact] public void EndReceive_NullAsyncResult_Throws_ArgumentNull() { Assert.Throws<ArgumentNullException>(() => GetSocket().EndReceive(null)); } [Fact] public void CancelConnectAsync_NullEventArgs_Throws_ArgumentNull() { Assert.Throws<ArgumentNullException>(() => Socket.CancelConnectAsync(null)); } } }
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

using System.Diagnostics;
using System.Collections;
using System.Collections.Generic;
using System.Threading;
using System.Threading.Tasks;

using Xunit;

namespace System.Net.Sockets.Tests
{
    // Argument-validation tests for the Socket API surface: each test passes an
    // invalid argument (null, out-of-range, wrong address family, ...) and
    // asserts that the documented exception type is thrown.
    public class ArgumentValidation
    {
        // This type is used to test Socket.Select's argument validation.
        // It reports a Count just above MaxSelect without actually holding any
        // elements; the remaining IList members are unused no-op stubs.
        private sealed class LargeList : IList
        {
            private const int MaxSelect = 65536;

            public int Count { get { return MaxSelect + 1; } }
            public bool IsFixedSize { get { return true; } }
            public bool IsReadOnly { get { return true; } }
            public bool IsSynchronized { get { return false; } }
            public object SyncRoot { get { return null; } }

            public object this[int index]
            {
                get { return null; }
                set { }
            }

            public int Add(object value) { return -1; }
            public void Clear() { }
            public bool Contains(object value) { return false; }
            public void CopyTo(Array array, int index) { }
            public IEnumerator GetEnumerator() { return null; }
            public int IndexOf(object value) { return -1; }
            public void Insert(int index, object value) { }
            public void Remove(object value) { }
            public void RemoveAt(int index) { }
        }

        // Shared fixtures: validation is expected to fail before any actual
        // socket operation starts, so these instances can be reused across tests.
        private static readonly byte[] s_buffer = new byte[1];
        private static readonly IList<ArraySegment<byte>> s_buffers = new List<ArraySegment<byte>> { new ArraySegment<byte>(s_buffer) };
        private static readonly SocketAsyncEventArgs s_eventArgs = new SocketAsyncEventArgs();
        private static readonly Socket s_ipv4Socket = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp);
        private static readonly Socket s_ipv6Socket = new Socket(AddressFamily.InterNetworkV6, SocketType.Stream, ProtocolType.Tcp);

        // Callback handed to the Begin* APIs; not expected to run in these tests.
        private static void TheAsyncCallback(IAsyncResult ar) { }

        // Returns the shared IPv4 or IPv6 TCP socket for the requested family.
        private static Socket GetSocket(AddressFamily addressFamily = AddressFamily.InterNetwork)
        {
            Debug.Assert(addressFamily == AddressFamily.InterNetwork || addressFamily == AddressFamily.InterNetworkV6);
            return
                addressFamily == AddressFamily.InterNetwork ? s_ipv4Socket : s_ipv6Socket;
        }

        // Property setters: invalid values must be rejected with the documented
        // exception type.

        [Fact]
        public void SetExclusiveAddressUse_BoundSocket_Throws_InvalidOperation()
        {
            using (var socket = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp))
            {
                // ExclusiveAddressUse cannot be changed once the socket is bound.
                socket.Bind(new IPEndPoint(IPAddress.Loopback, 0));
                Assert.Throws<InvalidOperationException>(() => { socket.ExclusiveAddressUse = true; });
            }
        }

        [Fact]
        public void SetReceiveBufferSize_Negative_ArgumentOutOfRange()
        {
            Assert.Throws<ArgumentOutOfRangeException>(() => { GetSocket().ReceiveBufferSize = -1; });
        }

        [Fact]
        public void SetSendBufferSize_Negative_ArgumentOutOfRange()
        {
            Assert.Throws<ArgumentOutOfRangeException>(() => { GetSocket().SendBufferSize = -1; });
        }

        [Fact]
        public void SetReceiveTimeout_LessThanNegativeOne_Throws_ArgumentOutOfRange()
        {
            Assert.Throws<ArgumentOutOfRangeException>(() => { GetSocket().ReceiveTimeout = int.MinValue; });
        }

        [Fact]
        public void SetSendTimeout_LessThanNegativeOne_Throws_ArgumentOutOfRange()
        {
            Assert.Throws<ArgumentOutOfRangeException>(() => { GetSocket().SendTimeout = int.MinValue; });
        }

        [Fact]
        public void SetTtl_OutOfRange_Throws_ArgumentOutOfRange()
        {
            // Both sides of the valid 0-255 TTL range are probed.
            Assert.Throws<ArgumentOutOfRangeException>(() => { GetSocket().Ttl = -1; });
            Assert.Throws<ArgumentOutOfRangeException>(() => { GetSocket().Ttl = 256; });
        }

        [Fact]
        public void DontFragment_IPv6_Throws_NotSupported()
        {
            // DontFragment is not supported on an IPv6 socket.
            Assert.Throws<NotSupportedException>(() => GetSocket(AddressFamily.InterNetworkV6).DontFragment);
        }

        [Fact]
        public void SetDontFragment_Throws_NotSupported()
        {
            Assert.Throws<NotSupportedException>(() => { GetSocket(AddressFamily.InterNetworkV6).DontFragment = true; });
        }

        [Fact]
        public void Bind_Throws_NullEndPoint_Throws_ArgumentNull()
        {
            Assert.Throws<ArgumentNullException>(() => GetSocket().Bind(null));
        }

        [Fact]
        public void Connect_EndPoint_NullEndPoint_Throws_ArgumentNull()
        {
            Assert.Throws<ArgumentNullException>(() => GetSocket().Connect(null));
        }

        [Fact]
        public void
            Connect_EndPoint_ListeningSocket_Throws_InvalidOperation()
        {
            using (var socket = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp))
            {
                // Connecting a socket that is already listening is invalid.
                socket.Bind(new IPEndPoint(IPAddress.Loopback, 0));
                socket.Listen(1);
                Assert.Throws<InvalidOperationException>(() => socket.Connect(new IPEndPoint(IPAddress.Loopback, 1)));
            }
        }

        [Fact]
        public void Connect_IPAddress_NullIPAddress_Throws_ArgumentNull()
        {
            Assert.Throws<ArgumentNullException>(() => GetSocket().Connect((IPAddress)null, 1));
        }

        [Fact]
        public void Connect_IPAddress_InvalidPort_Throws_ArgumentOutOfRange()
        {
            // Ports outside 0-65535 are rejected on both sides of the range.
            Assert.Throws<ArgumentOutOfRangeException>(() => GetSocket().Connect(IPAddress.Loopback, -1));
            Assert.Throws<ArgumentOutOfRangeException>(() => GetSocket().Connect(IPAddress.Loopback, 65536));
        }

        [Fact]
        public void Connect_IPAddress_InvalidAddressFamily_Throws_NotSupported()
        {
            // The address family of the target must match the socket's family.
            Assert.Throws<NotSupportedException>(() => GetSocket(AddressFamily.InterNetwork).Connect(IPAddress.IPv6Loopback, 1));
            Assert.Throws<NotSupportedException>(() => GetSocket(AddressFamily.InterNetworkV6).Connect(IPAddress.Loopback, 1));
        }

        [Fact]
        public void Connect_Host_NullHost_Throws_ArgumentNull()
        {
            Assert.Throws<ArgumentNullException>(() => GetSocket().Connect((string)null, 1));
        }

        [Fact]
        public void Connect_Host_InvalidPort_Throws_ArgumentOutOfRange()
        {
            Assert.Throws<ArgumentOutOfRangeException>(() => GetSocket().Connect("localhost", -1));
            Assert.Throws<ArgumentOutOfRangeException>(() => GetSocket().Connect("localhost", 65536));
        }

        [Fact]
        public void Connect_IPAddresses_NullArray_Throws_ArgumentNull()
        {
            Assert.Throws<ArgumentNullException>(() => GetSocket().Connect((IPAddress[])null, 1));
        }

        [Fact]
        public void Connect_IPAddresses_EmptyArray_Throws_Argument()
        {
            AssertExtensions.Throws<ArgumentException>("addresses", () => GetSocket().Connect(new IPAddress[0], 1));
        }

        [Fact]
        public void Connect_IPAddresses_InvalidPort_Throws_ArgumentOutOfRange()
        {
            Assert.Throws<ArgumentOutOfRangeException>(() => GetSocket().Connect(new[] {
                IPAddress.Loopback }, -1));
            Assert.Throws<ArgumentOutOfRangeException>(() => GetSocket().Connect(new[] { IPAddress.Loopback }, 65536));
        }

        [Fact]
        public void Accept_NotBound_Throws_InvalidOperation()
        {
            Assert.Throws<InvalidOperationException>(() => GetSocket().Accept());
        }

        [Fact]
        public void Accept_NotListening_Throws_InvalidOperation()
        {
            using (var socket = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp))
            {
                // Bound but not listening: Accept must fail.
                socket.Bind(new IPEndPoint(IPAddress.Loopback, 0));
                Assert.Throws<InvalidOperationException>(() => socket.Accept());
            }
        }

        // Send overloads: buffer/offset/count combinations that fall outside the
        // buffer must be rejected.

        [Fact]
        public void Send_Buffer_NullBuffer_Throws_ArgumentNull()
        {
            SocketError errorCode;
            Assert.Throws<ArgumentNullException>(() => GetSocket().Send(null, 0, 0, SocketFlags.None, out errorCode));
        }

        [Fact]
        public void Send_Buffer_InvalidOffset_Throws_ArgumentOutOfRange()
        {
            SocketError errorCode;
            Assert.Throws<ArgumentOutOfRangeException>(() => GetSocket().Send(s_buffer, -1, 0, SocketFlags.None, out errorCode));
            Assert.Throws<ArgumentOutOfRangeException>(() => GetSocket().Send(s_buffer, s_buffer.Length + 1, 0, SocketFlags.None, out errorCode));
        }

        [Fact]
        public void Send_Buffer_InvalidCount_Throws_ArgumentOutOfRange()
        {
            SocketError errorCode;
            Assert.Throws<ArgumentOutOfRangeException>(() => GetSocket().Send(s_buffer, 0, -1, SocketFlags.None, out errorCode));
            Assert.Throws<ArgumentOutOfRangeException>(() => GetSocket().Send(s_buffer, 0, s_buffer.Length + 1, SocketFlags.None, out errorCode));
            Assert.Throws<ArgumentOutOfRangeException>(() => GetSocket().Send(s_buffer, s_buffer.Length, 1, SocketFlags.None, out errorCode));
        }

        [Fact]
        public void Send_Buffers_NullBuffers_Throws_ArgumentNull()
        {
            SocketError errorCode;
            Assert.Throws<ArgumentNullException>(() => GetSocket().Send((IList<ArraySegment<byte>>)null, SocketFlags.None, out errorCode));
        }

        [Fact]
        public void Send_Buffers_EmptyBuffers_Throws_Argument()
        {
            SocketError errorCode;
            AssertExtensions.Throws<ArgumentException>("buffers", () => GetSocket().Send(new
                List<ArraySegment<byte>>(), SocketFlags.None, out errorCode));
        }

        // Receive overloads mirror the Send validation above.

        [Fact]
        public void Receive_Buffer_NullBuffer_Throws_ArgumentNull()
        {
            SocketError errorCode;
            Assert.Throws<ArgumentNullException>(() => GetSocket().Receive(null, 0, 0, SocketFlags.None, out errorCode));
        }

        [Fact]
        public void Receive_Buffer_InvalidOffset_Throws_ArgumentOutOfRange()
        {
            SocketError errorCode;
            Assert.Throws<ArgumentOutOfRangeException>(() => GetSocket().Receive(s_buffer, -1, 0, SocketFlags.None, out errorCode));
            Assert.Throws<ArgumentOutOfRangeException>(() => GetSocket().Receive(s_buffer, s_buffer.Length + 1, 0, SocketFlags.None, out errorCode));
        }

        [Fact]
        public void Receive_Buffer_InvalidCount_Throws_ArgumentOutOfRange()
        {
            SocketError errorCode;
            Assert.Throws<ArgumentOutOfRangeException>(() => GetSocket().Receive(s_buffer, 0, -1, SocketFlags.None, out errorCode));
            Assert.Throws<ArgumentOutOfRangeException>(() => GetSocket().Receive(s_buffer, 0, s_buffer.Length + 1, SocketFlags.None, out errorCode));
            Assert.Throws<ArgumentOutOfRangeException>(() => GetSocket().Receive(s_buffer, s_buffer.Length, 1, SocketFlags.None, out errorCode));
        }

        [Fact]
        public void Receive_Buffers_NullBuffers_Throws_ArgumentNull()
        {
            SocketError errorCode;
            Assert.Throws<ArgumentNullException>(() => GetSocket().Receive((IList<ArraySegment<byte>>)null, SocketFlags.None, out errorCode));
        }

        [Fact]
        public void Receive_Buffers_EmptyBuffers_Throws_Argument()
        {
            SocketError errorCode;
            AssertExtensions.Throws<ArgumentException>("buffers", () => GetSocket().Receive(new List<ArraySegment<byte>>(), SocketFlags.None, out errorCode));
        }

        // SetSocketOption(object): the option value's runtime type must match the
        // option being set.

        [Fact]
        public void SetSocketOption_Object_ObjectNull_Throws_ArgumentNull()
        {
            Assert.Throws<ArgumentNullException>(() => GetSocket().SetSocketOption(SocketOptionLevel.Socket, SocketOptionName.Linger, (object)null));
        }

        [Fact]
        public void SetSocketOption_Linger_NotLingerOption_Throws_Argument()
        {
            AssertExtensions.Throws<ArgumentException>("optionValue", () =>
                GetSocket().SetSocketOption(SocketOptionLevel.Socket, SocketOptionName.Linger, new object()));
        }

        [Fact]
        public void SetSocketOption_Linger_InvalidLingerTime_Throws_Argument()
        {
            // Linger time is validated against 0..ushort.MaxValue.
            AssertExtensions.Throws<ArgumentException>("optionValue", () => GetSocket().SetSocketOption(SocketOptionLevel.Socket, SocketOptionName.Linger, new LingerOption(true, -1)));
            AssertExtensions.Throws<ArgumentException>("optionValue", () => GetSocket().SetSocketOption(SocketOptionLevel.Socket, SocketOptionName.Linger, new LingerOption(true, (int)ushort.MaxValue + 1)));
        }

        [Fact]
        public void SetSocketOption_IPMulticast_NotIPMulticastOption_Throws_Argument()
        {
            AssertExtensions.Throws<ArgumentException>("optionValue", () => GetSocket().SetSocketOption(SocketOptionLevel.IP, SocketOptionName.AddMembership, new object()));
            AssertExtensions.Throws<ArgumentException>("optionValue", () => GetSocket().SetSocketOption(SocketOptionLevel.IP, SocketOptionName.DropMembership, new object()));
        }

        [Fact]
        public void SetSocketOption_IPv6Multicast_NotIPMulticastOption_Throws_Argument()
        {
            AssertExtensions.Throws<ArgumentException>("optionValue", () => GetSocket().SetSocketOption(SocketOptionLevel.IPv6, SocketOptionName.AddMembership, new object()));
            AssertExtensions.Throws<ArgumentException>("optionValue", () => GetSocket().SetSocketOption(SocketOptionLevel.IPv6, SocketOptionName.DropMembership, new object()));
        }

        [Fact]
        public void SetSocketOption_Object_InvalidOptionName_Throws_Argument()
        {
            AssertExtensions.Throws<ArgumentException>("optionValue", () => GetSocket().SetSocketOption(SocketOptionLevel.Socket, SocketOptionName.NoDelay, new object()));
        }

        // Socket.Select: every combination of null/empty lists must throw.

        [Fact]
        public void Select_NullOrEmptyLists_Throws_ArgumentNull()
        {
            var emptyList = new List<Socket>();

            Assert.Throws<ArgumentNullException>(() => Socket.Select(null, null, null, -1));
            Assert.Throws<ArgumentNullException>(() => Socket.Select(emptyList, null, null, -1));
            Assert.Throws<ArgumentNullException>(() => Socket.Select(null, emptyList, null, -1));
            Assert.Throws<ArgumentNullException>(() => Socket.Select(emptyList, emptyList, null, -1));
            Assert.Throws<ArgumentNullException>(() => Socket.Select(null, null, emptyList, -1));
            Assert.Throws<ArgumentNullException>(() => Socket.Select(emptyList, null, emptyList, -1));
            Assert.Throws<ArgumentNullException>(() => Socket.Select(null, emptyList, emptyList, -1));
            Assert.Throws<ArgumentNullException>(() => Socket.Select(emptyList, emptyList, emptyList, -1));
        }

        [Fact]
        public void Select_LargeList_Throws_ArgumentOutOfRange()
        {
            // LargeList reports a Count above Select's maximum without actually
            // allocating that many entries.
            var largeList = new LargeList();

            Assert.Throws<ArgumentOutOfRangeException>(() => Socket.Select(largeList, null, null, -1));
            Assert.Throws<ArgumentOutOfRangeException>(() => Socket.Select(null, largeList, null, -1));
            Assert.Throws<ArgumentOutOfRangeException>(() => Socket.Select(null, null, largeList, -1));
        }

        // SocketAsyncEventArgs-based overloads.

        [Fact]
        public void AcceptAsync_NullAsyncEventArgs_Throws_ArgumentNull()
        {
            Assert.Throws<ArgumentNullException>(() => GetSocket().AcceptAsync((SocketAsyncEventArgs)null));
        }

        [Fact]
        public void AcceptAsync_BufferList_Throws_Argument()
        {
            // A BufferList is not a valid argument for an accept operation.
            var eventArgs = new SocketAsyncEventArgs
            {
                BufferList = s_buffers
            };

            AssertExtensions.Throws<ArgumentException>("e", () => GetSocket().AcceptAsync(eventArgs));
        }

        [Fact]
        public void AcceptAsync_NotBound_Throws_InvalidOperation()
        {
            Assert.Throws<InvalidOperationException>(() => GetSocket().AcceptAsync(s_eventArgs));
        }

        [Fact]
        public void AcceptAsync_NotListening_Throws_InvalidOperation()
        {
            using (var socket = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp))
            {
                socket.Bind(new IPEndPoint(IPAddress.Loopback, 0));
                Assert.Throws<InvalidOperationException>(() => socket.AcceptAsync(s_eventArgs));
            }
        }

        [Fact]
        public void ConnectAsync_NullAsyncEventArgs_Throws_ArgumentNull()
        {
            Assert.Throws<ArgumentNullException>(() => GetSocket().ConnectAsync((SocketAsyncEventArgs)null));
        }

        [Fact]
        public void ConnectAsync_BufferList_Throws_Argument()
        {
            var eventArgs = new SocketAsyncEventArgs
            {
                BufferList = s_buffers
            };

            AssertExtensions.Throws<ArgumentException>("BufferList", () => GetSocket().ConnectAsync(eventArgs));
        }

        [Fact]
        public void ConnectAsync_NullRemoteEndPoint_Throws_ArgumentNull()
        {
            // s_eventArgs has no RemoteEndPoint assigned.
            Assert.Throws<ArgumentNullException>(() => GetSocket().ConnectAsync(s_eventArgs));
        }

        [Fact]
        public void ConnectAsync_ListeningSocket_Throws_InvalidOperation()
        {
            var eventArgs = new SocketAsyncEventArgs
            {
                RemoteEndPoint = new IPEndPoint(IPAddress.Loopback, 1)
            };

            using (var socket = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp))
            {
                socket.Bind(new IPEndPoint(IPAddress.Loopback, 0));
                socket.Listen(1);
                Assert.Throws<InvalidOperationException>(() => socket.ConnectAsync(eventArgs));
            }
        }

        [Fact]
        public void ConnectAsync_AddressFamily_Throws_NotSupported()
        {
            // An IPv4 socket must reject IPv6-only destinations.
            var eventArgs = new SocketAsyncEventArgs
            {
                RemoteEndPoint = new DnsEndPoint("localhost", 1, AddressFamily.InterNetworkV6)
            };

            Assert.Throws<NotSupportedException>(() => GetSocket(AddressFamily.InterNetwork).ConnectAsync(eventArgs));

            eventArgs.RemoteEndPoint = new IPEndPoint(IPAddress.IPv6Loopback, 1);
            Assert.Throws<NotSupportedException>(() => GetSocket(AddressFamily.InterNetwork).ConnectAsync(eventArgs));
        }

        [Fact]
        public void ConnectAsync_Static_NullAsyncEventArgs_Throws_ArgumentNull()
        {
            Assert.Throws<ArgumentNullException>(() => Socket.ConnectAsync(SocketType.Stream, ProtocolType.Tcp, null));
        }

        [Fact]
        public void ConnectAsync_Static_BufferList_Throws_Argument()
        {
            var eventArgs = new SocketAsyncEventArgs
            {
                BufferList = s_buffers
            };

            AssertExtensions.Throws<ArgumentException>("e", () => Socket.ConnectAsync(SocketType.Stream, ProtocolType.Tcp, eventArgs));
        }

        [Fact]
        public void ConnectAsync_Static_NullRemoteEndPoint_Throws_ArgumentException()
        {
            Assert.Throws<ArgumentException>("e", () => Socket.ConnectAsync(SocketType.Stream, ProtocolType.Tcp, s_eventArgs));
        }

        [Fact]
        public void ReceiveAsync_NullAsyncEventArgs_Throws_ArgumentNull()
        {
            Assert.Throws<ArgumentNullException>(() =>
                GetSocket().ReceiveAsync((SocketAsyncEventArgs)null));
        }

        [Fact]
        public void SendAsync_NullAsyncEventArgs_Throws_ArgumentNull()
        {
            Assert.Throws<ArgumentNullException>(() => GetSocket().SendAsync((SocketAsyncEventArgs)null));
        }

        [Fact]
        public void SendPacketsAsync_NullAsyncEventArgs_Throws_ArgumentNull()
        {
            Assert.Throws<ArgumentNullException>(() => GetSocket().SendPacketsAsync(null));
        }

        [Fact]
        public void SendPacketsAsync_NullSendPacketsElements_Throws_ArgumentException()
        {
            // s_eventArgs has no SendPacketsElements assigned.
            Assert.Throws<ArgumentException>("e", () => GetSocket().SendPacketsAsync(s_eventArgs));
        }

        [Fact]
        public void SendPacketsAsync_NotConnected_Throws_NotSupported()
        {
            var eventArgs = new SocketAsyncEventArgs
            {
                SendPacketsElements = new SendPacketsElement[0]
            };

            Assert.Throws<NotSupportedException>(() => GetSocket().SendPacketsAsync(eventArgs));
        }

        // On Unix, once the raw handle has been exposed the name-based connect
        // helpers are no longer supported (they throw PlatformNotSupportedException).

        [Theory]
        [InlineData(true)]
        [InlineData(false)]
        [PlatformSpecific(TestPlatforms.AnyUnix)] // API throws PNSE on Unix
        public void Socket_Connect_DnsEndPoint_ExposedHandle_NotSupported(bool useSafeHandle)
        {
            using (Socket s = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp))
            {
                if (useSafeHandle)
                {
                    _ = s.SafeHandle;
                }
                else
                {
                    _ = s.Handle;
                }

                Assert.Throws<PlatformNotSupportedException>(() => s.Connect(new DnsEndPoint("localhost", 12345)));
            }
        }

        [Fact]
        public async Task Socket_Connect_DnsEndPointWithIPAddressString_Supported()
        {
            using (Socket host = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp))
            {
                host.Bind(new IPEndPoint(IPAddress.Loopback, 0));
                host.Listen(1);
                Task accept = host.AcceptAsync();

                using (Socket s = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp))
                {
                    // A DnsEndPoint carrying a literal IP address string is supported.
                    s.Connect(new DnsEndPoint(IPAddress.Loopback.ToString(), ((IPEndPoint)host.LocalEndPoint).Port));
                }

                await accept;
            }
        }

        [Theory]
        [InlineData(true)]
        [InlineData(false)]
        [PlatformSpecific(TestPlatforms.AnyUnix)] // API throws PNSE on Unix
        public void Socket_Connect_StringHost_ExposedHandle_NotSupported(bool useSafeHandle)
        {
            using
(Socket s = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp)) { if (useSafeHandle) { _ = s.SafeHandle; } else { _ = s.Handle; } Assert.Throws<PlatformNotSupportedException>(() => s.Connect("localhost", 12345)); } } [Fact] public async Task Socket_Connect_IPv4AddressAsStringHost_Supported() { using (Socket host = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp)) { host.Bind(new IPEndPoint(IPAddress.Loopback, 0)); host.Listen(1); Task accept = host.AcceptAsync(); using (Socket s = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp)) { s.Connect(IPAddress.Loopback.ToString(), ((IPEndPoint)host.LocalEndPoint).Port); } await accept; } } [Fact] public async Task Socket_Connect_IPv6AddressAsStringHost_Supported() { using (Socket host = new Socket(AddressFamily.InterNetworkV6, SocketType.Stream, ProtocolType.Tcp)) { host.Bind(new IPEndPoint(IPAddress.IPv6Loopback, 0)); host.Listen(1); Task accept = host.AcceptAsync(); using (Socket s = new Socket(AddressFamily.InterNetworkV6, SocketType.Stream, ProtocolType.Tcp)) { s.Connect(IPAddress.IPv6Loopback.ToString(), ((IPEndPoint)host.LocalEndPoint).Port); } await accept; } } [Theory] [InlineData(true)] [InlineData(false)] [PlatformSpecific(TestPlatforms.AnyUnix)] // API throws PNSE on Unix public void Socket_Connect_MultipleAddresses_ExposedHandle_NotSupported(bool useSafeHandle) { using (Socket s = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp)) { if (useSafeHandle) { _ = s.SafeHandle; } else { _ = s.Handle; } Assert.Throws<PlatformNotSupportedException>(() => s.Connect(new[] { IPAddress.Loopback }, 12345)); } } [Theory] [InlineData(true)] [InlineData(false)] [PlatformSpecific(TestPlatforms.AnyUnix)] // API throws PNSE on Unix public void Socket_ConnectAsync_DnsEndPoint_ExposedHandle_NotSupported(bool useSafeHandle) { using (Socket s = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp)) { if 
(useSafeHandle) { _ = s.SafeHandle; } else { _ = s.Handle; } Assert.Throws<PlatformNotSupportedException>(() => { s.ConnectAsync(new DnsEndPoint("localhost", 12345)); }); } } [Fact] public async Task Socket_ConnectAsync_DnsEndPointWithIPAddressString_Supported() { using (Socket host = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp)) { host.Bind(new IPEndPoint(IPAddress.Loopback, 0)); host.Listen(1); using (Socket s = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp)) { await Task.WhenAll( host.AcceptAsync(), s.ConnectAsync(new DnsEndPoint(IPAddress.Loopback.ToString(), ((IPEndPoint)host.LocalEndPoint).Port))); } } } [Theory] [InlineData(true)] [InlineData(false)] [PlatformSpecific(TestPlatforms.AnyUnix)] // API throws PNSE on Unix public void Socket_ConnectAsync_StringHost_ExposedHandle_NotSupported(bool useSafeHandle) { using (Socket s = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp)) { if (useSafeHandle) { _ = s.SafeHandle; } else { _ = s.Handle; } Assert.Throws<PlatformNotSupportedException>(() => { s.ConnectAsync("localhost", 12345); }); } } [Fact] public async Task Socket_ConnectAsync_IPv4AddressAsStringHost_Supported() { using (Socket host = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp)) { host.Bind(new IPEndPoint(IPAddress.Loopback, 0)); host.Listen(1); using (Socket s = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp)) { await Task.WhenAll( host.AcceptAsync(), s.ConnectAsync(IPAddress.Loopback.ToString(), ((IPEndPoint)host.LocalEndPoint).Port)); } } } [Fact] public async Task Socket_ConnectAsync_IPv6AddressAsStringHost_Supported() { using (Socket host = new Socket(AddressFamily.InterNetworkV6, SocketType.Stream, ProtocolType.Tcp)) { host.Bind(new IPEndPoint(IPAddress.IPv6Loopback, 0)); host.Listen(1); using (Socket s = new Socket(AddressFamily.InterNetworkV6, SocketType.Stream, ProtocolType.Tcp)) { await Task.WhenAll( 
host.AcceptAsync(), s.ConnectAsync(IPAddress.IPv6Loopback.ToString(), ((IPEndPoint)host.LocalEndPoint).Port)); } } } [Theory] [PlatformSpecific(TestPlatforms.AnyUnix)] // API throws PNSE on Unix [InlineData(0)] [InlineData(1)] public void Connect_ConnectTwice_NotSupported(int invalidatingAction) { using (Socket client = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp)) { switch (invalidatingAction) { case 0: IntPtr handle = client.Handle; // exposing the underlying handle break; case 1: client.SetSocketOption(SocketOptionLevel.IP, SocketOptionName.Debug, 1); // untracked socket option break; } // // Connect once, to an invalid address, expecting failure // EndPoint ep = new IPEndPoint(IPAddress.Broadcast, 1234); Assert.ThrowsAny<SocketException>(() => client.Connect(ep)); // // Connect again, expecting PlatformNotSupportedException // Assert.Throws<PlatformNotSupportedException>(() => client.Connect(ep)); } } [Theory] [PlatformSpecific(TestPlatforms.AnyUnix)] // API throws PNSE on Unix [InlineData(0)] [InlineData(1)] public void ConnectAsync_ConnectTwice_NotSupported(int invalidatingAction) { AutoResetEvent completed = new AutoResetEvent(false); using (Socket client = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp)) { switch (invalidatingAction) { case 0: IntPtr handle = client.Handle; // exposing the underlying handle break; case 1: client.SetSocketOption(SocketOptionLevel.IP, SocketOptionName.Debug, 1); // untracked socket option break; } // // Connect once, to an invalid address, expecting failure // SocketAsyncEventArgs args = new SocketAsyncEventArgs(); args.RemoteEndPoint = new IPEndPoint(IPAddress.Broadcast, 1234); args.Completed += delegate { completed.Set(); }; if (client.ConnectAsync(args)) { Assert.True(completed.WaitOne(5000), "IPv4: Timed out while waiting for connection"); } Assert.NotEqual(SocketError.Success, args.SocketError); // // Connect again, expecting PlatformNotSupportedException // 
Assert.Throws<PlatformNotSupportedException>(() => client.ConnectAsync(args)); } } [Fact] public void BeginAccept_NotBound_Throws_InvalidOperation() { Assert.Throws<InvalidOperationException>(() => GetSocket().BeginAccept(TheAsyncCallback, null)); Assert.Throws<InvalidOperationException>(() => { GetSocket().AcceptAsync(); }); } [Fact] public void BeginAccept_NotListening_Throws_InvalidOperation() { using (var socket = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp)) { socket.Bind(new IPEndPoint(IPAddress.Loopback, 0)); Assert.Throws<InvalidOperationException>(() => socket.BeginAccept(TheAsyncCallback, null)); Assert.Throws<InvalidOperationException>(() => { socket.AcceptAsync(); }); } } [Fact] public void EndAccept_NullAsyncResult_Throws_ArgumentNull() { Assert.Throws<ArgumentNullException>(() => GetSocket().EndAccept(null)); } [Fact] public void BeginConnect_EndPoint_NullEndPoint_Throws_ArgumentNull() { Assert.Throws<ArgumentNullException>(() => GetSocket().BeginConnect((EndPoint)null, TheAsyncCallback, null)); Assert.Throws<ArgumentNullException>(() => { GetSocket().ConnectAsync((EndPoint)null); }); } [Fact] public void BeginConnect_EndPoint_ListeningSocket_Throws_InvalidOperation() { using (var socket = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp)) { socket.Bind(new IPEndPoint(IPAddress.Loopback, 0)); socket.Listen(1); Assert.Throws<InvalidOperationException>(() => socket.BeginConnect(new IPEndPoint(IPAddress.Loopback, 1), TheAsyncCallback, null)); Assert.Throws<InvalidOperationException>(() => { socket.ConnectAsync(new IPEndPoint(IPAddress.Loopback, 1)); }); } } [Fact] public void BeginConnect_EndPoint_AddressFamily_Throws_NotSupported() { // Unlike other tests that reuse a static Socket instance, this test avoids doing so // to work around a behavior of .NET 4.7.2. See https://github.com/dotnet/runtime/issues/26062 // for more details. 
using (var s = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp)) { Assert.Throws<NotSupportedException>(() => s.BeginConnect( new DnsEndPoint("localhost", 1, AddressFamily.InterNetworkV6), TheAsyncCallback, null)); } using (var s = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp)) { Assert.Throws<NotSupportedException>(() => { s.ConnectAsync( new DnsEndPoint("localhost", 1, AddressFamily.InterNetworkV6)); }); } } [Fact] public void BeginConnect_Host_NullHost_Throws_ArgumentNull() { Assert.Throws<ArgumentNullException>(() => GetSocket().BeginConnect((string)null, 1, TheAsyncCallback, null)); Assert.Throws<ArgumentNullException>(() => { GetSocket().ConnectAsync((string)null, 1); }); } [Theory] [InlineData(-1)] [InlineData(65536)] public void BeginConnect_Host_InvalidPort_Throws_ArgumentOutOfRange(int port) { Assert.Throws<ArgumentOutOfRangeException>(() => GetSocket().BeginConnect("localhost", port, TheAsyncCallback, null)); Assert.Throws<ArgumentOutOfRangeException>(() => { GetSocket().ConnectAsync("localhost", port); }); } [Fact] public void BeginConnect_Host_ListeningSocket_Throws_InvalidOperation() { using (var socket = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp)) { socket.Bind(new IPEndPoint(IPAddress.Loopback, 0)); socket.Listen(1); Assert.Throws<InvalidOperationException>(() => socket.BeginConnect("localhost", 1, TheAsyncCallback, null)); Assert.Throws<InvalidOperationException>(() => { socket.ConnectAsync("localhost", 1); }); } } [Fact] public void BeginConnect_IPAddress_NullIPAddress_Throws_ArgumentNull() { Assert.Throws<ArgumentNullException>(() => GetSocket().BeginConnect((IPAddress)null, 1, TheAsyncCallback, null)); Assert.Throws<ArgumentNullException>(() => { GetSocket().ConnectAsync((IPAddress)null, 1); }); } [Theory] [InlineData(-1)] [InlineData(65536)] public void BeginConnect_IPAddress_InvalidPort_Throws_ArgumentOutOfRange(int port) { 
Assert.Throws<ArgumentOutOfRangeException>(() => GetSocket().BeginConnect(IPAddress.Loopback, port, TheAsyncCallback, null)); Assert.Throws<ArgumentOutOfRangeException>(() => { GetSocket().ConnectAsync(IPAddress.Loopback, 65536); }); } [Fact] public void BeginConnect_IPAddress_AddressFamily_Throws_NotSupported() { Assert.Throws<NotSupportedException>(() => GetSocket(AddressFamily.InterNetwork).BeginConnect(IPAddress.IPv6Loopback, 1, TheAsyncCallback, null)); Assert.Throws<NotSupportedException>(() => { GetSocket(AddressFamily.InterNetwork).ConnectAsync(IPAddress.IPv6Loopback, 1); }); } [Fact] public void BeginConnect_IPAddresses_NullIPAddresses_Throws_ArgumentNull() { Assert.Throws<ArgumentNullException>(() => GetSocket().BeginConnect((IPAddress[])null, 1, TheAsyncCallback, null)); Assert.Throws<ArgumentNullException>(() => { GetSocket().ConnectAsync((IPAddress[])null, 1); }); } [Fact] public void BeginConnect_IPAddresses_EmptyIPAddresses_Throws_Argument() { AssertExtensions.Throws<ArgumentException>("addresses", () => GetSocket().BeginConnect(new IPAddress[0], 1, TheAsyncCallback, null)); AssertExtensions.Throws<ArgumentException>("addresses", () => { GetSocket().ConnectAsync(new IPAddress[0], 1); }); } [Theory] [InlineData(-1)] [InlineData(65536)] public void BeginConnect_IPAddresses_InvalidPort_Throws_ArgumentOutOfRange(int port) { Assert.Throws<ArgumentOutOfRangeException>(() => GetSocket().BeginConnect(new[] { IPAddress.Loopback }, port, TheAsyncCallback, null)); } [Theory] [InlineData(-1)] [InlineData(65536)] public async Task ConnectAsync_IPAddresses_InvalidPort_Throws_ArgumentOutOfRange(int port) { await Assert.ThrowsAsync<ArgumentOutOfRangeException>(() => GetSocket().ConnectAsync(new[] { IPAddress.Loopback }, port)); } [Fact] public void BeginConnect_IPAddresses_ListeningSocket_Throws_InvalidOperation() { using (var socket = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp)) { socket.Bind(new IPEndPoint(IPAddress.Loopback, 0)); 
socket.Listen(1); Assert.Throws<InvalidOperationException>(() => socket.BeginConnect(new[] { IPAddress.Loopback }, 1, TheAsyncCallback, null)); } } [Fact] public async Task ConnectAsync_IPAddresses_ListeningSocket_Throws_InvalidOperation() { using (var socket = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp)) { socket.Bind(new IPEndPoint(IPAddress.Loopback, 0)); socket.Listen(1); await Assert.ThrowsAsync<InvalidOperationException>(() => socket.ConnectAsync(new[] { IPAddress.Loopback }, 1)); } } [Fact] public void EndConnect_NullAsyncResult_Throws_ArgumentNull() { Assert.Throws<ArgumentNullException>(() => GetSocket().EndConnect(null)); } [Fact] public void EndConnect_UnrelatedAsyncResult_Throws_Argument() { AssertExtensions.Throws<ArgumentException>("asyncResult", () => GetSocket().EndConnect(Task.CompletedTask)); } [Fact] public void BeginSend_Buffer_NullBuffer_Throws_ArgumentNull() { Assert.Throws<ArgumentNullException>(() => GetSocket().BeginSend(null, 0, 0, SocketFlags.None, TheAsyncCallback, null)); Assert.Throws<ArgumentNullException>(() => { GetSocket().SendAsync(new ArraySegment<byte>(null, 0, 0), SocketFlags.None); }); } [Fact] public void BeginSend_Buffer_InvalidOffset_Throws_ArgumentOutOfRange() { Assert.Throws<ArgumentOutOfRangeException>(() => GetSocket().BeginSend(s_buffer, -1, 0, SocketFlags.None, TheAsyncCallback, null)); Assert.Throws<ArgumentOutOfRangeException>(() => GetSocket().BeginSend(s_buffer, s_buffer.Length + 1, 0, SocketFlags.None, TheAsyncCallback, null)); Assert.Throws<ArgumentOutOfRangeException>(() => { GetSocket().SendAsync(new ArraySegment<byte>(s_buffer, -1, 0), SocketFlags.None); }); Assert.ThrowsAny<ArgumentException>(() => { GetSocket().SendAsync(new ArraySegment<byte>(s_buffer, s_buffer.Length + 1, 0), SocketFlags.None); }); } [Fact] public void BeginSend_Buffer_InvalidCount_Throws_ArgumentOutOfRange() { Assert.Throws<ArgumentOutOfRangeException>(() => GetSocket().BeginSend(s_buffer, 0, -1, 
SocketFlags.None, TheAsyncCallback, null)); Assert.Throws<ArgumentOutOfRangeException>(() => GetSocket().BeginSend(s_buffer, 0, s_buffer.Length + 1, SocketFlags.None, TheAsyncCallback, null)); Assert.Throws<ArgumentOutOfRangeException>(() => GetSocket().BeginSend(s_buffer, s_buffer.Length, 1, SocketFlags.None, TheAsyncCallback, null)); Assert.Throws<ArgumentOutOfRangeException>(() => { GetSocket().SendAsync(new ArraySegment<byte>(s_buffer, 0, -1), SocketFlags.None); }); Assert.ThrowsAny<ArgumentException>(() => { GetSocket().SendAsync(new ArraySegment<byte>(s_buffer, 0, s_buffer.Length + 1), SocketFlags.None); }); Assert.ThrowsAny<ArgumentException>(() => { GetSocket().SendAsync(new ArraySegment<byte>(s_buffer, s_buffer.Length, 1), SocketFlags.None); }); } [Fact] public void BeginSend_Buffers_NullBuffers_Throws_ArgumentNull() { Assert.Throws<ArgumentNullException>(() => GetSocket().BeginSend(null, SocketFlags.None, TheAsyncCallback, null)); Assert.Throws<ArgumentNullException>(() => { GetSocket().SendAsync((IList<ArraySegment<byte>>)null, SocketFlags.None); }); } [Fact] public void BeginSend_Buffers_EmptyBuffers_Throws_Argument() { AssertExtensions.Throws<ArgumentException>("buffers", () => GetSocket().BeginSend(new List<ArraySegment<byte>>(), SocketFlags.None, TheAsyncCallback, null)); AssertExtensions.Throws<ArgumentException>("buffers", () => { GetSocket().SendAsync(new List<ArraySegment<byte>>(), SocketFlags.None); }); } [Fact] public void EndSend_NullAsyncResult_Throws_ArgumentNull() { Assert.Throws<ArgumentNullException>(() => GetSocket().EndSend(null)); } [Fact] public void EndSend_UnrelatedAsyncResult_Throws_Argument() { AssertExtensions.Throws<ArgumentException>("asyncResult", () => GetSocket().EndSend(Task.CompletedTask)); } [Fact] public void BeginReceive_Buffer_NullBuffer_Throws_ArgumentNull() { Assert.Throws<ArgumentNullException>(() => GetSocket().BeginReceive(null, 0, 0, SocketFlags.None, TheAsyncCallback, null)); 
Assert.Throws<ArgumentNullException>(() => { GetSocket().ReceiveAsync(new ArraySegment<byte>(null, 0, 0), SocketFlags.None); }); } [Fact] public void BeginReceive_Buffer_InvalidOffset_Throws_ArgumentOutOfRange() { Assert.Throws<ArgumentOutOfRangeException>(() => GetSocket().BeginReceive(s_buffer, -1, 0, SocketFlags.None, TheAsyncCallback, null)); Assert.Throws<ArgumentOutOfRangeException>(() => GetSocket().BeginReceive(s_buffer, s_buffer.Length + 1, 0, SocketFlags.None, TheAsyncCallback, null)); Assert.Throws<ArgumentOutOfRangeException>(() => { GetSocket().ReceiveAsync(new ArraySegment<byte>(s_buffer, -1, 0), SocketFlags.None); }); Assert.ThrowsAny<ArgumentException>(() => { GetSocket().ReceiveAsync(new ArraySegment<byte>(s_buffer, s_buffer.Length + 1, 0), SocketFlags.None); }); } [Fact] public void BeginReceive_Buffer_InvalidCount_Throws_ArgumentOutOfRange() { Assert.Throws<ArgumentOutOfRangeException>(() => GetSocket().BeginReceive(s_buffer, 0, -1, SocketFlags.None, TheAsyncCallback, null)); Assert.Throws<ArgumentOutOfRangeException>(() => GetSocket().BeginReceive(s_buffer, 0, s_buffer.Length + 1, SocketFlags.None, TheAsyncCallback, null)); Assert.Throws<ArgumentOutOfRangeException>(() => GetSocket().BeginReceive(s_buffer, s_buffer.Length, 1, SocketFlags.None, TheAsyncCallback, null)); Assert.Throws<ArgumentOutOfRangeException>(() => { GetSocket().ReceiveAsync(new ArraySegment<byte>(s_buffer, 0, -1), SocketFlags.None); }); Assert.ThrowsAny<ArgumentException>(() => { GetSocket().ReceiveAsync(new ArraySegment<byte>(s_buffer, 0, s_buffer.Length + 1), SocketFlags.None); }); Assert.ThrowsAny<ArgumentException>(() => { GetSocket().ReceiveAsync(new ArraySegment<byte>(s_buffer, s_buffer.Length, 1), SocketFlags.None); }); } [Fact] public void BeginReceive_Buffers_NullBuffers_Throws_ArgumentNull() { Assert.Throws<ArgumentNullException>(() => GetSocket().BeginReceive(null, SocketFlags.None, TheAsyncCallback, null)); Assert.Throws<ArgumentNullException>(() => { 
GetSocket().ReceiveAsync((IList<ArraySegment<byte>>)null, SocketFlags.None); }); } [Fact] public void BeginReceive_Buffers_EmptyBuffers_Throws_Argument() { AssertExtensions.Throws<ArgumentException>("buffers", () => GetSocket().BeginReceive(new List<ArraySegment<byte>>(), SocketFlags.None, TheAsyncCallback, null)); AssertExtensions.Throws<ArgumentException>("buffers", () => { GetSocket().ReceiveAsync(new List<ArraySegment<byte>>(), SocketFlags.None); }); } [Fact] public void EndReceive_NullAsyncResult_Throws_ArgumentNull() { Assert.Throws<ArgumentNullException>(() => GetSocket().EndReceive(null)); } [Fact] public void CancelConnectAsync_NullEventArgs_Throws_ArgumentNull() { Assert.Throws<ArgumentNullException>(() => Socket.CancelConnectAsync(null)); } } }
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./src/libraries/System.Drawing.Common/tests/Drawing2D/AdjustableArrowCapTests.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using Xunit; namespace System.Drawing.Drawing2D.Tests { public class AdjustableArrowCapTests { public static IEnumerable<object[]> Ctor_Float_Float_TestData() { yield return new object[] { 1f, 1f }; yield return new object[] { 50f, 50f }; yield return new object[] { float.MaxValue, float.MaxValue }; // Nonsensical values -- but still permitted. yield return new object[] { -1f, 1f }; yield return new object[] { float.PositiveInfinity, 1f }; yield return new object[] { float.NegativeInfinity, 1f }; yield return new object[] { float.NaN, 1f }; yield return new object[] { 0f, 1f }; yield return new object[] { 0f, 0f }; yield return new object[] { 1f, -1f }; } [ConditionalTheory(Helpers.IsDrawingSupported)] [MemberData(nameof(Ctor_Float_Float_TestData))] public void Ctor_Float_Float(float width, float height) { using (AdjustableArrowCap arrowCap = new AdjustableArrowCap(width, height)) { Assert.Equal(width, arrowCap.Width); Assert.Equal(height, arrowCap.Height); Assert.True(arrowCap.Filled); } } public static IEnumerable<object[]> Ctor_Float_Float_Bool_TestData() { foreach (object[] data in Ctor_Float_Float_TestData()) { yield return new object[] { data[0], data[1], true }; yield return new object[] { data[0], data[1], false }; } } [ConditionalTheory(Helpers.IsDrawingSupported)] [MemberData(nameof(Ctor_Float_Float_Bool_TestData))] public void Ctor_Float_Float_Bool(float width, float height, bool filled) { using (AdjustableArrowCap arrowCap = new AdjustableArrowCap(width, height, filled)) { Assert.Equal(width, arrowCap.Width); Assert.Equal(height, arrowCap.Height); Assert.Equal(filled, arrowCap.Filled); } } public static IEnumerable<object[]> Properties_TestData() { yield return new object[] { -1 }; yield return new object[] { 0 }; yield return new object[] { 10 }; yield return new object[] { 5000 }; 
yield return new object[] { float.MaxValue }; yield return new object[] { float.PositiveInfinity }; yield return new object[] { float.NegativeInfinity }; yield return new object[] { float.NaN }; } [ConditionalTheory(Helpers.IsDrawingSupported)] [MemberData(nameof(Properties_TestData))] public void Width_Set_GetReturnsExpected(float width) { using (AdjustableArrowCap arrowCap = new AdjustableArrowCap(1, 1)) { arrowCap.Width = width; Assert.Equal(width, arrowCap.Width); } } [ConditionalTheory(Helpers.IsDrawingSupported)] [MemberData(nameof(Properties_TestData))] public void Height_Set_GetReturnsExpected(float height) { using (AdjustableArrowCap arrowCap = new AdjustableArrowCap(1, 1)) { arrowCap.Height = height; Assert.Equal(height, arrowCap.Height); } } [ConditionalTheory(Helpers.IsDrawingSupported)] [MemberData(nameof(Properties_TestData))] public void MiddleInset_Set_GetReturnsExpected(float middleInset) { using (AdjustableArrowCap arrowCap = new AdjustableArrowCap(1, 1)) { arrowCap.MiddleInset = middleInset; Assert.Equal(middleInset, arrowCap.MiddleInset); } } [ConditionalTheory(Helpers.IsDrawingSupported)] [InlineData(true)] [InlineData(false)] public void Filled_Set_GetReturnsExpected(bool filled) { using (AdjustableArrowCap arrowCap = new AdjustableArrowCap(1, 1)) { arrowCap.Filled = filled; Assert.Equal(filled, arrowCap.Filled); } } [ConditionalFact(Helpers.IsDrawingSupported)] public void Clone_Success() { using (AdjustableArrowCap arrowCap = new AdjustableArrowCap(1, 1)) using (AdjustableArrowCap clone = Assert.IsType<AdjustableArrowCap>(arrowCap.Clone())) { Assert.NotSame(clone, arrowCap); Assert.Equal(clone.Width, arrowCap.Width); Assert.Equal(clone.Height, arrowCap.Height); Assert.Equal(clone.MiddleInset, arrowCap.MiddleInset); Assert.Equal(clone.Filled, arrowCap.Filled); } } [ConditionalFact(Helpers.IsWindowsOrAtLeastLibgdiplus6)] public void BaseCap_ReturnsTriangle() { using (AdjustableArrowCap arrowCap = new AdjustableArrowCap(1, 1)) { 
Assert.Equal(LineCap.Triangle, arrowCap.BaseCap); } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using Xunit; namespace System.Drawing.Drawing2D.Tests { public class AdjustableArrowCapTests { public static IEnumerable<object[]> Ctor_Float_Float_TestData() { yield return new object[] { 1f, 1f }; yield return new object[] { 50f, 50f }; yield return new object[] { float.MaxValue, float.MaxValue }; // Nonsensical values -- but still permitted. yield return new object[] { -1f, 1f }; yield return new object[] { float.PositiveInfinity, 1f }; yield return new object[] { float.NegativeInfinity, 1f }; yield return new object[] { float.NaN, 1f }; yield return new object[] { 0f, 1f }; yield return new object[] { 0f, 0f }; yield return new object[] { 1f, -1f }; } [ConditionalTheory(Helpers.IsDrawingSupported)] [MemberData(nameof(Ctor_Float_Float_TestData))] public void Ctor_Float_Float(float width, float height) { using (AdjustableArrowCap arrowCap = new AdjustableArrowCap(width, height)) { Assert.Equal(width, arrowCap.Width); Assert.Equal(height, arrowCap.Height); Assert.True(arrowCap.Filled); } } public static IEnumerable<object[]> Ctor_Float_Float_Bool_TestData() { foreach (object[] data in Ctor_Float_Float_TestData()) { yield return new object[] { data[0], data[1], true }; yield return new object[] { data[0], data[1], false }; } } [ConditionalTheory(Helpers.IsDrawingSupported)] [MemberData(nameof(Ctor_Float_Float_Bool_TestData))] public void Ctor_Float_Float_Bool(float width, float height, bool filled) { using (AdjustableArrowCap arrowCap = new AdjustableArrowCap(width, height, filled)) { Assert.Equal(width, arrowCap.Width); Assert.Equal(height, arrowCap.Height); Assert.Equal(filled, arrowCap.Filled); } } public static IEnumerable<object[]> Properties_TestData() { yield return new object[] { -1 }; yield return new object[] { 0 }; yield return new object[] { 10 }; yield return new object[] { 5000 }; 
yield return new object[] { float.MaxValue }; yield return new object[] { float.PositiveInfinity }; yield return new object[] { float.NegativeInfinity }; yield return new object[] { float.NaN }; } [ConditionalTheory(Helpers.IsDrawingSupported)] [MemberData(nameof(Properties_TestData))] public void Width_Set_GetReturnsExpected(float width) { using (AdjustableArrowCap arrowCap = new AdjustableArrowCap(1, 1)) { arrowCap.Width = width; Assert.Equal(width, arrowCap.Width); } } [ConditionalTheory(Helpers.IsDrawingSupported)] [MemberData(nameof(Properties_TestData))] public void Height_Set_GetReturnsExpected(float height) { using (AdjustableArrowCap arrowCap = new AdjustableArrowCap(1, 1)) { arrowCap.Height = height; Assert.Equal(height, arrowCap.Height); } } [ConditionalTheory(Helpers.IsDrawingSupported)] [MemberData(nameof(Properties_TestData))] public void MiddleInset_Set_GetReturnsExpected(float middleInset) { using (AdjustableArrowCap arrowCap = new AdjustableArrowCap(1, 1)) { arrowCap.MiddleInset = middleInset; Assert.Equal(middleInset, arrowCap.MiddleInset); } } [ConditionalTheory(Helpers.IsDrawingSupported)] [InlineData(true)] [InlineData(false)] public void Filled_Set_GetReturnsExpected(bool filled) { using (AdjustableArrowCap arrowCap = new AdjustableArrowCap(1, 1)) { arrowCap.Filled = filled; Assert.Equal(filled, arrowCap.Filled); } } [ConditionalFact(Helpers.IsDrawingSupported)] public void Clone_Success() { using (AdjustableArrowCap arrowCap = new AdjustableArrowCap(1, 1)) using (AdjustableArrowCap clone = Assert.IsType<AdjustableArrowCap>(arrowCap.Clone())) { Assert.NotSame(clone, arrowCap); Assert.Equal(clone.Width, arrowCap.Width); Assert.Equal(clone.Height, arrowCap.Height); Assert.Equal(clone.MiddleInset, arrowCap.MiddleInset); Assert.Equal(clone.Filled, arrowCap.Filled); } } [ConditionalFact(Helpers.IsWindowsOrAtLeastLibgdiplus6)] public void BaseCap_ReturnsTriangle() { using (AdjustableArrowCap arrowCap = new AdjustableArrowCap(1, 1)) { 
Assert.Equal(LineCap.Triangle, arrowCap.BaseCap); } } } }
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./src/libraries/System.Data.Common/tests/System/Data/SqlTypes/SqlInt32Test.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // (C) Tim Coleman // (C) 2003 Martin Willemoes Hansen // Copyright (C) 2004 Novell, Inc (http://www.novell.com) // // Permission is hereby granted, free of charge, to any person obtaining // a copy of this software and associated documentation files (the // "Software"), to deal in the Software without restriction, including // without limitation the rights to use, copy, modify, merge, publish, // distribute, sublicense, and/or sell copies of the Software, and to // permit persons to whom the Software is furnished to do so, subject to // the following conditions: // // The above copyright notice and this permission notice shall be // included in all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, // EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND // NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE // LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION // OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION // WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
using System.Data.SqlTypes; using System.IO; using System.Xml; using System.Xml.Serialization; using Xunit; namespace System.Data.Tests.SqlTypes { public class SqlInt32Test { [Fact] public void Create() { SqlInt32 foo = new SqlInt32(5); Assert.Equal(5, (int)foo); } [Fact] public void Add() { int a = 5; int b = 7; SqlInt32 x; SqlInt32 y; SqlInt32 z; x = new SqlInt32(a); y = new SqlInt32(b); z = x + y; Assert.Equal(a + b, z.Value); z = SqlInt32.Add(x, y); Assert.Equal(a + b, z.Value); } [Fact] public void BitwiseAnd() { int a = 5; int b = 7; SqlInt32 x = new SqlInt32(a); SqlInt32 y = new SqlInt32(b); SqlInt32 z = x & y; Assert.Equal(a & b, z.Value); z = SqlInt32.BitwiseAnd(x, y); Assert.Equal(a & b, z.Value); } [Fact] public void BitwiseOr() { int a = 5; int b = 7; SqlInt32 x = new SqlInt32(a); SqlInt32 y = new SqlInt32(b); SqlInt32 z = x | y; Assert.Equal(a | b, z.Value); z = SqlInt32.BitwiseOr(x, y); Assert.Equal(a | b, z.Value); } [Fact] public void Divide() { int a = 5; int b = 7; SqlInt32 x = new SqlInt32(a); SqlInt32 y = new SqlInt32(b); SqlInt32 z = x / y; Assert.Equal(a / b, z.Value); z = SqlInt32.Divide(x, y); Assert.Equal(a / b, z.Value); } [Fact] public void EqualsTest() { SqlInt32 x; SqlInt32 y; // Case 1: either is SqlInt32.Null x = SqlInt32.Null; y = new SqlInt32(5); Assert.Equal(SqlBoolean.Null, x == y); Assert.Equal(SqlBoolean.Null, SqlInt32.Equals(x, y)); // Case 2: both are SqlInt32.Null y = SqlInt32.Null; Assert.Equal(SqlBoolean.Null, x == y); Assert.Equal(SqlBoolean.Null, SqlInt32.Equals(x, y)); // Case 3: both are equal x = new SqlInt32(5); y = new SqlInt32(5); Assert.Equal(SqlBoolean.True, x == y); Assert.Equal(SqlBoolean.True, SqlInt32.Equals(x, y)); // Case 4: inequality x = new SqlInt32(5); y = new SqlInt32(6); Assert.Equal(SqlBoolean.False, x == y); Assert.Equal(SqlBoolean.False, SqlInt32.Equals(x, y)); } [Fact] public void GreaterThan() { SqlInt32 x; SqlInt32 y; // Case 1: either is SqlInt32.Null x = SqlInt32.Null; y = new SqlInt32(5); 
Assert.Equal(SqlBoolean.Null, x > y); Assert.Equal(SqlBoolean.Null, SqlInt32.GreaterThan(x, y)); // Case 2: both are SqlInt32.Null y = SqlInt32.Null; Assert.Equal(SqlBoolean.Null, x > y); Assert.Equal(SqlBoolean.Null, SqlInt32.GreaterThan(x, y)); // Case 3: x > y x = new SqlInt32(5); y = new SqlInt32(4); Assert.Equal(SqlBoolean.True, x > y); Assert.Equal(SqlBoolean.True, SqlInt32.GreaterThan(x, y)); // Case 4: x < y x = new SqlInt32(5); y = new SqlInt32(6); Assert.Equal(SqlBoolean.False, x > y); Assert.Equal(SqlBoolean.False, SqlInt32.GreaterThan(x, y)); } [Fact] public void GreaterThanOrEqual() { SqlInt32 x; SqlInt32 y; // Case 1: either is SqlInt32.Null x = SqlInt32.Null; y = new SqlInt32(5); Assert.Equal(SqlBoolean.Null, x >= y); Assert.Equal(SqlBoolean.Null, SqlInt32.GreaterThanOrEqual(x, y)); // Case 2: both are SqlInt32.Null y = SqlInt32.Null; Assert.Equal(SqlBoolean.Null, x >= y); Assert.Equal(SqlBoolean.Null, SqlInt32.GreaterThanOrEqual(x, y)); // Case 3: x > y x = new SqlInt32(5); y = new SqlInt32(4); Assert.Equal(SqlBoolean.True, x >= y); Assert.Equal(SqlBoolean.True, SqlInt32.GreaterThanOrEqual(x, y)); // Case 4: x < y x = new SqlInt32(5); y = new SqlInt32(6); Assert.Equal(SqlBoolean.False, x >= y); Assert.Equal(SqlBoolean.False, SqlInt32.GreaterThanOrEqual(x, y)); // Case 5: x == y x = new SqlInt32(5); y = new SqlInt32(5); Assert.Equal(SqlBoolean.True, x >= y); Assert.Equal(SqlBoolean.True, SqlInt32.GreaterThanOrEqual(x, y)); } [Fact] public void LessThan() { SqlInt32 x; SqlInt32 y; // Case 1: either is SqlInt32.Null x = SqlInt32.Null; y = new SqlInt32(5); Assert.Equal(SqlBoolean.Null, x < y); Assert.Equal(SqlBoolean.Null, SqlInt32.LessThan(x, y)); // Case 2: both are SqlInt32.Null y = SqlInt32.Null; Assert.Equal(SqlBoolean.Null, x < y); Assert.Equal(SqlBoolean.Null, SqlInt32.LessThan(x, y)); // Case 3: x > y x = new SqlInt32(5); y = new SqlInt32(4); Assert.Equal(SqlBoolean.False, x < y); Assert.Equal(SqlBoolean.False, SqlInt32.LessThan(x, y)); // Case 
4: x < y x = new SqlInt32(5); y = new SqlInt32(6); Assert.Equal(SqlBoolean.True, x < y); Assert.Equal(SqlBoolean.True, SqlInt32.LessThan(x, y)); } [Fact] public void LessThanOrEqual() { SqlInt32 x; SqlInt32 y; // Case 1: either is SqlInt32.Null x = SqlInt32.Null; y = new SqlInt32(5); Assert.Equal(SqlBoolean.Null, x <= y); Assert.Equal(SqlBoolean.Null, SqlInt32.LessThanOrEqual(x, y)); // Case 2: both are SqlInt32.Null y = SqlInt32.Null; Assert.Equal(SqlBoolean.Null, x <= y); Assert.Equal(SqlBoolean.Null, SqlInt32.LessThanOrEqual(x, y)); // Case 3: x > y x = new SqlInt32(5); y = new SqlInt32(4); Assert.Equal(SqlBoolean.False, x <= y); Assert.Equal(SqlBoolean.False, SqlInt32.LessThanOrEqual(x, y)); // Case 4: x < y x = new SqlInt32(5); y = new SqlInt32(6); Assert.Equal(SqlBoolean.True, x <= y); Assert.Equal(SqlBoolean.True, SqlInt32.LessThanOrEqual(x, y)); // Case 5: x == y x = new SqlInt32(5); y = new SqlInt32(5); Assert.Equal(SqlBoolean.True, x <= y); Assert.Equal(SqlBoolean.True, SqlInt32.LessThanOrEqual(x, y)); } [Fact] public void Mod() { int a = 5; int b = 7; SqlInt32 x = new SqlInt32(a); SqlInt32 y = new SqlInt32(b); SqlInt32 z = x % y; Assert.Equal(a % b, z.Value); z = SqlInt32.Mod(x, y); Assert.Equal(a % b, z.Value); } [Fact] public void Modulus() { int a = 50; int b = 7; SqlInt32 x = new SqlInt32(a); SqlInt32 y = new SqlInt32(b); SqlInt32 z = x % y; Assert.Equal(a % b, z.Value); z = SqlInt32.Modulus(x, y); Assert.Equal(a % b, z.Value); } [Fact] public void Multiply() { int a = 5; int b = 7; SqlInt32 x = new SqlInt32(a); SqlInt32 y = new SqlInt32(b); SqlInt32 z = x * y; Assert.Equal(a * b, z.Value); z = SqlInt32.Multiply(x, y); Assert.Equal(a * b, z.Value); } [Fact] public void NotEquals() { SqlInt32 x; SqlInt32 y; x = new SqlInt32(5); y = SqlInt32.Null; Assert.Equal(SqlBoolean.Null, x != y); Assert.Equal(SqlBoolean.Null, SqlInt32.NotEquals(x, y)); y = new SqlInt32(5); Assert.Equal(SqlBoolean.False, x != y); Assert.Equal(SqlBoolean.False, 
SqlInt32.NotEquals(x, y)); y = new SqlInt32(6); Assert.Equal(SqlBoolean.True, x != y); Assert.Equal(SqlBoolean.True, SqlInt32.NotEquals(x, y)); } [Fact] public void OnesComplement() { int a = 5; SqlInt32 x = new SqlInt32(a); SqlInt32 z = ~x; Assert.Equal(~a, z.Value); z = SqlInt32.OnesComplement(x); Assert.Equal(~a, z.Value); } [Fact] public void IsNullProperty() { SqlInt32 n = SqlInt32.Null; Assert.True(n.IsNull); } [Fact] public void Subtract() { int a = 7; int b = 5; SqlInt32 x = new SqlInt32(a); SqlInt32 y = new SqlInt32(b); SqlInt32 z = x - y; Assert.Equal(a - b, z.Value); z = SqlInt32.Subtract(x, y); Assert.Equal(a - b, z.Value); } [Fact] public void ConversionMethods() { SqlInt32 x; // Case 1: SqlInt32.Null -> SqlBoolean == SqlBoolean.Null x = SqlInt32.Null; Assert.Equal(SqlBoolean.Null, x.ToSqlBoolean()); // Case 2: SqlInt32.Zero -> SqlBoolean == False x = SqlInt32.Zero; Assert.Equal(SqlBoolean.False, x.ToSqlBoolean()); // Case 3: SqlInt32(nonzero) -> SqlBoolean == True x = new SqlInt32(27); Assert.Equal(SqlBoolean.True, x.ToSqlBoolean()); // Case 4: SqlInt32.Null -> SqlByte == SqlByte.Null x = SqlInt32.Null; Assert.Equal(SqlByte.Null, x.ToSqlByte()); // Case 5: Test non-null conversion to SqlByte x = new SqlInt32(27); Assert.Equal((byte)27, x.ToSqlByte().Value); // Case 6: SqlInt32.Null -> SqlDecimal == SqlDecimal.Null x = SqlInt32.Null; Assert.Equal(SqlDecimal.Null, x.ToSqlDecimal()); // Case 7: Test non-null conversion to SqlDecimal x = new SqlInt32(27); Assert.Equal(27, x.ToSqlDecimal().Value); // Case 8: SqlInt32.Null -> SqlDouble == SqlDouble.Null x = SqlInt32.Null; Assert.Equal(SqlDouble.Null, x.ToSqlDouble()); // Case 9: Test non-null conversion to SqlDouble x = new SqlInt32(27); Assert.Equal(27, x.ToSqlDouble().Value); // Case 10: SqlInt32.Null -> SqlInt16 == SqlInt16.Null x = SqlInt32.Null; Assert.Equal(SqlInt16.Null, x.ToSqlInt16()); // Case 11: Test non-null conversion to SqlInt16 x = new SqlInt32(27); Assert.Equal((short)27, 
x.ToSqlInt16().Value); // Case 12: SqlInt32.Null -> SqlInt64 == SqlInt64.Null x = SqlInt32.Null; Assert.Equal(SqlInt64.Null, x.ToSqlInt64()); // Case 13: Test non-null conversion to SqlInt64 x = new SqlInt32(27); Assert.Equal(27, x.ToSqlInt64().Value); // Case 14: SqlInt32.Null -> SqlMoney == SqlMoney.Null x = SqlInt32.Null; Assert.Equal(SqlMoney.Null, x.ToSqlMoney()); // Case 15: Test non-null conversion to SqlMoney x = new SqlInt32(27); Assert.Equal(27.0000M, x.ToSqlMoney().Value); // Case 16: SqlInt32.Null -> SqlSingle == SqlSingle.Null x = SqlInt32.Null; Assert.Equal(SqlSingle.Null, x.ToSqlSingle()); // Case 17: Test non-null conversion to SqlSingle x = new SqlInt32(27); Assert.Equal(27, x.ToSqlSingle().Value); } [Fact] public void Xor() { int a = 5; int b = 7; SqlInt32 x = new SqlInt32(a); SqlInt32 y = new SqlInt32(b); SqlInt32 z = x ^ y; Assert.Equal(a ^ b, z.Value); z = SqlInt32.Xor(x, y); Assert.Equal(a ^ b, z.Value); } [Fact] public void GetXsdTypeTest() { XmlQualifiedName qualifiedName = SqlInt32.GetXsdType(null); Assert.Equal("int", qualifiedName.Name); } internal void ReadWriteXmlTestInternal(string xml, int testval, string unit_test_id) { SqlInt32 test; SqlInt32 test1; XmlSerializer ser; StringWriter sw; XmlTextWriter xw; StringReader sr; XmlTextReader xr; test = new SqlInt32(testval); ser = new XmlSerializer(typeof(SqlInt32)); sw = new StringWriter(); xw = new XmlTextWriter(sw); ser.Serialize(xw, test); // Assert.Equal (xml, sw.ToString ()); sr = new StringReader(xml); xr = new XmlTextReader(sr); test1 = (SqlInt32)ser.Deserialize(xr); Assert.Equal(testval, test1.Value); } [Fact] //[Category ("MobileNotWorking")] public void ReadWriteXmlTest() { string xml1 = "<?xml version=\"1.0\" encoding=\"utf-16\"?><int>4556</int>"; string xml2 = "<?xml version=\"1.0\" encoding=\"utf-16\"?><int>-6445</int>"; string xml3 = "<?xml version=\"1.0\" encoding=\"utf-16\"?><int>0x455687AB3E4D56F</int>"; int test1 = 4556; int test2 = -6445; int test3 = 0x4F56; 
ReadWriteXmlTestInternal(xml1, test1, "BA01"); ReadWriteXmlTestInternal(xml2, test2, "BA02"); InvalidOperationException ex = Assert.Throws<InvalidOperationException>(() => ReadWriteXmlTestInternal(xml3, test3, "#BA03")); Assert.Equal(typeof(FormatException), ex.InnerException.GetType()); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // (C) Tim Coleman // (C) 2003 Martin Willemoes Hansen // Copyright (C) 2004 Novell, Inc (http://www.novell.com) // // Permission is hereby granted, free of charge, to any person obtaining // a copy of this software and associated documentation files (the // "Software"), to deal in the Software without restriction, including // without limitation the rights to use, copy, modify, merge, publish, // distribute, sublicense, and/or sell copies of the Software, and to // permit persons to whom the Software is furnished to do so, subject to // the following conditions: // // The above copyright notice and this permission notice shall be // included in all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, // EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND // NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE // LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION // OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION // WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
using System.Data.SqlTypes; using System.IO; using System.Xml; using System.Xml.Serialization; using Xunit; namespace System.Data.Tests.SqlTypes { public class SqlInt32Test { [Fact] public void Create() { SqlInt32 foo = new SqlInt32(5); Assert.Equal(5, (int)foo); } [Fact] public void Add() { int a = 5; int b = 7; SqlInt32 x; SqlInt32 y; SqlInt32 z; x = new SqlInt32(a); y = new SqlInt32(b); z = x + y; Assert.Equal(a + b, z.Value); z = SqlInt32.Add(x, y); Assert.Equal(a + b, z.Value); } [Fact] public void BitwiseAnd() { int a = 5; int b = 7; SqlInt32 x = new SqlInt32(a); SqlInt32 y = new SqlInt32(b); SqlInt32 z = x & y; Assert.Equal(a & b, z.Value); z = SqlInt32.BitwiseAnd(x, y); Assert.Equal(a & b, z.Value); } [Fact] public void BitwiseOr() { int a = 5; int b = 7; SqlInt32 x = new SqlInt32(a); SqlInt32 y = new SqlInt32(b); SqlInt32 z = x | y; Assert.Equal(a | b, z.Value); z = SqlInt32.BitwiseOr(x, y); Assert.Equal(a | b, z.Value); } [Fact] public void Divide() { int a = 5; int b = 7; SqlInt32 x = new SqlInt32(a); SqlInt32 y = new SqlInt32(b); SqlInt32 z = x / y; Assert.Equal(a / b, z.Value); z = SqlInt32.Divide(x, y); Assert.Equal(a / b, z.Value); } [Fact] public void EqualsTest() { SqlInt32 x; SqlInt32 y; // Case 1: either is SqlInt32.Null x = SqlInt32.Null; y = new SqlInt32(5); Assert.Equal(SqlBoolean.Null, x == y); Assert.Equal(SqlBoolean.Null, SqlInt32.Equals(x, y)); // Case 2: both are SqlInt32.Null y = SqlInt32.Null; Assert.Equal(SqlBoolean.Null, x == y); Assert.Equal(SqlBoolean.Null, SqlInt32.Equals(x, y)); // Case 3: both are equal x = new SqlInt32(5); y = new SqlInt32(5); Assert.Equal(SqlBoolean.True, x == y); Assert.Equal(SqlBoolean.True, SqlInt32.Equals(x, y)); // Case 4: inequality x = new SqlInt32(5); y = new SqlInt32(6); Assert.Equal(SqlBoolean.False, x == y); Assert.Equal(SqlBoolean.False, SqlInt32.Equals(x, y)); } [Fact] public void GreaterThan() { SqlInt32 x; SqlInt32 y; // Case 1: either is SqlInt32.Null x = SqlInt32.Null; y = new SqlInt32(5); 
Assert.Equal(SqlBoolean.Null, x > y); Assert.Equal(SqlBoolean.Null, SqlInt32.GreaterThan(x, y)); // Case 2: both are SqlInt32.Null y = SqlInt32.Null; Assert.Equal(SqlBoolean.Null, x > y); Assert.Equal(SqlBoolean.Null, SqlInt32.GreaterThan(x, y)); // Case 3: x > y x = new SqlInt32(5); y = new SqlInt32(4); Assert.Equal(SqlBoolean.True, x > y); Assert.Equal(SqlBoolean.True, SqlInt32.GreaterThan(x, y)); // Case 4: x < y x = new SqlInt32(5); y = new SqlInt32(6); Assert.Equal(SqlBoolean.False, x > y); Assert.Equal(SqlBoolean.False, SqlInt32.GreaterThan(x, y)); } [Fact] public void GreaterThanOrEqual() { SqlInt32 x; SqlInt32 y; // Case 1: either is SqlInt32.Null x = SqlInt32.Null; y = new SqlInt32(5); Assert.Equal(SqlBoolean.Null, x >= y); Assert.Equal(SqlBoolean.Null, SqlInt32.GreaterThanOrEqual(x, y)); // Case 2: both are SqlInt32.Null y = SqlInt32.Null; Assert.Equal(SqlBoolean.Null, x >= y); Assert.Equal(SqlBoolean.Null, SqlInt32.GreaterThanOrEqual(x, y)); // Case 3: x > y x = new SqlInt32(5); y = new SqlInt32(4); Assert.Equal(SqlBoolean.True, x >= y); Assert.Equal(SqlBoolean.True, SqlInt32.GreaterThanOrEqual(x, y)); // Case 4: x < y x = new SqlInt32(5); y = new SqlInt32(6); Assert.Equal(SqlBoolean.False, x >= y); Assert.Equal(SqlBoolean.False, SqlInt32.GreaterThanOrEqual(x, y)); // Case 5: x == y x = new SqlInt32(5); y = new SqlInt32(5); Assert.Equal(SqlBoolean.True, x >= y); Assert.Equal(SqlBoolean.True, SqlInt32.GreaterThanOrEqual(x, y)); } [Fact] public void LessThan() { SqlInt32 x; SqlInt32 y; // Case 1: either is SqlInt32.Null x = SqlInt32.Null; y = new SqlInt32(5); Assert.Equal(SqlBoolean.Null, x < y); Assert.Equal(SqlBoolean.Null, SqlInt32.LessThan(x, y)); // Case 2: both are SqlInt32.Null y = SqlInt32.Null; Assert.Equal(SqlBoolean.Null, x < y); Assert.Equal(SqlBoolean.Null, SqlInt32.LessThan(x, y)); // Case 3: x > y x = new SqlInt32(5); y = new SqlInt32(4); Assert.Equal(SqlBoolean.False, x < y); Assert.Equal(SqlBoolean.False, SqlInt32.LessThan(x, y)); // Case 
4: x < y x = new SqlInt32(5); y = new SqlInt32(6); Assert.Equal(SqlBoolean.True, x < y); Assert.Equal(SqlBoolean.True, SqlInt32.LessThan(x, y)); } [Fact] public void LessThanOrEqual() { SqlInt32 x; SqlInt32 y; // Case 1: either is SqlInt32.Null x = SqlInt32.Null; y = new SqlInt32(5); Assert.Equal(SqlBoolean.Null, x <= y); Assert.Equal(SqlBoolean.Null, SqlInt32.LessThanOrEqual(x, y)); // Case 2: both are SqlInt32.Null y = SqlInt32.Null; Assert.Equal(SqlBoolean.Null, x <= y); Assert.Equal(SqlBoolean.Null, SqlInt32.LessThanOrEqual(x, y)); // Case 3: x > y x = new SqlInt32(5); y = new SqlInt32(4); Assert.Equal(SqlBoolean.False, x <= y); Assert.Equal(SqlBoolean.False, SqlInt32.LessThanOrEqual(x, y)); // Case 4: x < y x = new SqlInt32(5); y = new SqlInt32(6); Assert.Equal(SqlBoolean.True, x <= y); Assert.Equal(SqlBoolean.True, SqlInt32.LessThanOrEqual(x, y)); // Case 5: x == y x = new SqlInt32(5); y = new SqlInt32(5); Assert.Equal(SqlBoolean.True, x <= y); Assert.Equal(SqlBoolean.True, SqlInt32.LessThanOrEqual(x, y)); } [Fact] public void Mod() { int a = 5; int b = 7; SqlInt32 x = new SqlInt32(a); SqlInt32 y = new SqlInt32(b); SqlInt32 z = x % y; Assert.Equal(a % b, z.Value); z = SqlInt32.Mod(x, y); Assert.Equal(a % b, z.Value); } [Fact] public void Modulus() { int a = 50; int b = 7; SqlInt32 x = new SqlInt32(a); SqlInt32 y = new SqlInt32(b); SqlInt32 z = x % y; Assert.Equal(a % b, z.Value); z = SqlInt32.Modulus(x, y); Assert.Equal(a % b, z.Value); } [Fact] public void Multiply() { int a = 5; int b = 7; SqlInt32 x = new SqlInt32(a); SqlInt32 y = new SqlInt32(b); SqlInt32 z = x * y; Assert.Equal(a * b, z.Value); z = SqlInt32.Multiply(x, y); Assert.Equal(a * b, z.Value); } [Fact] public void NotEquals() { SqlInt32 x; SqlInt32 y; x = new SqlInt32(5); y = SqlInt32.Null; Assert.Equal(SqlBoolean.Null, x != y); Assert.Equal(SqlBoolean.Null, SqlInt32.NotEquals(x, y)); y = new SqlInt32(5); Assert.Equal(SqlBoolean.False, x != y); Assert.Equal(SqlBoolean.False, 
SqlInt32.NotEquals(x, y)); y = new SqlInt32(6); Assert.Equal(SqlBoolean.True, x != y); Assert.Equal(SqlBoolean.True, SqlInt32.NotEquals(x, y)); } [Fact] public void OnesComplement() { int a = 5; SqlInt32 x = new SqlInt32(a); SqlInt32 z = ~x; Assert.Equal(~a, z.Value); z = SqlInt32.OnesComplement(x); Assert.Equal(~a, z.Value); } [Fact] public void IsNullProperty() { SqlInt32 n = SqlInt32.Null; Assert.True(n.IsNull); } [Fact] public void Subtract() { int a = 7; int b = 5; SqlInt32 x = new SqlInt32(a); SqlInt32 y = new SqlInt32(b); SqlInt32 z = x - y; Assert.Equal(a - b, z.Value); z = SqlInt32.Subtract(x, y); Assert.Equal(a - b, z.Value); } [Fact] public void ConversionMethods() { SqlInt32 x; // Case 1: SqlInt32.Null -> SqlBoolean == SqlBoolean.Null x = SqlInt32.Null; Assert.Equal(SqlBoolean.Null, x.ToSqlBoolean()); // Case 2: SqlInt32.Zero -> SqlBoolean == False x = SqlInt32.Zero; Assert.Equal(SqlBoolean.False, x.ToSqlBoolean()); // Case 3: SqlInt32(nonzero) -> SqlBoolean == True x = new SqlInt32(27); Assert.Equal(SqlBoolean.True, x.ToSqlBoolean()); // Case 4: SqlInt32.Null -> SqlByte == SqlByte.Null x = SqlInt32.Null; Assert.Equal(SqlByte.Null, x.ToSqlByte()); // Case 5: Test non-null conversion to SqlByte x = new SqlInt32(27); Assert.Equal((byte)27, x.ToSqlByte().Value); // Case 6: SqlInt32.Null -> SqlDecimal == SqlDecimal.Null x = SqlInt32.Null; Assert.Equal(SqlDecimal.Null, x.ToSqlDecimal()); // Case 7: Test non-null conversion to SqlDecimal x = new SqlInt32(27); Assert.Equal(27, x.ToSqlDecimal().Value); // Case 8: SqlInt32.Null -> SqlDouble == SqlDouble.Null x = SqlInt32.Null; Assert.Equal(SqlDouble.Null, x.ToSqlDouble()); // Case 9: Test non-null conversion to SqlDouble x = new SqlInt32(27); Assert.Equal(27, x.ToSqlDouble().Value); // Case 10: SqlInt32.Null -> SqlInt16 == SqlInt16.Null x = SqlInt32.Null; Assert.Equal(SqlInt16.Null, x.ToSqlInt16()); // Case 11: Test non-null conversion to SqlInt16 x = new SqlInt32(27); Assert.Equal((short)27, 
x.ToSqlInt16().Value); // Case 12: SqlInt32.Null -> SqlInt64 == SqlInt64.Null x = SqlInt32.Null; Assert.Equal(SqlInt64.Null, x.ToSqlInt64()); // Case 13: Test non-null conversion to SqlInt64 x = new SqlInt32(27); Assert.Equal(27, x.ToSqlInt64().Value); // Case 14: SqlInt32.Null -> SqlMoney == SqlMoney.Null x = SqlInt32.Null; Assert.Equal(SqlMoney.Null, x.ToSqlMoney()); // Case 15: Test non-null conversion to SqlMoney x = new SqlInt32(27); Assert.Equal(27.0000M, x.ToSqlMoney().Value); // Case 16: SqlInt32.Null -> SqlSingle == SqlSingle.Null x = SqlInt32.Null; Assert.Equal(SqlSingle.Null, x.ToSqlSingle()); // Case 17: Test non-null conversion to SqlSingle x = new SqlInt32(27); Assert.Equal(27, x.ToSqlSingle().Value); } [Fact] public void Xor() { int a = 5; int b = 7; SqlInt32 x = new SqlInt32(a); SqlInt32 y = new SqlInt32(b); SqlInt32 z = x ^ y; Assert.Equal(a ^ b, z.Value); z = SqlInt32.Xor(x, y); Assert.Equal(a ^ b, z.Value); } [Fact] public void GetXsdTypeTest() { XmlQualifiedName qualifiedName = SqlInt32.GetXsdType(null); Assert.Equal("int", qualifiedName.Name); } internal void ReadWriteXmlTestInternal(string xml, int testval, string unit_test_id) { SqlInt32 test; SqlInt32 test1; XmlSerializer ser; StringWriter sw; XmlTextWriter xw; StringReader sr; XmlTextReader xr; test = new SqlInt32(testval); ser = new XmlSerializer(typeof(SqlInt32)); sw = new StringWriter(); xw = new XmlTextWriter(sw); ser.Serialize(xw, test); // Assert.Equal (xml, sw.ToString ()); sr = new StringReader(xml); xr = new XmlTextReader(sr); test1 = (SqlInt32)ser.Deserialize(xr); Assert.Equal(testval, test1.Value); } [Fact] //[Category ("MobileNotWorking")] public void ReadWriteXmlTest() { string xml1 = "<?xml version=\"1.0\" encoding=\"utf-16\"?><int>4556</int>"; string xml2 = "<?xml version=\"1.0\" encoding=\"utf-16\"?><int>-6445</int>"; string xml3 = "<?xml version=\"1.0\" encoding=\"utf-16\"?><int>0x455687AB3E4D56F</int>"; int test1 = 4556; int test2 = -6445; int test3 = 0x4F56; 
ReadWriteXmlTestInternal(xml1, test1, "BA01"); ReadWriteXmlTestInternal(xml2, test2, "BA02"); InvalidOperationException ex = Assert.Throws<InvalidOperationException>(() => ReadWriteXmlTestInternal(xml3, test3, "#BA03")); Assert.Equal(typeof(FormatException), ex.InnerException.GetType()); } } }
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./src/libraries/System.Security.Cryptography/tests/DefaultECDiffieHellmanProvider.Windows.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Runtime.InteropServices; using Test.Cryptography; namespace System.Security.Cryptography.EcDiffieHellman.Tests { public partial class ECDiffieHellmanProvider : IECDiffieHellmanProvider { public bool IsCurveValid(Oid oid) { // Friendly name required for windows return NativeOidFriendlyNameExists(oid.FriendlyName); } public bool ExplicitCurvesSupported { get { return PlatformDetection.WindowsVersion >= 10; } } public bool CanDeriveNewPublicKey => true; private static bool NativeOidFriendlyNameExists(string oidFriendlyName) { if (string.IsNullOrEmpty(oidFriendlyName)) return false; try { // By specifying OidGroup.PublicKeyAlgorithm, no caches are used // Note: this throws when there is no oid value, even when friendly name is valid // so it cannot be used for curves with no oid value such as curve25519 return !string.IsNullOrEmpty(Oid.FromFriendlyName(oidFriendlyName, OidGroup.PublicKeyAlgorithm).FriendlyName); } catch (Exception) { return false; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Runtime.InteropServices; using Test.Cryptography; namespace System.Security.Cryptography.EcDiffieHellman.Tests { public partial class ECDiffieHellmanProvider : IECDiffieHellmanProvider { public bool IsCurveValid(Oid oid) { // Friendly name required for windows return NativeOidFriendlyNameExists(oid.FriendlyName); } public bool ExplicitCurvesSupported { get { return PlatformDetection.WindowsVersion >= 10; } } public bool CanDeriveNewPublicKey => true; private static bool NativeOidFriendlyNameExists(string oidFriendlyName) { if (string.IsNullOrEmpty(oidFriendlyName)) return false; try { // By specifying OidGroup.PublicKeyAlgorithm, no caches are used // Note: this throws when there is no oid value, even when friendly name is valid // so it cannot be used for curves with no oid value such as curve25519 return !string.IsNullOrEmpty(Oid.FromFriendlyName(oidFriendlyName, OidGroup.PublicKeyAlgorithm).FriendlyName); } catch (Exception) { return false; } } } }
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./src/tests/JIT/Regression/VS-ia64-JIT/M00/b84586/b84586.ilproj
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <OutputType>Exe</OutputType> </PropertyGroup> <PropertyGroup> <DebugType>PdbOnly</DebugType> <Optimize>True</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="$(MSBuildProjectName).il" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <OutputType>Exe</OutputType> </PropertyGroup> <PropertyGroup> <DebugType>PdbOnly</DebugType> <Optimize>True</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="$(MSBuildProjectName).il" /> </ItemGroup> </Project>
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./src/tests/JIT/Directed/StrAccess/straccess3_cs_do.csproj
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> </PropertyGroup> <PropertyGroup> <DebugType>Full</DebugType> <Optimize>True</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="straccess3.cs" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> </PropertyGroup> <PropertyGroup> <DebugType>Full</DebugType> <Optimize>True</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="straccess3.cs" /> </ItemGroup> </Project>
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./src/tests/JIT/Generics/Instantiation/delegates/Delegate009.csproj
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType>PdbOnly</DebugType> </PropertyGroup> <ItemGroup> <Compile Include="Delegate009.cs" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType>PdbOnly</DebugType> </PropertyGroup> <ItemGroup> <Compile Include="Delegate009.cs" /> </ItemGroup> </Project>
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./src/libraries/System.CodeDom/tests/System/CodeDom/Compiler/CodeValidatorTests.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using Xunit; namespace System.CodeDom.Compiler.Tests { public class CodeValidatorTests { public static IEnumerable<object[]> ValidateIdentifiers_Valid_TestData() { // CodeComment. yield return new object[] { new CodeComment() }; yield return new object[] { new CodeCommentStatement() }; yield return new object[] { new CodeCommentStatement((string)null) }; yield return new object[] { new CodeCommentStatement(string.Empty) }; yield return new object[] { new CodeCommentStatement("text") }; // CodeChecksumPragma yield return new object[] { new CodeChecksumPragma() }; yield return new object[] { new CodeChecksumPragma(null, Guid.NewGuid(), new byte[0]) }; yield return new object[] { new CodeChecksumPragma(string.Empty, Guid.NewGuid(), new byte[0]) }; yield return new object[] { new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0]) }; // CodeRegionDirective. yield return new object[] { new CodeRegionDirective() }; yield return new object[] { new CodeRegionDirective(CodeRegionMode.None, null) }; yield return new object[] { new CodeRegionDirective(CodeRegionMode.None, string.Empty) }; yield return new object[] { new CodeRegionDirective(CodeRegionMode.None, "name") }; // CodeNamespaceImport. yield return new object[] { new CodeNamespaceImport("nameSpace") }; var fullNamespaceImport = new CodeNamespaceImport("nameSpace") { LinePragma = new CodeLinePragma() }; yield return new object[] { fullNamespaceImport }; // CodeMemberEvent. 
yield return new object[] { new CodeMemberEvent() }; yield return new object[] { new CodeMemberEvent { Name = "0" } }; yield return new object[] { new CodeMemberEvent { Name = "name", PrivateImplementationType = new CodeTypeReference() } }; yield return new object[] { new CodeMemberEvent { Name = "name", PrivateImplementationType = new CodeTypeReference("0") } }; var fullEvent = new CodeMemberEvent { Name = "name", LinePragma = new CodeLinePragma(), PrivateImplementationType = new CodeTypeReference("type") }; fullEvent.Comments.Add(new CodeCommentStatement()); fullEvent.Comments.Add(new CodeCommentStatement("0")); fullEvent.Comments.Add(new CodeCommentStatement("text")); fullEvent.StartDirectives.Add(new CodeChecksumPragma()); fullEvent.StartDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); fullEvent.EndDirectives.Add(new CodeChecksumPragma()); fullEvent.EndDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); fullEvent.CustomAttributes.Add(new CodeAttributeDeclaration("attribute1")); fullEvent.CustomAttributes.Add(new CodeAttributeDeclaration("attribute2", new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullEvent.CustomAttributes.Add(new CodeAttributeDeclaration("attribute3", new CodeAttributeArgument(null, new CodePrimitiveExpression(1)), new CodeAttributeArgument(string.Empty, new CodePrimitiveExpression(1)), new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullEvent.ImplementationTypes.Add(new CodeTypeReference((string)null)); fullEvent.ImplementationTypes.Add(new CodeTypeReference(string.Empty)); fullEvent.ImplementationTypes.Add(new CodeTypeReference("constraint1")); fullEvent.ImplementationTypes.Add(new CodeTypeReference("constraint2`2", new CodeTypeReference("parameter1"), new CodeTypeReference("parameter2"))); yield return new object[] { fullEvent }; // CodeMemberField. 
yield return new object[] { new CodeMemberField(new CodeTypeReference("type"), "name") }; var fullField = new CodeMemberField(new CodeTypeReference("type"), "name") { LinePragma = new CodeLinePragma(), InitExpression = new CodePrimitiveExpression(1) }; fullField.Comments.Add(new CodeCommentStatement()); fullField.Comments.Add(new CodeCommentStatement("0")); fullField.Comments.Add(new CodeCommentStatement("text")); fullField.StartDirectives.Add(new CodeChecksumPragma()); fullField.StartDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); fullField.EndDirectives.Add(new CodeChecksumPragma()); fullField.EndDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); fullField.CustomAttributes.Add(new CodeAttributeDeclaration("attribute1")); fullField.CustomAttributes.Add(new CodeAttributeDeclaration("attribute2", new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullField.CustomAttributes.Add(new CodeAttributeDeclaration("attribute3", new CodeAttributeArgument(null, new CodePrimitiveExpression(1)), new CodeAttributeArgument(string.Empty, new CodePrimitiveExpression(1)), new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); yield return new object[] { fullField }; // CodeParameterDeclarationExpression. 
yield return new object[] { new CodeParameterDeclarationExpression(new CodeTypeReference("type"), "name") }; var fullParameter = new CodeParameterDeclarationExpression(new CodeTypeReference("type"), "name"); fullParameter.CustomAttributes.Add(new CodeAttributeDeclaration("attribute1")); fullParameter.CustomAttributes.Add(new CodeAttributeDeclaration("attribute2", new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullParameter.CustomAttributes.Add(new CodeAttributeDeclaration("attribute3", new CodeAttributeArgument(null, new CodePrimitiveExpression(1)), new CodeAttributeArgument(string.Empty, new CodePrimitiveExpression(1)), new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); yield return new object[] { fullParameter }; var fullTypeParameter = new CodeTypeParameter("parameter"); fullTypeParameter.CustomAttributes.Add(new CodeAttributeDeclaration("attribute1")); fullTypeParameter.CustomAttributes.Add(new CodeAttributeDeclaration("attribute2", new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullTypeParameter.CustomAttributes.Add(new CodeAttributeDeclaration("attribute3", new CodeAttributeArgument(null, new CodePrimitiveExpression(1)), new CodeAttributeArgument(string.Empty, new CodePrimitiveExpression(1)), new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullTypeParameter.Constraints.Add(new CodeTypeReference((string)null)); fullTypeParameter.Constraints.Add(new CodeTypeReference(string.Empty)); fullTypeParameter.Constraints.Add(new CodeTypeReference("constraint1")); fullTypeParameter.Constraints.Add(new CodeTypeReference("constraint2`2", new CodeTypeReference("parameter1"), new CodeTypeReference("parameter2"))); var invalidParameterAttribute1 = new CodeParameterDeclarationExpression(new CodeTypeReference("type"), "name"); invalidParameterAttribute1.CustomAttributes.Add(new CodeAttributeDeclaration()); var invalidParameterAttribute2 = new CodeParameterDeclarationExpression(new 
CodeTypeReference("type"), "name"); invalidParameterAttribute2.CustomAttributes.Add(new CodeAttributeDeclaration((string)null)); var invalidParameterAttribute3 = new CodeParameterDeclarationExpression(new CodeTypeReference("type"), "name"); invalidParameterAttribute3.CustomAttributes.Add(new CodeAttributeDeclaration(string.Empty)); var invalidParameterAttribute4 = new CodeParameterDeclarationExpression(new CodeTypeReference("type"), "name"); invalidParameterAttribute4.CustomAttributes.Add(new CodeAttributeDeclaration("0")); var invalidParameterAttribute5 = new CodeParameterDeclarationExpression(new CodeTypeReference("type"), "name"); invalidParameterAttribute5.CustomAttributes.Add(new CodeAttributeDeclaration("name", new CodeAttributeArgument("0", new CodePrimitiveExpression(1)))); var invalidParameterAttribute6 = new CodeParameterDeclarationExpression(new CodeTypeReference("type"), "name"); invalidParameterAttribute6.CustomAttributes.Add(new CodeAttributeDeclaration("name", new CodeAttributeArgument("name", new CodeExpression()))); // CodeMemberMethod. 
yield return new object[] { new CodeMemberMethod { Name = "name" } }; var abstractMethod = new CodeMemberMethod { Name = "name", Attributes = MemberAttributes.Abstract }; abstractMethod.Statements.Add(new CodeStatement()); yield return new object[] { abstractMethod }; var fullMethod = new CodeMemberMethod { Name = "name", LinePragma = new CodeLinePragma(), ReturnType = new CodeTypeReference("returnType"), PrivateImplementationType = new CodeTypeReference("privateImplementationType") }; fullMethod.Comments.Add(new CodeCommentStatement()); fullMethod.Comments.Add(new CodeCommentStatement("0")); fullMethod.Comments.Add(new CodeCommentStatement("text")); fullMethod.StartDirectives.Add(new CodeChecksumPragma()); fullMethod.StartDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); fullMethod.EndDirectives.Add(new CodeChecksumPragma()); fullMethod.EndDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); fullMethod.CustomAttributes.Add(new CodeAttributeDeclaration("attribute1")); fullMethod.CustomAttributes.Add(new CodeAttributeDeclaration("attribute2", new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullMethod.CustomAttributes.Add(new CodeAttributeDeclaration("attribute3", new CodeAttributeArgument(null, new CodePrimitiveExpression(1)), new CodeAttributeArgument(string.Empty, new CodePrimitiveExpression(1)), new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullMethod.ImplementationTypes.Add(new CodeTypeReference((string)null)); fullMethod.ImplementationTypes.Add(new CodeTypeReference(string.Empty)); fullMethod.ImplementationTypes.Add(new CodeTypeReference("constraint1")); fullMethod.ImplementationTypes.Add(new CodeTypeReference("constraint2`2", new CodeTypeReference("parameter1"), new CodeTypeReference("parameter2"))); fullMethod.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration("attribute1")); fullMethod.ReturnTypeCustomAttributes.Add(new 
CodeAttributeDeclaration("attribute2", new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullMethod.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration("attribute3", new CodeAttributeArgument(null, new CodePrimitiveExpression(1)), new CodeAttributeArgument(string.Empty, new CodePrimitiveExpression(1)), new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullMethod.Statements.Add(new CodeMethodReturnStatement()); fullMethod.Statements.Add(new CodeMethodReturnStatement { LinePragma = new CodeLinePragma() }); fullMethod.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("type"), "name")); fullMethod.Parameters.Add(fullParameter); fullMethod.TypeParameters.Add(new CodeTypeParameter("parameter1")); fullMethod.TypeParameters.Add(fullTypeParameter); yield return new object[] { fullMethod }; // CodeEntryPointMethod. yield return new object[] { new CodeEntryPointMethod() }; yield return new object[] { new CodeEntryPointMethod { Name = null } }; yield return new object[] { new CodeEntryPointMethod { Name = string.Empty } }; yield return new object[] { new CodeEntryPointMethod { Name = "name" } }; yield return new object[] { new CodeEntryPointMethod { Name = "0" } }; yield return new object[] { new CodeEntryPointMethod { Name = "name", ReturnType = new CodeTypeReference() } }; yield return new object[] { new CodeEntryPointMethod { Name = "name", ReturnType = new CodeTypeReference("0") } }; yield return new object[] { new CodeEntryPointMethod { Name = "name", PrivateImplementationType = new CodeTypeReference() } }; yield return new object[] { new CodeEntryPointMethod { Name = "name", PrivateImplementationType = new CodeTypeReference("0") } }; var abstractEntryPointMethod = new CodeEntryPointMethod { Name = "name", Attributes = MemberAttributes.Abstract }; abstractEntryPointMethod.Statements.Add(new CodeMethodReturnStatement()); yield return new object[] { abstractEntryPointMethod }; var fullEntryPointMethod = 
new CodeEntryPointMethod { Name = "name", LinePragma = new CodeLinePragma(), ReturnType = new CodeTypeReference("returnType"), PrivateImplementationType = new CodeTypeReference("privateImplementationType") }; fullEntryPointMethod.Comments.Add(new CodeCommentStatement()); fullEntryPointMethod.Comments.Add(new CodeCommentStatement("0")); fullEntryPointMethod.Comments.Add(new CodeCommentStatement("text")); fullEntryPointMethod.StartDirectives.Add(new CodeChecksumPragma()); fullEntryPointMethod.StartDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); fullEntryPointMethod.EndDirectives.Add(new CodeChecksumPragma()); fullEntryPointMethod.EndDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); fullEntryPointMethod.CustomAttributes.Add(new CodeAttributeDeclaration("attribute1")); fullEntryPointMethod.CustomAttributes.Add(new CodeAttributeDeclaration("attribute2", new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullEntryPointMethod.CustomAttributes.Add(new CodeAttributeDeclaration("attribute3", new CodeAttributeArgument(null, new CodePrimitiveExpression(1)), new CodeAttributeArgument(string.Empty, new CodePrimitiveExpression(1)), new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullEntryPointMethod.CustomAttributes.Add(new CodeAttributeDeclaration()); fullEntryPointMethod.CustomAttributes.Add(new CodeAttributeDeclaration((string)null)); fullEntryPointMethod.CustomAttributes.Add(new CodeAttributeDeclaration(string.Empty)); fullEntryPointMethod.CustomAttributes.Add(new CodeAttributeDeclaration("0")); fullEntryPointMethod.CustomAttributes.Add(new CodeAttributeDeclaration("name", new CodeAttributeArgument("0", new CodePrimitiveExpression(1)))); fullEntryPointMethod.CustomAttributes.Add(new CodeAttributeDeclaration("name", new CodeAttributeArgument("name", new CodeExpression()))); fullEntryPointMethod.ImplementationTypes.Add(new CodeTypeReference((string)null)); 
fullEntryPointMethod.ImplementationTypes.Add(new CodeTypeReference(string.Empty)); fullEntryPointMethod.ImplementationTypes.Add(new CodeTypeReference("constraint1")); fullEntryPointMethod.ImplementationTypes.Add(new CodeTypeReference("constraint2`2", new CodeTypeReference("parameter1"), new CodeTypeReference("parameter2"))); fullEntryPointMethod.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration("attribute1")); fullEntryPointMethod.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration("attribute2", new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullEntryPointMethod.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration("attribute3", new CodeAttributeArgument(null, new CodePrimitiveExpression(1)), new CodeAttributeArgument(string.Empty, new CodePrimitiveExpression(1)), new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullEntryPointMethod.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration()); fullEntryPointMethod.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration((string)null)); fullEntryPointMethod.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration(string.Empty)); fullEntryPointMethod.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration("0")); fullEntryPointMethod.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration("name", new CodeAttributeArgument("0", new CodePrimitiveExpression(1)))); fullEntryPointMethod.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration("name", new CodeAttributeArgument("name", new CodeExpression()))); fullEntryPointMethod.Statements.Add(new CodeMethodReturnStatement()); fullEntryPointMethod.Statements.Add(new CodeMethodReturnStatement { LinePragma = new CodeLinePragma() }); fullEntryPointMethod.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("type"), "name")); fullEntryPointMethod.Parameters.Add(fullParameter); fullEntryPointMethod.Parameters.Add(new CodeParameterDeclarationExpression()); 
fullEntryPointMethod.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference(), "name")); fullEntryPointMethod.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("0"), "name")); fullEntryPointMethod.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("type"), null)); fullEntryPointMethod.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("type"), string.Empty)); fullEntryPointMethod.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("type"), "0")); fullEntryPointMethod.Parameters.Add(invalidParameterAttribute1); fullEntryPointMethod.Parameters.Add(invalidParameterAttribute2); fullEntryPointMethod.Parameters.Add(invalidParameterAttribute3); fullEntryPointMethod.Parameters.Add(invalidParameterAttribute4); fullEntryPointMethod.Parameters.Add(invalidParameterAttribute5); fullEntryPointMethod.Parameters.Add(invalidParameterAttribute6); fullEntryPointMethod.TypeParameters.Add(new CodeTypeParameter("parameter1")); fullEntryPointMethod.TypeParameters.Add(fullTypeParameter); yield return new object[] { fullEntryPointMethod }; // CodeConstructor. 
yield return new object[] { new CodeConstructor() }; yield return new object[] { new CodeConstructor { Name = null } }; yield return new object[] { new CodeConstructor { Name = string.Empty } }; yield return new object[] { new CodeConstructor { Name = "0" } }; yield return new object[] { new CodeConstructor { Name = "name", ReturnType = new CodeTypeReference() } }; yield return new object[] { new CodeConstructor { Name = "name", ReturnType = new CodeTypeReference("0") } }; yield return new object[] { new CodeConstructor { Name = "name", PrivateImplementationType = new CodeTypeReference() } }; yield return new object[] { new CodeConstructor { Name = "name", PrivateImplementationType = new CodeTypeReference("0") } }; var fullConstructor = new CodeConstructor { Name = "name", LinePragma = new CodeLinePragma(), ReturnType = new CodeTypeReference("returnType"), PrivateImplementationType = new CodeTypeReference("privateImplementationType") }; fullConstructor.Comments.Add(new CodeCommentStatement()); fullConstructor.Comments.Add(new CodeCommentStatement("0")); fullConstructor.Comments.Add(new CodeCommentStatement("text")); fullConstructor.StartDirectives.Add(new CodeChecksumPragma()); fullConstructor.StartDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); fullConstructor.EndDirectives.Add(new CodeChecksumPragma()); fullConstructor.EndDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); fullConstructor.CustomAttributes.Add(new CodeAttributeDeclaration("attribute1")); fullConstructor.CustomAttributes.Add(new CodeAttributeDeclaration("attribute2", new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullConstructor.CustomAttributes.Add(new CodeAttributeDeclaration("attribute3", new CodeAttributeArgument(null, new CodePrimitiveExpression(1)), new CodeAttributeArgument(string.Empty, new CodePrimitiveExpression(1)), new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); 
fullConstructor.ImplementationTypes.Add(new CodeTypeReference((string)null)); fullConstructor.ImplementationTypes.Add(new CodeTypeReference(string.Empty)); fullConstructor.ImplementationTypes.Add(new CodeTypeReference("constraint1")); fullConstructor.ImplementationTypes.Add(new CodeTypeReference("constraint2`2", new CodeTypeReference("parameter1"), new CodeTypeReference("parameter2"))); fullConstructor.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration("attribute1")); fullConstructor.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration("attribute2", new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullConstructor.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration("attribute3", new CodeAttributeArgument(null, new CodePrimitiveExpression(1)), new CodeAttributeArgument(string.Empty, new CodePrimitiveExpression(1)), new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullConstructor.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration()); fullConstructor.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration((string)null)); fullConstructor.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration(string.Empty)); fullConstructor.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration("0")); fullConstructor.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration("name", new CodeAttributeArgument("0", new CodePrimitiveExpression(1)))); fullConstructor.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration("name", new CodeAttributeArgument("name", new CodeExpression()))); fullConstructor.Statements.Add(new CodeMethodReturnStatement()); fullConstructor.Statements.Add(new CodeMethodReturnStatement { LinePragma = new CodeLinePragma() }); fullConstructor.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("type"), "name")); fullConstructor.Parameters.Add(fullParameter); fullConstructor.TypeParameters.Add(new CodeTypeParameter("parameter1")); 
fullConstructor.TypeParameters.Add(fullTypeParameter); fullConstructor.BaseConstructorArgs.Add(new CodePrimitiveExpression(1)); fullConstructor.ChainedConstructorArgs.Add(new CodePrimitiveExpression(1)); yield return new object[] { fullConstructor }; // CodeTypeConstructor. yield return new object[] { new CodeTypeConstructor() }; yield return new object[] { new CodeTypeConstructor { Name = null } }; yield return new object[] { new CodeTypeConstructor { Name = string.Empty } }; yield return new object[] { new CodeTypeConstructor { Name = "name" } }; yield return new object[] { new CodeTypeConstructor { Name = "0" } }; yield return new object[] { new CodeTypeConstructor { Name = "name", ReturnType = new CodeTypeReference() } }; yield return new object[] { new CodeTypeConstructor { Name = "name", ReturnType = new CodeTypeReference("0") } }; yield return new object[] { new CodeTypeConstructor { Name = "name", PrivateImplementationType = new CodeTypeReference() } }; yield return new object[] { new CodeTypeConstructor { Name = "name", PrivateImplementationType = new CodeTypeReference("0") } }; var abstractTypeConstructor = new CodeTypeConstructor { Name = "name", Attributes = MemberAttributes.Abstract }; abstractTypeConstructor.Statements.Add(new CodeMethodReturnStatement()); yield return new object[] { abstractTypeConstructor }; var fullTypeConstructor = new CodeTypeConstructor { Name = "name", LinePragma = new CodeLinePragma(), ReturnType = new CodeTypeReference("returnType"), PrivateImplementationType = new CodeTypeReference("privateImplementationType") }; fullTypeConstructor.Comments.Add(new CodeCommentStatement()); fullTypeConstructor.Comments.Add(new CodeCommentStatement("0")); fullTypeConstructor.Comments.Add(new CodeCommentStatement("text")); fullTypeConstructor.StartDirectives.Add(new CodeChecksumPragma()); fullTypeConstructor.StartDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); fullTypeConstructor.EndDirectives.Add(new 
CodeChecksumPragma()); fullTypeConstructor.EndDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); fullTypeConstructor.CustomAttributes.Add(new CodeAttributeDeclaration("attribute1")); fullTypeConstructor.CustomAttributes.Add(new CodeAttributeDeclaration("attribute2", new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullTypeConstructor.CustomAttributes.Add(new CodeAttributeDeclaration("attribute3", new CodeAttributeArgument(null, new CodePrimitiveExpression(1)), new CodeAttributeArgument(string.Empty, new CodePrimitiveExpression(1)), new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullTypeConstructor.CustomAttributes.Add(new CodeAttributeDeclaration()); fullTypeConstructor.CustomAttributes.Add(new CodeAttributeDeclaration((string)null)); fullTypeConstructor.CustomAttributes.Add(new CodeAttributeDeclaration(string.Empty)); fullTypeConstructor.CustomAttributes.Add(new CodeAttributeDeclaration("0")); fullTypeConstructor.CustomAttributes.Add(new CodeAttributeDeclaration("name", new CodeAttributeArgument("0", new CodePrimitiveExpression(1)))); fullTypeConstructor.CustomAttributes.Add(new CodeAttributeDeclaration("name", new CodeAttributeArgument("name", new CodeExpression()))); fullTypeConstructor.ImplementationTypes.Add(new CodeTypeReference((string)null)); fullTypeConstructor.ImplementationTypes.Add(new CodeTypeReference(string.Empty)); fullTypeConstructor.ImplementationTypes.Add(new CodeTypeReference("constraint1")); fullTypeConstructor.ImplementationTypes.Add(new CodeTypeReference("constraint2`2", new CodeTypeReference("parameter1"), new CodeTypeReference("parameter2"))); fullTypeConstructor.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration("attribute1")); fullTypeConstructor.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration("attribute2", new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullTypeConstructor.ReturnTypeCustomAttributes.Add(new 
CodeAttributeDeclaration("attribute3", new CodeAttributeArgument(null, new CodePrimitiveExpression(1)), new CodeAttributeArgument(string.Empty, new CodePrimitiveExpression(1)), new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullTypeConstructor.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration()); fullTypeConstructor.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration((string)null)); fullTypeConstructor.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration(string.Empty)); fullTypeConstructor.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration("0")); fullTypeConstructor.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration("name", new CodeAttributeArgument("0", new CodePrimitiveExpression(1)))); fullTypeConstructor.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration("name", new CodeAttributeArgument("name", new CodeExpression()))); fullTypeConstructor.Statements.Add(new CodeMethodReturnStatement()); fullTypeConstructor.Statements.Add(new CodeMethodReturnStatement { LinePragma = new CodeLinePragma() }); fullTypeConstructor.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("type"), "name")); fullTypeConstructor.Parameters.Add(fullParameter); fullTypeConstructor.Parameters.Add(new CodeParameterDeclarationExpression()); fullTypeConstructor.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference(), "name")); fullTypeConstructor.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("0"), "name")); fullTypeConstructor.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("type"), null)); fullTypeConstructor.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("type"), string.Empty)); fullTypeConstructor.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("type"), "0")); fullTypeConstructor.Parameters.Add(invalidParameterAttribute1); 
fullTypeConstructor.Parameters.Add(invalidParameterAttribute2); fullTypeConstructor.Parameters.Add(invalidParameterAttribute3); fullTypeConstructor.Parameters.Add(invalidParameterAttribute4); fullTypeConstructor.Parameters.Add(invalidParameterAttribute5); fullTypeConstructor.Parameters.Add(invalidParameterAttribute6); fullTypeConstructor.TypeParameters.Add(new CodeTypeParameter("parameter1")); fullTypeConstructor.TypeParameters.Add(fullTypeParameter); yield return new object[] { fullTypeConstructor }; // CodeMemberProperty. yield return new object[] { new CodeMemberProperty { Name = "name" } }; yield return new object[] { new CodeMemberProperty { Name = "item" } }; yield return new object[] { new CodeMemberProperty { Name = "Item" } }; var abstractProperty = new CodeMemberProperty { Name = "name", Attributes = MemberAttributes.Abstract }; abstractProperty.GetStatements.Add(new CodeStatement()); abstractProperty.SetStatements.Add(new CodeStatement()); yield return new object[] { abstractProperty }; var fullItemPropertyUpper = new CodeMemberProperty { Name = "Item", PrivateImplementationType = new CodeTypeReference("implementationType") }; fullItemPropertyUpper.Comments.Add(new CodeCommentStatement()); fullItemPropertyUpper.Comments.Add(new CodeCommentStatement("0")); fullItemPropertyUpper.Comments.Add(new CodeCommentStatement("text")); fullItemPropertyUpper.StartDirectives.Add(new CodeChecksumPragma()); fullItemPropertyUpper.StartDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); fullItemPropertyUpper.EndDirectives.Add(new CodeChecksumPragma()); fullItemPropertyUpper.EndDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); fullItemPropertyUpper.CustomAttributes.Add(new CodeAttributeDeclaration("attribute1")); fullItemPropertyUpper.CustomAttributes.Add(new CodeAttributeDeclaration("attribute2", new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullItemPropertyUpper.CustomAttributes.Add(new 
CodeAttributeDeclaration("attribute3", new CodeAttributeArgument(null, new CodePrimitiveExpression(1)), new CodeAttributeArgument(string.Empty, new CodePrimitiveExpression(1)), new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullItemPropertyUpper.GetStatements.Add(new CodeMethodReturnStatement()); fullItemPropertyUpper.GetStatements.Add(new CodeMethodReturnStatement { LinePragma = new CodeLinePragma() }); fullItemPropertyUpper.SetStatements.Add(new CodeMethodReturnStatement()); fullItemPropertyUpper.SetStatements.Add(new CodeMethodReturnStatement { LinePragma = new CodeLinePragma() }); fullItemPropertyUpper.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("type"), "name")); fullItemPropertyUpper.Parameters.Add(fullParameter); yield return new object[] { fullItemPropertyUpper }; var fullItemPropertyLower = new CodeMemberProperty { Name = "Item", PrivateImplementationType = new CodeTypeReference("implementationType") }; fullItemPropertyLower.Comments.Add(new CodeCommentStatement()); fullItemPropertyLower.Comments.Add(new CodeCommentStatement("0")); fullItemPropertyLower.Comments.Add(new CodeCommentStatement("text")); fullItemPropertyLower.StartDirectives.Add(new CodeChecksumPragma()); fullItemPropertyLower.StartDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); fullItemPropertyLower.EndDirectives.Add(new CodeChecksumPragma()); fullItemPropertyLower.EndDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); fullItemPropertyLower.CustomAttributes.Add(new CodeAttributeDeclaration("attribute1")); fullItemPropertyLower.CustomAttributes.Add(new CodeAttributeDeclaration("attribute2", new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullItemPropertyLower.CustomAttributes.Add(new CodeAttributeDeclaration("attribute3", new CodeAttributeArgument(null, new CodePrimitiveExpression(1)), new CodeAttributeArgument(string.Empty, new CodePrimitiveExpression(1)), 
new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullItemPropertyLower.GetStatements.Add(new CodeMethodReturnStatement()); fullItemPropertyLower.GetStatements.Add(new CodeMethodReturnStatement { LinePragma = new CodeLinePragma() }); fullItemPropertyLower.SetStatements.Add(new CodeMethodReturnStatement()); fullItemPropertyLower.SetStatements.Add(new CodeMethodReturnStatement { LinePragma = new CodeLinePragma() }); fullItemPropertyLower.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("type"), "name")); fullItemPropertyLower.Parameters.Add(fullParameter); yield return new object[] { fullItemPropertyLower }; var fullProperty = new CodeMemberProperty { Name = "name", PrivateImplementationType = new CodeTypeReference("implementationType") }; fullProperty.Comments.Add(new CodeCommentStatement()); fullProperty.Comments.Add(new CodeCommentStatement("0")); fullProperty.Comments.Add(new CodeCommentStatement("text")); fullProperty.StartDirectives.Add(new CodeChecksumPragma()); fullProperty.StartDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); fullProperty.EndDirectives.Add(new CodeChecksumPragma()); fullProperty.EndDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); fullProperty.CustomAttributes.Add(new CodeAttributeDeclaration("attribute1")); fullProperty.CustomAttributes.Add(new CodeAttributeDeclaration("attribute2", new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullProperty.CustomAttributes.Add(new CodeAttributeDeclaration("attribute3", new CodeAttributeArgument(null, new CodePrimitiveExpression(1)), new CodeAttributeArgument(string.Empty, new CodePrimitiveExpression(1)), new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullProperty.GetStatements.Add(new CodeMethodReturnStatement()); fullProperty.GetStatements.Add(new CodeMethodReturnStatement { LinePragma = new CodeLinePragma() }); fullProperty.SetStatements.Add(new 
CodeMethodReturnStatement()); fullProperty.SetStatements.Add(new CodeMethodReturnStatement { LinePragma = new CodeLinePragma() }); fullProperty.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("type"), "name")); fullProperty.Parameters.Add(fullParameter); fullProperty.Parameters.Add(new CodeParameterDeclarationExpression()); fullProperty.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference(), "name")); fullProperty.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("0"), "name")); fullProperty.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("type"), null)); fullProperty.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("type"), string.Empty)); fullProperty.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("type"), "0")); fullProperty.Parameters.Add(invalidParameterAttribute1); fullProperty.Parameters.Add(invalidParameterAttribute2); fullProperty.Parameters.Add(invalidParameterAttribute3); fullProperty.Parameters.Add(invalidParameterAttribute4); fullProperty.Parameters.Add(invalidParameterAttribute5); fullProperty.Parameters.Add(invalidParameterAttribute6); yield return new object[] { fullProperty }; // CodeSnippetTypeMember. 
yield return new object[] { new CodeSnippetTypeMember() }; yield return new object[] { new CodeSnippetTypeMember(null) }; yield return new object[] { new CodeSnippetTypeMember(string.Empty) }; yield return new object[] { new CodeSnippetTypeMember("0") }; yield return new object[] { new CodeSnippetTypeMember("text") }; var fullSnippetTypeMember = new CodeSnippetTypeMember("text"); fullSnippetTypeMember.Comments.Add(new CodeCommentStatement()); fullSnippetTypeMember.Comments.Add(new CodeCommentStatement("0")); fullSnippetTypeMember.Comments.Add(new CodeCommentStatement("text")); fullSnippetTypeMember.StartDirectives.Add(new CodeChecksumPragma()); fullSnippetTypeMember.StartDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); fullSnippetTypeMember.EndDirectives.Add(new CodeChecksumPragma()); fullSnippetTypeMember.EndDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); fullSnippetTypeMember.CustomAttributes.Add(new CodeAttributeDeclaration("attribute1")); fullSnippetTypeMember.CustomAttributes.Add(new CodeAttributeDeclaration("attribute2", new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullSnippetTypeMember.CustomAttributes.Add(new CodeAttributeDeclaration("attribute3", new CodeAttributeArgument(null, new CodePrimitiveExpression(1)), new CodeAttributeArgument(string.Empty, new CodePrimitiveExpression(1)), new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); yield return new object[] { fullSnippetTypeMember }; // CodeTypeDeclaration. 
yield return new object[] { new CodeTypeDeclaration("name") }; var interfaceTypeDeclaration = new CodeTypeDeclaration("name") { IsInterface = true }; var interfaceMethod = new CodeMemberMethod { Name = "name" }; interfaceMethod.Statements.Add(new CodeStatement()); var interfaceProperty = new CodeMemberProperty { Name = "name", PrivateImplementationType = new CodeTypeReference("0") }; interfaceProperty.GetStatements.Add(new CodeStatement()); interfaceProperty.SetStatements.Add(new CodeStatement()); interfaceTypeDeclaration.Members.Add(interfaceMethod); interfaceTypeDeclaration.Members.Add(interfaceProperty); yield return new object[] { interfaceTypeDeclaration }; var fullTypeDeclaration = new CodeTypeDeclaration("name"); fullTypeDeclaration.Comments.Add(new CodeCommentStatement()); fullTypeDeclaration.Comments.Add(new CodeCommentStatement("0")); fullTypeDeclaration.Comments.Add(new CodeCommentStatement("text")); fullTypeDeclaration.CustomAttributes.Add(new CodeAttributeDeclaration("attribute1")); fullTypeDeclaration.CustomAttributes.Add(new CodeAttributeDeclaration("attribute2", new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullTypeDeclaration.CustomAttributes.Add(new CodeAttributeDeclaration("attribute3", new CodeAttributeArgument(null, new CodePrimitiveExpression(1)), new CodeAttributeArgument(string.Empty, new CodePrimitiveExpression(1)), new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullTypeDeclaration.TypeParameters.Add(new CodeTypeParameter("parameter1")); fullTypeDeclaration.TypeParameters.Add(fullTypeParameter); fullTypeDeclaration.BaseTypes.Add(new CodeTypeReference((string)null)); fullTypeDeclaration.BaseTypes.Add(new CodeTypeReference(string.Empty)); fullTypeDeclaration.BaseTypes.Add(new CodeTypeReference("baseType1")); fullTypeDeclaration.BaseTypes.Add(new CodeTypeReference("baseType2`2", new CodeTypeReference("parameter1"), new CodeTypeReference("parameter2"))); fullTypeDeclaration.Members.Add(new 
CodeMemberEvent()); fullTypeDeclaration.Members.Add(new CodeMemberEvent { Name = "0" }); fullTypeDeclaration.Members.Add(new CodeMemberEvent { Name = "name", PrivateImplementationType = new CodeTypeReference() }); fullTypeDeclaration.Members.Add(new CodeMemberEvent { Name = "name", PrivateImplementationType = new CodeTypeReference("0") }); fullTypeDeclaration.Members.Add(fullEvent); fullTypeDeclaration.Members.Add(new CodeMemberField(new CodeTypeReference("type"), "name")); fullTypeDeclaration.Members.Add(fullField); fullTypeDeclaration.Members.Add(new CodeMemberMethod { Name = "name" }); fullTypeDeclaration.Members.Add(abstractMethod); fullTypeDeclaration.Members.Add(fullMethod); fullTypeDeclaration.Members.Add(new CodeEntryPointMethod()); fullTypeDeclaration.Members.Add(new CodeEntryPointMethod { Name = null }); fullTypeDeclaration.Members.Add(new CodeEntryPointMethod { Name = string.Empty }); fullTypeDeclaration.Members.Add(new CodeEntryPointMethod { Name = "name" }); fullTypeDeclaration.Members.Add(new CodeEntryPointMethod { Name = "0" }); fullTypeDeclaration.Members.Add(new CodeEntryPointMethod { Name = "name", ReturnType = new CodeTypeReference() }); fullTypeDeclaration.Members.Add(new CodeEntryPointMethod { Name = "name", ReturnType = new CodeTypeReference("0") }); fullTypeDeclaration.Members.Add(new CodeEntryPointMethod { Name = "name", PrivateImplementationType = new CodeTypeReference() }); fullTypeDeclaration.Members.Add(new CodeEntryPointMethod { Name = "name", PrivateImplementationType = new CodeTypeReference("0") }); fullTypeDeclaration.Members.Add(abstractEntryPointMethod); fullTypeDeclaration.Members.Add(fullEntryPointMethod); fullTypeDeclaration.Members.Add(new CodeConstructor()); fullTypeDeclaration.Members.Add(new CodeConstructor { Name = null }); fullTypeDeclaration.Members.Add(new CodeConstructor { Name = string.Empty }); fullTypeDeclaration.Members.Add(new CodeConstructor { Name = "0" }); fullTypeDeclaration.Members.Add(new CodeConstructor { 
Name = "name", ReturnType = new CodeTypeReference() }); fullTypeDeclaration.Members.Add(new CodeConstructor { Name = "name", ReturnType = new CodeTypeReference("0") }); fullTypeDeclaration.Members.Add(new CodeConstructor { Name = "name", PrivateImplementationType = new CodeTypeReference() }); fullTypeDeclaration.Members.Add(new CodeConstructor { Name = "name", PrivateImplementationType = new CodeTypeReference("0") }); fullTypeDeclaration.Members.Add(fullConstructor); fullTypeDeclaration.Members.Add(new CodeTypeConstructor()); fullTypeDeclaration.Members.Add(new CodeTypeConstructor { Name = null }); fullTypeDeclaration.Members.Add(new CodeTypeConstructor { Name = string.Empty }); fullTypeDeclaration.Members.Add(new CodeTypeConstructor { Name = "name" }); fullTypeDeclaration.Members.Add(new CodeTypeConstructor { Name = "0" }); fullTypeDeclaration.Members.Add(new CodeTypeConstructor { Name = "name", ReturnType = new CodeTypeReference() }); fullTypeDeclaration.Members.Add(new CodeTypeConstructor { Name = "name", ReturnType = new CodeTypeReference("0") }); fullTypeDeclaration.Members.Add(new CodeTypeConstructor { Name = "name", PrivateImplementationType = new CodeTypeReference() }); fullTypeDeclaration.Members.Add(new CodeTypeConstructor { Name = "name", PrivateImplementationType = new CodeTypeReference("0") }); fullTypeDeclaration.Members.Add(abstractTypeConstructor); fullTypeDeclaration.Members.Add(fullTypeConstructor); fullTypeDeclaration.Members.Add(new CodeMemberProperty { Name = "name" } ); fullTypeDeclaration.Members.Add(new CodeMemberProperty { Name = "item" } ); fullTypeDeclaration.Members.Add(abstractProperty); fullTypeDeclaration.Members.Add(fullItemPropertyLower); fullTypeDeclaration.Members.Add(fullItemPropertyUpper); fullTypeDeclaration.Members.Add(fullProperty); fullTypeDeclaration.Members.Add(new CodeSnippetTypeMember()); fullTypeDeclaration.Members.Add(new CodeSnippetTypeMember(null)); fullTypeDeclaration.Members.Add(new 
CodeSnippetTypeMember(string.Empty)); fullTypeDeclaration.Members.Add(new CodeSnippetTypeMember("text")); fullTypeDeclaration.Members.Add(fullSnippetTypeMember); yield return new object[] { fullTypeDeclaration }; // CodeTypeDelegate. yield return new object[] { new CodeTypeDelegate("name") }; var fullDelegate = new CodeTypeDelegate("name") { ReturnType = new CodeTypeReference("returnType") }; fullDelegate.Comments.Add(new CodeCommentStatement()); fullDelegate.Comments.Add(new CodeCommentStatement("0")); fullDelegate.Comments.Add(new CodeCommentStatement("text")); fullDelegate.CustomAttributes.Add(new CodeAttributeDeclaration("attribute1")); fullDelegate.CustomAttributes.Add(new CodeAttributeDeclaration("attribute2", new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullDelegate.CustomAttributes.Add(new CodeAttributeDeclaration("attribute3", new CodeAttributeArgument(null, new CodePrimitiveExpression(1)), new CodeAttributeArgument(string.Empty, new CodePrimitiveExpression(1)), new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullDelegate.TypeParameters.Add(new CodeTypeParameter("parameter1")); fullDelegate.TypeParameters.Add(fullTypeParameter); fullDelegate.BaseTypes.Add(new CodeTypeReference((string)null)); fullDelegate.BaseTypes.Add(new CodeTypeReference(string.Empty)); fullDelegate.BaseTypes.Add(new CodeTypeReference("baseType1")); fullDelegate.BaseTypes.Add(new CodeTypeReference("baseType2`2", new CodeTypeReference("parameter1"), new CodeTypeReference("parameter2"))); fullDelegate.Members.Add(new CodeMemberEvent()); fullDelegate.Members.Add(new CodeMemberEvent { Name = "0" }); fullDelegate.Members.Add(new CodeMemberEvent { Name = "name", PrivateImplementationType = new CodeTypeReference() }); fullDelegate.Members.Add(new CodeMemberEvent { Name = "name", PrivateImplementationType = new CodeTypeReference("0") }); fullDelegate.Members.Add(fullEvent); fullDelegate.Members.Add(new CodeMemberField(new CodeTypeReference("type"), 
"name")); fullDelegate.Members.Add(fullField); fullDelegate.Members.Add(new CodeMemberMethod { Name = "name" }); fullDelegate.Members.Add(abstractMethod); fullDelegate.Members.Add(fullMethod); fullDelegate.Members.Add(new CodeEntryPointMethod()); fullDelegate.Members.Add(new CodeEntryPointMethod { Name = null }); fullDelegate.Members.Add(new CodeEntryPointMethod { Name = string.Empty }); fullDelegate.Members.Add(new CodeEntryPointMethod { Name = "name" }); fullDelegate.Members.Add(new CodeEntryPointMethod { Name = "0" }); fullDelegate.Members.Add(new CodeEntryPointMethod { Name = "name", ReturnType = new CodeTypeReference() }); fullDelegate.Members.Add(new CodeEntryPointMethod { Name = "name", ReturnType = new CodeTypeReference("0") }); fullDelegate.Members.Add(new CodeEntryPointMethod { Name = "name", PrivateImplementationType = new CodeTypeReference() }); fullDelegate.Members.Add(new CodeEntryPointMethod { Name = "name", PrivateImplementationType = new CodeTypeReference("0") }); fullDelegate.Members.Add(abstractEntryPointMethod); fullDelegate.Members.Add(fullEntryPointMethod); fullDelegate.Members.Add(new CodeConstructor()); fullDelegate.Members.Add(new CodeConstructor { Name = null }); fullDelegate.Members.Add(new CodeConstructor { Name = string.Empty }); fullDelegate.Members.Add(new CodeConstructor { Name = "0" }); fullDelegate.Members.Add(new CodeConstructor { Name = "name", ReturnType = new CodeTypeReference() }); fullDelegate.Members.Add(new CodeConstructor { Name = "name", ReturnType = new CodeTypeReference("0") }); fullDelegate.Members.Add(new CodeConstructor { Name = "name", PrivateImplementationType = new CodeTypeReference() }); fullDelegate.Members.Add(new CodeConstructor { Name = "name", PrivateImplementationType = new CodeTypeReference("0") }); fullDelegate.Members.Add(fullConstructor); fullDelegate.Members.Add(new CodeTypeConstructor()); fullDelegate.Members.Add(new CodeTypeConstructor { Name = null }); fullDelegate.Members.Add(new 
CodeTypeConstructor { Name = string.Empty }); fullDelegate.Members.Add(new CodeTypeConstructor { Name = "name" }); fullDelegate.Members.Add(new CodeTypeConstructor { Name = "0" }); fullDelegate.Members.Add(new CodeTypeConstructor { Name = "name", ReturnType = new CodeTypeReference() }); fullDelegate.Members.Add(new CodeTypeConstructor { Name = "name", ReturnType = new CodeTypeReference("0") }); fullDelegate.Members.Add(new CodeTypeConstructor { Name = "name", PrivateImplementationType = new CodeTypeReference() }); fullDelegate.Members.Add(new CodeTypeConstructor { Name = "name", PrivateImplementationType = new CodeTypeReference("0") }); fullDelegate.Members.Add(abstractTypeConstructor); fullDelegate.Members.Add(fullTypeConstructor); fullDelegate.Members.Add(new CodeMemberProperty { Name = "name" } ); fullDelegate.Members.Add(new CodeMemberProperty { Name = "item" } ); fullDelegate.Members.Add(abstractProperty); fullDelegate.Members.Add(fullItemPropertyLower); fullDelegate.Members.Add(fullItemPropertyUpper); fullDelegate.Members.Add(fullProperty); fullDelegate.Members.Add(new CodeSnippetTypeMember()); fullDelegate.Members.Add(new CodeSnippetTypeMember(null)); fullDelegate.Members.Add(new CodeSnippetTypeMember(string.Empty)); fullDelegate.Members.Add(new CodeSnippetTypeMember("text")); fullDelegate.Members.Add(fullSnippetTypeMember); fullDelegate.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("type"), "name")); fullDelegate.Parameters.Add(fullParameter); yield return new object[] { fullDelegate }; // CodeNamespace. 
yield return new object[] { new CodeNamespace() }; yield return new object[] { new CodeNamespace(null) }; yield return new object[] { new CodeNamespace(string.Empty) }; yield return new object[] { new CodeNamespace("name") }; var fullNamespace = new CodeNamespace("name"); fullNamespace.Comments.Add(new CodeCommentStatement()); fullNamespace.Comments.Add(new CodeCommentStatement("0")); fullNamespace.Comments.Add(new CodeCommentStatement("text")); fullNamespace.Imports.Add(new CodeNamespaceImport("nameSpace1")); fullNamespace.Imports.Add(fullNamespaceImport); fullNamespace.Types.Add(new CodeTypeDeclaration("name")); fullNamespace.Types.Add(interfaceTypeDeclaration); fullNamespace.Types.Add(fullTypeDeclaration); yield return new object[] { fullNamespace }; // CodeCompileUnit. yield return new object[] { new CodeCompileUnit() }; var fullCompileUnit = new CodeCompileUnit(); fullCompileUnit.StartDirectives.Add(new CodeChecksumPragma()); fullCompileUnit.StartDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); fullCompileUnit.EndDirectives.Add(new CodeChecksumPragma()); fullCompileUnit.EndDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); fullCompileUnit.AssemblyCustomAttributes.Add(new CodeAttributeDeclaration("attribute1")); fullCompileUnit.AssemblyCustomAttributes.Add(new CodeAttributeDeclaration("attribute2", new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullCompileUnit.AssemblyCustomAttributes.Add(new CodeAttributeDeclaration("attribute3", new CodeAttributeArgument(null, new CodePrimitiveExpression(1)), new CodeAttributeArgument(string.Empty, new CodePrimitiveExpression(1)), new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullCompileUnit.Namespaces.Add(new CodeNamespace()); fullCompileUnit.Namespaces.Add(new CodeNamespace(null)); fullCompileUnit.Namespaces.Add(new CodeNamespace(string.Empty)); fullCompileUnit.Namespaces.Add(fullNamespace); 
fullCompileUnit.ReferencedAssemblies.Add(""); fullCompileUnit.ReferencedAssemblies.Add("0"); fullCompileUnit.ReferencedAssemblies.Add("assembly"); yield return new object[] { fullCompileUnit }; // CodeSnippetCompileUnit. yield return new object[] { new CodeSnippetCompileUnit() }; yield return new object[] { new CodeSnippetCompileUnit(null) }; yield return new object[] { new CodeSnippetCompileUnit("") }; yield return new object[] { new CodeSnippetCompileUnit("0") }; yield return new object[] { new CodeSnippetCompileUnit("value") }; var fullSnippetCompileUnit = new CodeSnippetCompileUnit("value") { LinePragma = new CodeLinePragma() }; fullSnippetCompileUnit.StartDirectives.Add(new CodeChecksumPragma()); fullSnippetCompileUnit.StartDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); fullSnippetCompileUnit.EndDirectives.Add(new CodeChecksumPragma()); fullSnippetCompileUnit.EndDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); fullSnippetCompileUnit.AssemblyCustomAttributes.Add(new CodeAttributeDeclaration("attribute1")); fullSnippetCompileUnit.AssemblyCustomAttributes.Add(new CodeAttributeDeclaration("attribute2", new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullSnippetCompileUnit.AssemblyCustomAttributes.Add(new CodeAttributeDeclaration("attribute3", new CodeAttributeArgument(null, new CodePrimitiveExpression(1)), new CodeAttributeArgument(string.Empty, new CodePrimitiveExpression(1)), new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullSnippetCompileUnit.AssemblyCustomAttributes.Add(new CodeAttributeDeclaration()); fullSnippetCompileUnit.AssemblyCustomAttributes.Add(new CodeAttributeDeclaration((string)null)); fullSnippetCompileUnit.AssemblyCustomAttributes.Add(new CodeAttributeDeclaration(string.Empty)); fullSnippetCompileUnit.AssemblyCustomAttributes.Add(new CodeAttributeDeclaration("0")); fullSnippetCompileUnit.AssemblyCustomAttributes.Add(new 
CodeAttributeDeclaration("name", new CodeAttributeArgument("0", new CodePrimitiveExpression(1)))); fullSnippetCompileUnit.AssemblyCustomAttributes.Add(new CodeAttributeDeclaration("name", new CodeAttributeArgument("name", new CodeExpression()))); fullSnippetCompileUnit.Namespaces.Add(new CodeNamespace()); fullSnippetCompileUnit.Namespaces.Add(new CodeNamespace(null)); fullSnippetCompileUnit.Namespaces.Add(new CodeNamespace(string.Empty)); fullSnippetCompileUnit.Namespaces.Add(new CodeNamespace("0")); fullSnippetCompileUnit.Namespaces.Add(fullNamespace); fullSnippetCompileUnit.ReferencedAssemblies.Add(""); fullSnippetCompileUnit.ReferencedAssemblies.Add("0"); fullSnippetCompileUnit.ReferencedAssemblies.Add("assembly"); yield return new object[] { fullSnippetCompileUnit }; // CodeTypeReference. yield return new object[] { new CodeTypeReference((string)null) }; yield return new object[] { new CodeTypeReference(string.Empty) }; yield return new object[] { new CodeTypeReference("name") }; yield return new object[] { new CodeTypeReference("name`") }; yield return new object[] { new CodeTypeReference("name`1") }; yield return new object[] { new CodeTypeReference("name`2[]") }; var fullTypeReference = new CodeTypeReference("name`2"); fullTypeReference.TypeArguments.Add("type1"); fullTypeReference.TypeArguments.Add("type2"); yield return new object[] { fullTypeReference }; // CodeArrayCreateExpression. 
yield return new object[] { new CodeArrayCreateExpression() }; yield return new object[] { new CodeArrayCreateExpression(new CodeTypeReference((string)null)) }; yield return new object[] { new CodeArrayCreateExpression(new CodeTypeReference(string.Empty)) }; yield return new object[] { new CodeArrayCreateExpression(new CodeTypeReference("name")) }; yield return new object[] { new CodeArrayCreateExpression(new CodeTypeReference("name")) { SizeExpression = new CodePrimitiveExpression(1) } } ; yield return new object[] { new CodeArrayCreateExpression(new CodeTypeReference("name"), new CodeExpression[] { new CodePrimitiveExpression() }) { SizeExpression = new CodePrimitiveExpression(1) } }; yield return new object[] { new CodeArrayCreateExpression(new CodeTypeReference("name"), new CodeExpression[] { new CodePrimitiveExpression(1) }) }; yield return new object[] { new CodeArrayCreateExpression(new CodeTypeReference("name"), new CodeExpression[] { new CodePrimitiveExpression(1) }) { SizeExpression = new CodeExpression() } }; // CodeBaseReferenceExpression. yield return new object[] { new CodeBaseReferenceExpression() }; // CodeBinaryOperatorExpression. yield return new object[] { new CodeBinaryOperatorExpression(new CodePrimitiveExpression(1), CodeBinaryOperatorType.Add, new CodePrimitiveExpression(2)) }; // CodeCastExpression. yield return new object[] { new CodeCastExpression(new CodeTypeReference((string)null), new CodePrimitiveExpression(1)) }; yield return new object[] { new CodeCastExpression(new CodeTypeReference(string.Empty), new CodePrimitiveExpression(1)) }; yield return new object[] { new CodeCastExpression(new CodeTypeReference("name"), new CodePrimitiveExpression(1)) }; // CodeDefaultValueExpression. 
yield return new object[] { new CodeDefaultValueExpression() }; yield return new object[] { new CodeDefaultValueExpression(new CodeTypeReference((string)null)) }; yield return new object[] { new CodeDefaultValueExpression(new CodeTypeReference(string.Empty)) }; yield return new object[] { new CodeDefaultValueExpression(new CodeTypeReference("name")) }; // CodeDelegateCreateExpression. yield return new object[] { new CodeDelegateCreateExpression(new CodeTypeReference("name"), new CodePrimitiveExpression(1), "methodName") }; // CodeFieldReferenceExpression. yield return new object[] { new CodeFieldReferenceExpression(null, "name") }; yield return new object[] { new CodeFieldReferenceExpression(new CodePrimitiveExpression(1), "name") }; // CodeArgumentReferenceExpression. yield return new object[] { new CodeArgumentReferenceExpression("name") }; // CodeVariableReferenceExpression. yield return new object[] { new CodeVariableReferenceExpression("name") }; // CodeIndexerExpression. yield return new object[] { new CodeIndexerExpression(new CodePrimitiveExpression(1)) }; yield return new object[] { new CodeIndexerExpression(new CodePrimitiveExpression(1), new CodePrimitiveExpression(2)) }; // CodeArrayIndexerExpression. yield return new object[] { new CodeArrayIndexerExpression(new CodePrimitiveExpression(1)) }; yield return new object[] { new CodeArrayIndexerExpression(new CodePrimitiveExpression(1), new CodePrimitiveExpression(2)) }; // CodeSnippetExpression. yield return new object[] { new CodeSnippetExpression() }; yield return new object[] { new CodeSnippetExpression(null) }; yield return new object[] { new CodeSnippetExpression(string.Empty) }; yield return new object[] { new CodeSnippetExpression("0") }; yield return new object[] { new CodeSnippetExpression("name") }; // CodeMethodInvokeExpression. 
yield return new object[] { new CodeMethodInvokeExpression(new CodeMethodReferenceExpression(null, "name")) }; yield return new object[] { new CodeMethodInvokeExpression(new CodeMethodReferenceExpression(new CodePrimitiveExpression(1), "name")) }; yield return new object[] { new CodeMethodInvokeExpression(new CodeMethodReferenceExpression(new CodePrimitiveExpression(1), "name", new CodeTypeReference[] { new CodeTypeReference((string)null), new CodeTypeReference(string.Empty), new CodeTypeReference("name") }), new CodePrimitiveExpression(1)) }; // CodeMethodReferenceExpression. yield return new object[] { new CodeMethodReferenceExpression(null, "name") }; yield return new object[] { new CodeMethodReferenceExpression(new CodePrimitiveExpression(1), "name") }; yield return new object[] { new CodeMethodReferenceExpression(new CodePrimitiveExpression(1), "name", new CodeTypeReference[] { new CodeTypeReference((string)null), new CodeTypeReference(string.Empty), new CodeTypeReference("name") }) }; // CodeEventReferenceExpression. yield return new object[] { new CodeEventReferenceExpression(null, "name") }; yield return new object[] { new CodeEventReferenceExpression(new CodePrimitiveExpression(1), "name") }; // CodeDelegateInvokeExpression. yield return new object[] { new CodeDelegateInvokeExpression() }; yield return new object[] { new CodeDelegateInvokeExpression(new CodePrimitiveExpression(1)) }; yield return new object[] { new CodeDelegateInvokeExpression(new CodePrimitiveExpression(1), new CodePrimitiveExpression(2)) }; // CodeObjectCreateExpression. yield return new object[] { new CodeObjectCreateExpression() }; yield return new object[] { new CodeObjectCreateExpression(new CodeTypeReference("name")) }; yield return new object[] { new CodeObjectCreateExpression(new CodeTypeReference("name"), new CodePrimitiveExpression(1)) }; // CodeDirectionExpression. 
yield return new object[] { new CodeDirectionExpression(FieldDirection.In, new CodePrimitiveExpression(1)) }; // CodePrimitiveExpression. yield return new object[] { new CodePrimitiveExpression() }; yield return new object[] { new CodePrimitiveExpression(1) }; yield return new object[] { new CodePrimitiveExpression(null) }; yield return new object[] { new CodePrimitiveExpression(string.Empty) }; yield return new object[] { new CodePrimitiveExpression("0") }; yield return new object[] { new CodePrimitiveExpression("name") }; // CodePropertyReferenceExpression. yield return new object[] { new CodePropertyReferenceExpression(null, "name") }; yield return new object[] { new CodePropertyReferenceExpression(new CodePrimitiveExpression(1), "name") }; // CodePropertySetValueReferenceExpression. yield return new object[] { new CodePropertySetValueReferenceExpression() }; // CodeThisReferenceExpression. yield return new object[] { new CodeThisReferenceExpression() }; // CodeTypeReferenceExpression. yield return new object[] { new CodeTypeReferenceExpression() }; yield return new object[] { new CodeTypeReferenceExpression(new CodeTypeReference("name")) }; // CodeTypeOfExpression. yield return new object[] { new CodeTypeOfExpression() }; yield return new object[] { new CodeTypeOfExpression(new CodeTypeReference("name")) }; // CodeMethodReturnStatement. yield return new object[] { new CodeMethodReturnStatement() }; yield return new object[] { new CodeMethodReturnStatement(null) }; yield return new object[] { new CodeMethodReturnStatement(new CodePrimitiveExpression("1")) }; // CodeConditionStatement. 
yield return new object[] { new CodeConditionStatement(new CodePrimitiveExpression("1")) }; yield return new object[] { new CodeConditionStatement(new CodePrimitiveExpression("1"), new CodeStatement[] { new CodeMethodReturnStatement(), new CodeMethodReturnStatement { LinePragma = new CodeLinePragma() } }, new CodeStatement[0]) }; yield return new object[] { new CodeConditionStatement(new CodePrimitiveExpression("1"), new CodeStatement[0], new CodeStatement[] { new CodeMethodReturnStatement(), new CodeMethodReturnStatement { LinePragma = new CodeLinePragma() } }) }; yield return new object[] { new CodeConditionStatement(new CodePrimitiveExpression("1"), new CodeStatement[] { new CodeMethodReturnStatement(), new CodeMethodReturnStatement { LinePragma = new CodeLinePragma() } }, new CodeStatement[] { new CodeMethodReturnStatement(), new CodeMethodReturnStatement { LinePragma = new CodeLinePragma() } }) }; // CodeTryCatchFinallyStatement. yield return new object[] { new CodeTryCatchFinallyStatement() }; yield return new object[] { new CodeTryCatchFinallyStatement( new CodeStatement[] { new CodeMethodReturnStatement(), new CodeMethodReturnStatement { LinePragma = new CodeLinePragma() } }, new CodeCatchClause[] { new CodeCatchClause("localName"), new CodeCatchClause("localName", new CodeTypeReference("exceptionType")), new CodeCatchClause("localName", new CodeTypeReference("exceptionType"), new CodeMethodReturnStatement(), new CodeMethodReturnStatement { LinePragma = new CodeLinePragma() }) }, new CodeStatement[] { new CodeMethodReturnStatement(), new CodeMethodReturnStatement { LinePragma = new CodeLinePragma() } } ) }; yield return new object[] { new CodeTryCatchFinallyStatement( new CodeStatement[0], new CodeCatchClause[] { new CodeCatchClause("localName"), new CodeCatchClause("localName", new CodeTypeReference("exceptionType")), new CodeCatchClause("localName", new CodeTypeReference("exceptionType"), new CodeMethodReturnStatement(), new CodeMethodReturnStatement { 
LinePragma = new CodeLinePragma() }) }, new CodeStatement[] { new CodeMethodReturnStatement(), new CodeMethodReturnStatement { LinePragma = new CodeLinePragma() } } ) }; yield return new object[] { new CodeTryCatchFinallyStatement( new CodeStatement[] { new CodeMethodReturnStatement(), new CodeMethodReturnStatement { LinePragma = new CodeLinePragma() } }, new CodeCatchClause[0], new CodeStatement[] { new CodeMethodReturnStatement(), new CodeMethodReturnStatement { LinePragma = new CodeLinePragma() } } ) }; yield return new object[] { new CodeTryCatchFinallyStatement( new CodeStatement[] { new CodeMethodReturnStatement(), new CodeMethodReturnStatement { LinePragma = new CodeLinePragma() } }, new CodeCatchClause[] { new CodeCatchClause("localName"), new CodeCatchClause("localName", new CodeTypeReference("exceptionType")), new CodeCatchClause("localName", new CodeTypeReference("exceptionType"), new CodeMethodReturnStatement(), new CodeMethodReturnStatement { LinePragma = new CodeLinePragma() }) }, new CodeStatement[0] ) }; // CodeAssignStatement. yield return new object[] { new CodeAssignStatement(new CodePrimitiveExpression(1), new CodePrimitiveExpression(1)) }; // CodeExpressionStatement. yield return new object[] { new CodeExpressionStatement(new CodePrimitiveExpression("1")) }; // CodeIterationStatement. yield return new object[] { new CodeIterationStatement(new CodeMethodReturnStatement(), new CodePrimitiveExpression(1), new CodeMethodReturnStatement(), new CodeMethodReturnStatement(), new CodeMethodReturnStatement { LinePragma = new CodeLinePragma() }) }; yield return new object[] { new CodeIterationStatement(new CodeMethodReturnStatement(), new CodePrimitiveExpression(1), new CodeMethodReturnStatement()) }; // CodeThrowExceptionStatement. 
yield return new object[] { new CodeThrowExceptionStatement() }; yield return new object[] { new CodeThrowExceptionStatement(null) }; yield return new object[] { new CodeThrowExceptionStatement(new CodePrimitiveExpression(1)) }; // CodeSnippetStatement. yield return new object[] { new CodeSnippetStatement() }; yield return new object[] { new CodeSnippetStatement(null) }; yield return new object[] { new CodeSnippetStatement(string.Empty) }; yield return new object[] { new CodeSnippetStatement("value") }; // CodeVariableDeclarationStatement. yield return new object[] { new CodeVariableDeclarationStatement(new CodeTypeReference("name"), "name") }; yield return new object[] { new CodeVariableDeclarationStatement(new CodeTypeReference("name"), "name", new CodePrimitiveExpression(1)) }; // CodeAttachEventStatement. yield return new object[] { new CodeAttachEventStatement(new CodeEventReferenceExpression(null, "name"), new CodePrimitiveExpression(1)) }; yield return new object[] { new CodeAttachEventStatement(new CodeEventReferenceExpression(new CodePrimitiveExpression(1), "name"), new CodePrimitiveExpression(1)) }; // CodeRemoveEventStatement. yield return new object[] { new CodeRemoveEventStatement(new CodeEventReferenceExpression(null, "name"), new CodePrimitiveExpression(1)) }; yield return new object[] { new CodeRemoveEventStatement(new CodeEventReferenceExpression(new CodePrimitiveExpression(1), "name"), new CodePrimitiveExpression(1)) }; // CodeGotoStatement. yield return new object[] { new CodeGotoStatement("name") }; // CodeLabeledStatement. yield return new object[] { new CodeLabeledStatement("name") }; yield return new object[] { new CodeLabeledStatement("name", null) }; yield return new object[] { new CodeLabeledStatement("name", new CodeMethodReturnStatement()) }; // Misc. 
// Miscellaneous: base type names built from punctuation/special characters.
// These are yielded as *valid* data, so ValidateIdentifiers evidently tolerates
// them in a CodeTypeReference base type — presumably because such characters are
// treated as type-name syntax rather than identifier characters (TODO confirm
// against the validator's implementation).
yield return new object[] { new CodeTypeReference(":") };
yield return new object[] { new CodeTypeReference(".") };
yield return new object[] { new CodeTypeReference("$") };
yield return new object[] { new CodeTypeReference("+") };
yield return new object[] { new CodeTypeReference("<") };
yield return new object[] { new CodeTypeReference(">") };
yield return new object[] { new CodeTypeReference("-") };
yield return new object[] { new CodeTypeReference("[") };
yield return new object[] { new CodeTypeReference("]") };
yield return new object[] { new CodeTypeReference(",") };
yield return new object[] { new CodeTypeReference("&") };
yield return new object[] { new CodeTypeReference("*") };
yield return new object[] { new CodeTypeReference("_abc") };
}

/// <summary>
/// Invokes <see cref="CodeGenerator.ValidateIdentifiers(CodeObject)"/> on each
/// well-formed <see cref="CodeObject"/> graph from
/// <see cref="ValidateIdentifiers_Valid_TestData"/>. The test passes when the
/// call completes without throwing; no other observable behavior is asserted.
/// </summary>
/// <param name="e">A CodeDom object graph expected to validate cleanly.</param>
[Theory]
[MemberData(nameof(ValidateIdentifiers_Valid_TestData))]
public void ValidateIdentifiers_InvokeValid_Nop(CodeObject e)
{
    CodeGenerator.ValidateIdentifiers(e);
}

// Test data: CodeObject graphs containing a malformed node or an invalid
// identifier somewhere in the tree. Presumably consumed by a companion [Theory]
// that asserts ValidateIdentifiers throws for each case — confirm against that
// test, which lies outside this view.
public static IEnumerable<object[]> ValidateIdentifiers_Invalid_TestData()
{
    // CodeTypeReference.
    yield return new object[] { new CodeTypeReference() };      // parameterless: no base type set
    yield return new object[] { new CodeTypeReference("0") };   // name starting with a digit
    // "name`2" declares arity 2 but only one type argument is supplied.
    var invalidTypeReference1 = new CodeTypeReference("name`2");
    invalidTypeReference1.TypeArguments.Add("type1");
    yield return new object[] { invalidTypeReference1 };
    // Arity 2 with a default-constructed (baseless) type argument.
    var invalidTypeReference2 = new CodeTypeReference("name`2");
    invalidTypeReference2.TypeArguments.Add(new CodeTypeReference());
    invalidTypeReference2.TypeArguments.Add("name");
    yield return new object[] { invalidTypeReference2 };
    // Arity 2 with a type argument whose name starts with a digit.
    var invalidTypeReference3 = new CodeTypeReference("name`2");
    invalidTypeReference3.TypeArguments.Add(new CodeTypeReference("0"));
    invalidTypeReference3.TypeArguments.Add("name");
    yield return new object[] { invalidTypeReference3 };
    // CodeChecksumPragma: file name containing an embedded NUL character.
    yield return new object[] { new CodeChecksumPragma("\0", Guid.NewGuid(), new byte[0]) };
    // CodeRegionDirective.
foreach (char newLineChar in new char[] { '\r', '\n', '\u2028', '\u2029', '\u0085' }) { yield return new object[] { new CodeRegionDirective(CodeRegionMode.None, $"te{newLineChar}xt") }; } // CodeNamespaceImport. yield return new object[] { new CodeNamespaceImport() }; yield return new object[] { new CodeNamespaceImport(null) }; yield return new object[] { new CodeNamespaceImport(string.Empty) }; yield return new object[] { new CodeNamespaceImport("0") }; var invalidNamespaceImport1 = new CodeNamespace(); invalidNamespaceImport1.Imports.Add(new CodeNamespaceImport()); yield return new object[] { invalidNamespaceImport1 }; var invalidNamespaceImport2 = new CodeNamespace(); invalidNamespaceImport2.Imports.Add(new CodeNamespaceImport(string.Empty)); yield return new object[] { invalidNamespaceImport2 }; var invalidNamespaceImport3 = new CodeNamespace(); invalidNamespaceImport3.Imports.Add(new CodeNamespaceImport(string.Empty)); yield return new object[] { invalidNamespaceImport3 }; // CodeMemberEvent. 
yield return new object[] { new CodeMemberEvent { PrivateImplementationType = new CodeTypeReference("name") } }; yield return new object[] { new CodeMemberEvent { Name = null, PrivateImplementationType = new CodeTypeReference("name") } }; yield return new object[] { new CodeMemberEvent { Name = string.Empty, PrivateImplementationType = new CodeTypeReference("name") } }; yield return new object[] { new CodeMemberEvent { Name = "0", PrivateImplementationType = new CodeTypeReference("name") } }; yield return new object[] { new CodeMemberEvent { Name = "name", PrivateImplementationType = new CodeTypeReference("name"), Type = new CodeTypeReference() } }; yield return new object[] { new CodeMemberEvent { Name = "name", PrivateImplementationType = new CodeTypeReference("name"), Type = new CodeTypeReference("0") } }; yield return new object[] { new CodeMemberEvent { Name = "name", PrivateImplementationType = new CodeTypeReference("name"), Type = invalidTypeReference1 } }; yield return new object[] { new CodeMemberEvent { Name = "name", PrivateImplementationType = new CodeTypeReference("name"), Type = invalidTypeReference2 } }; yield return new object[] { new CodeMemberEvent { Name = "name", PrivateImplementationType = new CodeTypeReference("name"), Type = invalidTypeReference3 } }; var invalidEventStartDirective1 = new CodeMemberEvent(); invalidEventStartDirective1.StartDirectives.Add(new CodeDirective()); yield return new object[] { invalidEventStartDirective1 }; var invalidEventStartDirective2 = new CodeMemberEvent(); invalidEventStartDirective2.StartDirectives.Add(new CodeChecksumPragma("\0", Guid.NewGuid(), new byte[0])); yield return new object[] { invalidEventStartDirective2 }; foreach (char newLineChar in new char[] { '\r', '\n', '\u2028', '\u2029', '\u0085' }) { var invalidEventStartDirective3 = new CodeMemberEvent(); invalidEventStartDirective3.StartDirectives.Add(new CodeRegionDirective(CodeRegionMode.None, $"te{newLineChar}xt")); yield return new object[] { 
invalidEventStartDirective3 }; } var invalidEventEndDirective1 = new CodeMemberEvent(); invalidEventEndDirective1.EndDirectives.Add(new CodeDirective()); yield return new object[] { invalidEventEndDirective1 }; var invalidEventEndDirective2 = new CodeMemberEvent(); invalidEventEndDirective2.EndDirectives.Add(new CodeChecksumPragma("\0", Guid.NewGuid(), new byte[0])); yield return new object[] { invalidEventEndDirective2 }; foreach (char newLineChar in new char[] { '\r', '\n', '\u2028', '\u2029', '\u0085' }) { var invalidEventEndDirective3 = new CodeMemberEvent(); invalidEventEndDirective3.EndDirectives.Add(new CodeRegionDirective(CodeRegionMode.None, $"te{newLineChar}xt")); yield return new object[] { invalidEventEndDirective3 }; } var invalidEventImplementationType1 = new CodeMemberEvent(); invalidEventImplementationType1.ImplementationTypes.Add(new CodeTypeReference()); yield return new object[] { invalidEventImplementationType1 }; var invalidEventImplementationType2 = new CodeMemberEvent(); invalidEventImplementationType2.ImplementationTypes.Add(new CodeTypeReference("0")); yield return new object[] { invalidEventImplementationType2 }; var invalidEventImplementationType3 = new CodeMemberEvent(); invalidEventImplementationType3.ImplementationTypes.Add(invalidTypeReference1); yield return new object[] { invalidEventImplementationType3 }; var invalidEventImplementationType4 = new CodeMemberEvent(); invalidEventImplementationType4.ImplementationTypes.Add(invalidTypeReference2); yield return new object[] { invalidEventImplementationType4 }; var invalidEventImplementationType5 = new CodeMemberEvent(); invalidEventImplementationType5.ImplementationTypes.Add(invalidTypeReference3); yield return new object[] { invalidEventImplementationType5 }; // CodeMemberField. 
yield return new object[] { new CodeMemberField() }; yield return new object[] { new CodeMemberField(new CodeTypeReference(), "name") }; yield return new object[] { new CodeMemberField(new CodeTypeReference("0"), "name") }; yield return new object[] { new CodeMemberField(invalidTypeReference1, "name") }; yield return new object[] { new CodeMemberField(invalidTypeReference2, "name") }; yield return new object[] { new CodeMemberField(invalidTypeReference3, "name") }; yield return new object[] { new CodeMemberField(new CodeTypeReference("type"), null) }; yield return new object[] { new CodeMemberField(new CodeTypeReference("type"), string.Empty) }; yield return new object[] { new CodeMemberField(new CodeTypeReference("type"), "0") }; var invalidFieldAttribute1 = new CodeMemberField(new CodeTypeReference("type"), "name"); invalidFieldAttribute1.CustomAttributes.Add(new CodeAttributeDeclaration()); yield return new object[] { invalidFieldAttribute1 }; var invalidFieldAttribute2 = new CodeMemberField(new CodeTypeReference("type"), "name"); invalidFieldAttribute2.CustomAttributes.Add(new CodeAttributeDeclaration((string)null)); yield return new object[] { invalidFieldAttribute2 }; var invalidFieldAttribute3 = new CodeMemberField(new CodeTypeReference("type"), "name"); invalidFieldAttribute3.CustomAttributes.Add(new CodeAttributeDeclaration(string.Empty)); yield return new object[] { invalidFieldAttribute3 }; var invalidFieldAttribute4 = new CodeMemberField(new CodeTypeReference("type"), "name"); invalidFieldAttribute4.CustomAttributes.Add(new CodeAttributeDeclaration("0")); yield return new object[] { invalidFieldAttribute4 }; var invalidFieldAttribute5 = new CodeMemberField(new CodeTypeReference("type"), "name"); invalidFieldAttribute5.CustomAttributes.Add(new CodeAttributeDeclaration("name", new CodeAttributeArgument("0", new CodePrimitiveExpression(1)))); yield return new object[] { invalidFieldAttribute5 }; var invalidFieldAttribute6 = new CodeMemberField(new 
CodeTypeReference("type"), "name"); invalidFieldAttribute6.CustomAttributes.Add(new CodeAttributeDeclaration("name", new CodeAttributeArgument("name", new CodeExpression()))); yield return new object[] { invalidFieldAttribute6 }; var invalidFieldStartDirective1 = new CodeMemberField(new CodeTypeReference("type"), "name"); invalidFieldStartDirective1.StartDirectives.Add(new CodeDirective()); yield return new object[] { invalidFieldStartDirective1 }; var invalidFieldStartDirective2 = new CodeMemberField(new CodeTypeReference("type"), "name"); invalidFieldStartDirective2.StartDirectives.Add(new CodeChecksumPragma("\0", Guid.NewGuid(), new byte[0])); yield return new object[] { invalidFieldStartDirective2 }; foreach (char newLineChar in new char[] { '\r', '\n', '\u2028', '\u2029', '\u0085' }) { var invalidFieldStartDirective3 = new CodeMemberField(new CodeTypeReference("type"), "name"); invalidFieldStartDirective3.StartDirectives.Add(new CodeRegionDirective(CodeRegionMode.None, $"te{newLineChar}xt")); yield return new object[] { invalidFieldStartDirective3 }; } var invalidFieldEndDirective1 = new CodeMemberField(new CodeTypeReference("type"), "name"); invalidFieldEndDirective1.EndDirectives.Add(new CodeDirective()); yield return new object[] { invalidFieldEndDirective1 }; var invalidFieldEndDirective2 = new CodeMemberField(new CodeTypeReference("type"), "name"); invalidFieldEndDirective2.EndDirectives.Add(new CodeChecksumPragma("\0", Guid.NewGuid(), new byte[0])); yield return new object[] { invalidFieldEndDirective2 }; foreach (char newLineChar in new char[] { '\r', '\n', '\u2028', '\u2029', '\u0085' }) { var invalidFieldEndDirective3 = new CodeMemberField(new CodeTypeReference("type"), "name"); invalidFieldEndDirective3.EndDirectives.Add(new CodeRegionDirective(CodeRegionMode.None, $"te{newLineChar}xt")); yield return new object[] { invalidFieldEndDirective3 }; } yield return new object[] { new CodeMemberField(new CodeTypeReference("type"), "name") { InitExpression = 
new CodeExpression() } }; // CodeParameterDeclarationExpression. yield return new object[] { new CodeParameterDeclarationExpression() }; yield return new object[] { new CodeParameterDeclarationExpression(new CodeTypeReference(), "name") }; yield return new object[] { new CodeParameterDeclarationExpression(new CodeTypeReference("0"), "name") }; yield return new object[] { new CodeParameterDeclarationExpression(invalidTypeReference1, "name") }; yield return new object[] { new CodeParameterDeclarationExpression(invalidTypeReference2, "name") }; yield return new object[] { new CodeParameterDeclarationExpression(invalidTypeReference3, "name") }; yield return new object[] { new CodeParameterDeclarationExpression(new CodeTypeReference("type"), null) }; yield return new object[] { new CodeParameterDeclarationExpression(new CodeTypeReference("type"), string.Empty) }; yield return new object[] { new CodeParameterDeclarationExpression(new CodeTypeReference("type"), "0") }; var invalidParameterAttribute1 = new CodeParameterDeclarationExpression(new CodeTypeReference("type"), "name"); invalidParameterAttribute1.CustomAttributes.Add(new CodeAttributeDeclaration()); yield return new object[] { invalidParameterAttribute1 }; var invalidParameterAttribute2 = new CodeParameterDeclarationExpression(new CodeTypeReference("type"), "name"); invalidParameterAttribute2.CustomAttributes.Add(new CodeAttributeDeclaration((string)null)); yield return new object[] { invalidParameterAttribute2 }; var invalidParameterAttribute3 = new CodeParameterDeclarationExpression(new CodeTypeReference("type"), "name"); invalidParameterAttribute3.CustomAttributes.Add(new CodeAttributeDeclaration(string.Empty)); yield return new object[] { invalidParameterAttribute3 }; var invalidParameterAttribute4 = new CodeParameterDeclarationExpression(new CodeTypeReference("type"), "name"); invalidParameterAttribute4.CustomAttributes.Add(new CodeAttributeDeclaration("0")); yield return new object[] { 
invalidParameterAttribute4 }; var invalidParameterAttribute5 = new CodeParameterDeclarationExpression(new CodeTypeReference("type"), "name"); invalidParameterAttribute5.CustomAttributes.Add(new CodeAttributeDeclaration("name", new CodeAttributeArgument("0", new CodePrimitiveExpression(1)))); yield return new object[] { invalidParameterAttribute5 }; var invalidParameterAttribute6 = new CodeParameterDeclarationExpression(new CodeTypeReference("type"), "name"); invalidParameterAttribute6.CustomAttributes.Add(new CodeAttributeDeclaration("name", new CodeAttributeArgument("name", new CodeExpression()))); yield return new object[] { invalidParameterAttribute6 }; // CodeMemberMethod. yield return new object[] { new CodeMemberMethod() }; yield return new object[] { new CodeMemberMethod { Name = null } }; yield return new object[] { new CodeMemberMethod { Name = string.Empty } }; yield return new object[] { new CodeMemberMethod { Name = "0" } }; yield return new object[] { new CodeMemberMethod { Name = "name", ReturnType = new CodeTypeReference() } }; yield return new object[] { new CodeMemberMethod { Name = "name", ReturnType = new CodeTypeReference("0") } }; yield return new object[] { new CodeMemberMethod { Name = "name", ReturnType = invalidTypeReference1 } }; yield return new object[] { new CodeMemberMethod { Name = "name", ReturnType = invalidTypeReference2 } }; yield return new object[] { new CodeMemberMethod { Name = "name", ReturnType = invalidTypeReference3 } }; yield return new object[] { new CodeMemberMethod { Name = "name", PrivateImplementationType = new CodeTypeReference() } }; yield return new object[] { new CodeMemberMethod { Name = "name", PrivateImplementationType = new CodeTypeReference("0") } }; yield return new object[] { new CodeMemberMethod { Name = "name", PrivateImplementationType = invalidTypeReference1 } }; yield return new object[] { new CodeMemberMethod { Name = "name", PrivateImplementationType = invalidTypeReference2 } }; yield return new 
object[] { new CodeMemberMethod { Name = "name", PrivateImplementationType = invalidTypeReference3 } }; var invalidMethodAttribute1 = new CodeMemberMethod { Name = "name" }; invalidMethodAttribute1.CustomAttributes.Add(new CodeAttributeDeclaration()); yield return new object[] { invalidMethodAttribute1 }; var invalidMethodAttribute2 = new CodeMemberMethod { Name = "name" }; invalidMethodAttribute2.CustomAttributes.Add(new CodeAttributeDeclaration((string)null)); yield return new object[] { invalidMethodAttribute2 }; var invalidMethodAttribute3 = new CodeMemberMethod { Name = "name" }; invalidMethodAttribute3.CustomAttributes.Add(new CodeAttributeDeclaration(string.Empty)); yield return new object[] { invalidMethodAttribute3 }; var invalidMethodAttribute4 = new CodeMemberMethod { Name = "name" }; invalidMethodAttribute4.CustomAttributes.Add(new CodeAttributeDeclaration("0")); yield return new object[] { invalidMethodAttribute4 }; var invalidMethodAttribute5 = new CodeMemberMethod { Name = "name" }; invalidMethodAttribute5.CustomAttributes.Add(new CodeAttributeDeclaration("name", new CodeAttributeArgument("0", new CodePrimitiveExpression(1)))); yield return new object[] { invalidMethodAttribute5 }; var invalidMethodAttribute6 = new CodeMemberMethod { Name = "name" }; invalidMethodAttribute6.CustomAttributes.Add(new CodeAttributeDeclaration("name", new CodeAttributeArgument("name", new CodeExpression()))); yield return new object[] { invalidMethodAttribute6 }; var invalidMethodStartDirective1 = new CodeMemberMethod { Name = "name" }; invalidMethodStartDirective1.StartDirectives.Add(new CodeDirective()); yield return new object[] { invalidMethodStartDirective1 }; var invalidMethodStartDirective2 = new CodeMemberMethod { Name = "name" }; invalidMethodStartDirective2.StartDirectives.Add(new CodeChecksumPragma("\0", Guid.NewGuid(), new byte[0])); yield return new object[] { invalidMethodStartDirective2 }; foreach (char newLineChar in new char[] { '\r', '\n', '\u2028', 
'\u2029', '\u0085' }) { var invalidMethodStartDirective3 = new CodeMemberMethod { Name = "name" }; invalidMethodStartDirective3.StartDirectives.Add(new CodeRegionDirective(CodeRegionMode.None, $"te{newLineChar}xt")); yield return new object[] { invalidMethodStartDirective3 }; } var invalidMethodEndDirective1 = new CodeMemberMethod { Name = "name" }; invalidMethodEndDirective1.EndDirectives.Add(new CodeDirective()); yield return new object[] { invalidMethodEndDirective1 }; var invalidMethodEndDirective2 = new CodeMemberMethod { Name = "name" }; invalidMethodEndDirective2.EndDirectives.Add(new CodeChecksumPragma("\0", Guid.NewGuid(), new byte[0])); yield return new object[] { invalidMethodEndDirective2 }; foreach (char newLineChar in new char[] { '\r', '\n', '\u2028', '\u2029', '\u0085' }) { var invalidMethodEndDirective3 = new CodeMemberMethod { Name = "name" }; invalidMethodEndDirective3.EndDirectives.Add(new CodeRegionDirective(CodeRegionMode.None, $"te{newLineChar}xt")); yield return new object[] { invalidMethodEndDirective3 }; } var invalidMethodImplementationType1 = new CodeMemberMethod { Name = "name" }; invalidMethodImplementationType1.ImplementationTypes.Add(new CodeTypeReference()); yield return new object[] { invalidMethodImplementationType1 }; var invalidMethodImplementationType2 = new CodeMemberMethod { Name = "name" }; invalidMethodImplementationType2.ImplementationTypes.Add(new CodeTypeReference("0")); yield return new object[] { invalidMethodImplementationType2 }; var invalidMethodImplementationType3 = new CodeMemberMethod { Name = "name" }; invalidMethodImplementationType3.ImplementationTypes.Add(invalidTypeReference1); yield return new object[] { invalidMethodImplementationType3 }; var invalidMethodImplementationType4 = new CodeMemberMethod { Name = "name" }; invalidMethodImplementationType4.ImplementationTypes.Add(invalidTypeReference2); yield return new object[] { invalidMethodImplementationType4 }; var invalidMethodImplementationType5 = new 
CodeMemberMethod { Name = "name" }; invalidMethodImplementationType5.ImplementationTypes.Add(invalidTypeReference3); yield return new object[] { invalidMethodImplementationType5 }; var invalidMethodReturnTypeAttribute1 = new CodeMemberMethod { Name = "name" }; invalidMethodReturnTypeAttribute1.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration()); yield return new object[] { invalidMethodReturnTypeAttribute1 }; var invalidMethodReturnTypeAttribute2 = new CodeMemberMethod { Name = "name" }; invalidMethodReturnTypeAttribute2.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration((string)null)); yield return new object[] { invalidMethodReturnTypeAttribute2 }; var invalidMethodReturnTypeAttribute3 = new CodeMemberMethod { Name = "name" }; invalidMethodReturnTypeAttribute3.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration(string.Empty)); yield return new object[] { invalidMethodReturnTypeAttribute3 }; var invalidMethodReturnTypeAttribute4 = new CodeMemberMethod { Name = "name" }; invalidMethodReturnTypeAttribute4.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration("0")); yield return new object[] { invalidMethodReturnTypeAttribute4 }; var invalidMethodReturnTypeAttribute5 = new CodeMemberMethod { Name = "name" }; invalidMethodReturnTypeAttribute5.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration("name", new CodeAttributeArgument("0", new CodePrimitiveExpression(1)))); yield return new object[] { invalidMethodReturnTypeAttribute5 }; var invalidMethodReturnTypeAttribute6 = new CodeMemberMethod { Name = "name" }; invalidMethodReturnTypeAttribute6.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration("name", new CodeAttributeArgument("name", new CodeExpression()))); yield return new object[] { invalidMethodReturnTypeAttribute6 }; var invalidMethodStatement = new CodeMemberMethod { Name = "name" }; invalidMethodStatement.Statements.Add(new CodeStatement()); yield return new object[] { invalidMethodStatement }; var 
invalidMethodParameter1 = new CodeMemberMethod { Name = "name" }; invalidMethodParameter1.Parameters.Add(new CodeParameterDeclarationExpression()); yield return new object[] { invalidMethodParameter1 }; var invalidMethodParameter2 = new CodeMemberMethod { Name = "name" }; invalidMethodParameter2.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference(), "name")); yield return new object[] { invalidMethodParameter2 }; var invalidMethodParameter3 = new CodeMemberMethod { Name = "name" }; invalidMethodParameter3.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("0"), "name")); yield return new object[] { invalidMethodParameter3 }; var invalidMethodParameter4 = new CodeMemberMethod { Name = "name" }; invalidMethodParameter4.Parameters.Add(new CodeParameterDeclarationExpression(invalidTypeReference1, "name")); yield return new object[] { invalidMethodParameter4 }; var invalidMethodParameter5 = new CodeMemberMethod { Name = "name" }; invalidMethodParameter5.Parameters.Add(new CodeParameterDeclarationExpression(invalidTypeReference2, "name")); yield return new object[] { invalidMethodParameter5 }; var invalidMethodParameter6 = new CodeMemberMethod { Name = "name" }; invalidMethodParameter6.Parameters.Add(new CodeParameterDeclarationExpression(invalidTypeReference3, "name")); yield return new object[] { invalidMethodParameter6 }; var invalidMethodParameter7 = new CodeMemberMethod { Name = "name" }; invalidMethodParameter7.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("type"), null)); yield return new object[] { invalidMethodParameter7 }; var invalidMethodParameter8 = new CodeMemberMethod { Name = "name" }; invalidMethodParameter8.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("type"), string.Empty)); yield return new object[] { invalidMethodParameter8 }; var invalidMethodParameter9 = new CodeMemberMethod { Name = "name" }; invalidMethodParameter9.Parameters.Add(new 
CodeParameterDeclarationExpression(new CodeTypeReference("type"), "0")); yield return new object[] { invalidMethodParameter9 }; var invalidMethodParameterAttribute1 = new CodeMemberMethod { Name = "name" }; invalidMethodParameterAttribute1.Parameters.Add(invalidParameterAttribute1); yield return new object[] { invalidMethodParameterAttribute1 }; var invalidMethodParameterAttribute2 = new CodeMemberMethod { Name = "name" }; invalidMethodParameterAttribute2.Parameters.Add(invalidParameterAttribute2); yield return new object[] { invalidMethodParameterAttribute2 }; var invalidMethodParameterAttribute3 = new CodeMemberMethod { Name = "name" }; invalidMethodParameterAttribute3.Parameters.Add(invalidParameterAttribute3); yield return new object[] { invalidMethodParameterAttribute3 }; var invalidMethodParameterAttribute4 = new CodeMemberMethod { Name = "name" }; invalidMethodParameterAttribute4.Parameters.Add(invalidParameterAttribute4); yield return new object[] { invalidMethodParameterAttribute4 }; var invalidMethodParameterAttribute5 = new CodeMemberMethod { Name = "name" }; invalidMethodParameterAttribute5.Parameters.Add(invalidParameterAttribute5); yield return new object[] { invalidMethodParameterAttribute5 }; var invalidMethodParameterAttribute6 = new CodeMemberMethod { Name = "name" }; invalidMethodParameterAttribute6.Parameters.Add(invalidParameterAttribute6); yield return new object[] { invalidMethodParameterAttribute6 }; var invalidMethodTypeParameter1 = new CodeMemberMethod { Name = "name" }; invalidMethodTypeParameter1.TypeParameters.Add(new CodeTypeParameter()); yield return new object[] { invalidMethodTypeParameter1 }; var invalidMethodTypeParameter2 = new CodeMemberMethod { Name = "name" }; invalidMethodTypeParameter2.TypeParameters.Add(new CodeTypeParameter(null)); yield return new object[] { invalidMethodTypeParameter2 }; var invalidMethodTypeParameter3 = new CodeMemberMethod { Name = "name" }; invalidMethodTypeParameter3.TypeParameters.Add(new 
CodeTypeParameter(string.Empty)); yield return new object[] { invalidMethodTypeParameter3 }; var invalidMethodTypeParameter4 = new CodeMemberMethod { Name = "name" }; invalidMethodTypeParameter4.TypeParameters.Add(new CodeTypeParameter("0")); yield return new object[] { invalidMethodTypeParameter4 }; var invalidTypeParameterAttribute1 = new CodeTypeParameter("parameter"); invalidTypeParameterAttribute1.CustomAttributes.Add(new CodeAttributeDeclaration()); var invalidMethodTypeParameterAttribute1 = new CodeMemberMethod { Name = "name" }; invalidMethodTypeParameterAttribute1.TypeParameters.Add(invalidTypeParameterAttribute1); yield return new object[] { invalidMethodTypeParameterAttribute1 }; var invalidTypeParameterAttribute2 = new CodeTypeParameter("parameter"); invalidTypeParameterAttribute2.CustomAttributes.Add(new CodeAttributeDeclaration((string)null)); var invalidMethodTypeParameterAttribute2 = new CodeMemberMethod { Name = "name" }; invalidMethodTypeParameterAttribute2.TypeParameters.Add(invalidTypeParameterAttribute2); yield return new object[] { invalidMethodTypeParameterAttribute2 }; var invalidTypeParameterAttribute3 = new CodeTypeParameter("parameter"); invalidTypeParameterAttribute3.CustomAttributes.Add(new CodeAttributeDeclaration(string.Empty)); var invalidMethodTypeParameterAttribute3 = new CodeMemberMethod { Name = "name" }; invalidMethodTypeParameterAttribute3.TypeParameters.Add(invalidTypeParameterAttribute3); yield return new object[] { invalidMethodTypeParameterAttribute3 }; var invalidTypeParameterAttribute4 = new CodeTypeParameter("parameter"); invalidTypeParameterAttribute4.CustomAttributes.Add(new CodeAttributeDeclaration("0")); var invalidMethodTypeParameterAttribute4 = new CodeMemberMethod { Name = "name" }; invalidMethodTypeParameterAttribute4.TypeParameters.Add(invalidTypeParameterAttribute4); yield return new object[] { invalidMethodTypeParameterAttribute4 }; var invalidTypeParameterAttribute5 = new CodeTypeParameter("parameter"); 
invalidTypeParameterAttribute5.CustomAttributes.Add(new CodeAttributeDeclaration("attribute", new CodeAttributeArgument("0", new CodePrimitiveExpression(1)))); var invalidMethodTypeParameterAttribute5 = new CodeMemberMethod { Name = "name" }; invalidMethodTypeParameterAttribute5.TypeParameters.Add(invalidTypeParameterAttribute5); yield return new object[] { invalidMethodTypeParameterAttribute5 }; var invalidTypeParameterAttribute6 = new CodeTypeParameter("parameter"); invalidTypeParameterAttribute6.CustomAttributes.Add(new CodeAttributeDeclaration("attribute", new CodeAttributeArgument("ARG", new CodeExpression()))); var invalidMethodTypeParameterAttribute6 = new CodeMemberMethod { Name = "name" }; invalidMethodTypeParameterAttribute6.TypeParameters.Add(invalidTypeParameterAttribute6); yield return new object[] { invalidMethodTypeParameterAttribute6 }; // CodeEntryPointMethod. var invalidEntryPointMethodStartDirective1 = new CodeEntryPointMethod { Name = "name" }; invalidEntryPointMethodStartDirective1.StartDirectives.Add(new CodeDirective()); yield return new object[] { invalidEntryPointMethodStartDirective1 }; var invalidEntryPointMethodStartDirective2 = new CodeEntryPointMethod { Name = "name" }; invalidEntryPointMethodStartDirective2.StartDirectives.Add(new CodeChecksumPragma("\0", Guid.NewGuid(), new byte[0])); yield return new object[] { invalidEntryPointMethodStartDirective2 }; foreach (char newLineChar in new char[] { '\r', '\n', '\u2028', '\u2029', '\u0085' }) { var invalidEntryPointMethodStartDirective3 = new CodeEntryPointMethod { Name = "name" }; invalidEntryPointMethodStartDirective3.StartDirectives.Add(new CodeRegionDirective(CodeRegionMode.None, $"te{newLineChar}xt")); yield return new object[] { invalidEntryPointMethodStartDirective3 }; } var invalidEntryPointMethodEndDirective1 = new CodeEntryPointMethod { Name = "name" }; invalidEntryPointMethodEndDirective1.EndDirectives.Add(new CodeDirective()); yield return new object[] { 
invalidEntryPointMethodEndDirective1 }; var invalidEntryPointMethodEndDirective2 = new CodeEntryPointMethod { Name = "name" }; invalidEntryPointMethodEndDirective2.EndDirectives.Add(new CodeChecksumPragma("\0", Guid.NewGuid(), new byte[0])); yield return new object[] { invalidEntryPointMethodEndDirective2 }; foreach (char newLineChar in new char[] { '\r', '\n', '\u2028', '\u2029', '\u0085' }) { var invalidEntryPointMethodEndDirective3 = new CodeEntryPointMethod { Name = "name" }; invalidEntryPointMethodEndDirective3.EndDirectives.Add(new CodeRegionDirective(CodeRegionMode.None, $"te{newLineChar}xt")); yield return new object[] { invalidEntryPointMethodEndDirective3 }; } var invalidEntryPointMethodImplementationType1 = new CodeEntryPointMethod { Name = "name" }; invalidEntryPointMethodImplementationType1.ImplementationTypes.Add(new CodeTypeReference()); yield return new object[] { invalidEntryPointMethodImplementationType1 }; var invalidEntryPointMethodImplementationType2 = new CodeEntryPointMethod { Name = "name" }; invalidEntryPointMethodImplementationType2.ImplementationTypes.Add(new CodeTypeReference("0")); yield return new object[] { invalidEntryPointMethodImplementationType2 }; var invalidEntryPointMethodImplementationType3 = new CodeEntryPointMethod { Name = "name" }; invalidEntryPointMethodImplementationType3.ImplementationTypes.Add(invalidTypeReference1); yield return new object[] { invalidEntryPointMethodImplementationType3 }; var invalidEntryPointMethodImplementationType4 = new CodeEntryPointMethod { Name = "name" }; invalidEntryPointMethodImplementationType4.ImplementationTypes.Add(invalidTypeReference2); yield return new object[] { invalidEntryPointMethodImplementationType4 }; var invalidEntryPointMethodImplementationType5 = new CodeEntryPointMethod { Name = "name" }; invalidEntryPointMethodImplementationType5.ImplementationTypes.Add(invalidTypeReference3); yield return new object[] { invalidEntryPointMethodImplementationType5 }; var 
invalidEntryPointMethodStatement1 = new CodeEntryPointMethod { Name = "name" }; invalidEntryPointMethodStatement1.Statements.Add(new CodeStatement()); yield return new object[] { invalidEntryPointMethodStatement1 }; var invalidEntryPointMethodStatement2 = new CodeEntryPointMethod { Name = "name", Attributes = MemberAttributes.Abstract }; invalidEntryPointMethodStatement2.Statements.Add(new CodeStatement()); yield return new object[] { invalidEntryPointMethodStatement2 }; var invalidEntryPointMethodTypeParameter1 = new CodeEntryPointMethod { Name = "name" }; invalidEntryPointMethodTypeParameter1.TypeParameters.Add(new CodeTypeParameter()); yield return new object[] { invalidEntryPointMethodTypeParameter1 }; var invalidEntryPointMethodTypeParameter2 = new CodeEntryPointMethod { Name = "name" }; invalidEntryPointMethodTypeParameter2.TypeParameters.Add(new CodeTypeParameter(null)); yield return new object[] { invalidEntryPointMethodTypeParameter2 }; var invalidEntryPointMethodTypeParameter3 = new CodeEntryPointMethod { Name = "name" }; invalidEntryPointMethodTypeParameter3.TypeParameters.Add(new CodeTypeParameter(string.Empty)); yield return new object[] { invalidEntryPointMethodTypeParameter3 }; var invalidEntryPointMethodTypeParameter4 = new CodeEntryPointMethod { Name = "name" }; invalidEntryPointMethodTypeParameter4.TypeParameters.Add(new CodeTypeParameter("0")); yield return new object[] { invalidEntryPointMethodTypeParameter4 }; var invalidEntryPointMethodTypeParameterAttribute1 = new CodeEntryPointMethod { Name = "name" }; invalidEntryPointMethodTypeParameterAttribute1.TypeParameters.Add(invalidTypeParameterAttribute1); yield return new object[] { invalidEntryPointMethodTypeParameterAttribute1 }; var invalidEntryPointMethodTypeParameterAttribute2 = new CodeEntryPointMethod { Name = "name" }; invalidEntryPointMethodTypeParameterAttribute2.TypeParameters.Add(invalidTypeParameterAttribute2); yield return new object[] { invalidEntryPointMethodTypeParameterAttribute2 
}; var invalidEntryPointMethodTypeParameterAttribute3 = new CodeEntryPointMethod { Name = "name" }; invalidEntryPointMethodTypeParameterAttribute3.TypeParameters.Add(invalidTypeParameterAttribute3); yield return new object[] { invalidEntryPointMethodTypeParameterAttribute3 }; var invalidEntryPointMethodTypeParameterAttribute4 = new CodeEntryPointMethod { Name = "name" }; invalidEntryPointMethodTypeParameterAttribute4.TypeParameters.Add(invalidTypeParameterAttribute4); yield return new object[] { invalidEntryPointMethodTypeParameterAttribute4 }; var invalidEntryPointMethodTypeParameterAttribute5 = new CodeEntryPointMethod { Name = "name" }; invalidEntryPointMethodTypeParameterAttribute5.TypeParameters.Add(invalidTypeParameterAttribute5); yield return new object[] { invalidEntryPointMethodTypeParameterAttribute5 }; var invalidEntryPointMethodTypeParameterAttribute6 = new CodeEntryPointMethod { Name = "name" }; invalidEntryPointMethodTypeParameterAttribute6.TypeParameters.Add(invalidTypeParameterAttribute6); yield return new object[] { invalidEntryPointMethodTypeParameterAttribute6 }; // CodeConstructor. 
var invalidConstructorAttribute1 = new CodeConstructor { Name = "name" }; invalidConstructorAttribute1.CustomAttributes.Add(new CodeAttributeDeclaration()); yield return new object[] { invalidConstructorAttribute1 }; var invalidConstructorAttribute2 = new CodeConstructor { Name = "name" }; invalidConstructorAttribute2.CustomAttributes.Add(new CodeAttributeDeclaration((string)null)); yield return new object[] { invalidConstructorAttribute2 }; var invalidConstructorAttribute3 = new CodeConstructor { Name = "name" }; invalidConstructorAttribute3.CustomAttributes.Add(new CodeAttributeDeclaration(string.Empty)); yield return new object[] { invalidConstructorAttribute3 }; var invalidConstructorAttribute4 = new CodeConstructor { Name = "name" }; invalidConstructorAttribute4.CustomAttributes.Add(new CodeAttributeDeclaration("0")); yield return new object[] { invalidConstructorAttribute4 }; var invalidConstructorAttribute5 = new CodeConstructor { Name = "name" }; invalidConstructorAttribute5.CustomAttributes.Add(new CodeAttributeDeclaration("name", new CodeAttributeArgument("0", new CodePrimitiveExpression(1)))); yield return new object[] { invalidConstructorAttribute5 }; var invalidConstructorAttribute6 = new CodeConstructor { Name = "name" }; invalidConstructorAttribute6.CustomAttributes.Add(new CodeAttributeDeclaration("name", new CodeAttributeArgument("name", new CodeExpression()))); yield return new object[] { invalidConstructorAttribute6 }; var invalidConstructorStartDirective1 = new CodeConstructor { Name = "name" }; invalidConstructorStartDirective1.StartDirectives.Add(new CodeDirective()); yield return new object[] { invalidConstructorStartDirective1 }; var invalidConstructorStartDirective2 = new CodeConstructor { Name = "name" }; invalidConstructorStartDirective2.StartDirectives.Add(new CodeChecksumPragma("\0", Guid.NewGuid(), new byte[0])); yield return new object[] { invalidConstructorStartDirective2 }; foreach (char newLineChar in new char[] { '\r', '\n', 
'\u2028', '\u2029', '\u0085' }) { var invalidConstructorStartDirective3 = new CodeConstructor { Name = "name" }; invalidConstructorStartDirective3.StartDirectives.Add(new CodeRegionDirective(CodeRegionMode.None, $"te{newLineChar}xt")); yield return new object[] { invalidConstructorStartDirective3 }; } var invalidConstructorEndDirective1 = new CodeConstructor { Name = "name" }; invalidConstructorEndDirective1.EndDirectives.Add(new CodeDirective()); yield return new object[] { invalidConstructorEndDirective1 }; var invalidConstructorEndDirective2 = new CodeConstructor { Name = "name" }; invalidConstructorEndDirective2.EndDirectives.Add(new CodeChecksumPragma("\0", Guid.NewGuid(), new byte[0])); yield return new object[] { invalidConstructorEndDirective2 }; foreach (char newLineChar in new char[] { '\r', '\n', '\u2028', '\u2029', '\u0085' }) { var invalidConstructorEndDirective3 = new CodeConstructor { Name = "name" }; invalidConstructorEndDirective3.EndDirectives.Add(new CodeRegionDirective(CodeRegionMode.None, $"te{newLineChar}xt")); yield return new object[] { invalidConstructorEndDirective3 }; } var invalidConstructorImplementationType1 = new CodeConstructor { Name = "name" }; invalidConstructorImplementationType1.ImplementationTypes.Add(new CodeTypeReference()); yield return new object[] { invalidConstructorImplementationType1 }; var invalidConstructorImplementationType2 = new CodeConstructor { Name = "name" }; invalidConstructorImplementationType2.ImplementationTypes.Add(new CodeTypeReference("0")); yield return new object[] { invalidConstructorImplementationType2 }; var invalidConstructorImplementationType3 = new CodeConstructor { Name = "name" }; invalidConstructorImplementationType3.ImplementationTypes.Add(invalidTypeReference1); yield return new object[] { invalidConstructorImplementationType3 }; var invalidConstructorImplementationType4 = new CodeConstructor { Name = "name" }; 
invalidConstructorImplementationType4.ImplementationTypes.Add(invalidTypeReference2); yield return new object[] { invalidConstructorImplementationType4 }; var invalidConstructorImplementationType5 = new CodeConstructor { Name = "name" }; invalidConstructorImplementationType5.ImplementationTypes.Add(invalidTypeReference3); yield return new object[] { invalidConstructorImplementationType5 }; var invalidConstructorStatement1 = new CodeConstructor { Name = "name" }; invalidConstructorStatement1.Statements.Add(new CodeStatement()); yield return new object[] { invalidConstructorStatement1 }; var invalidConstructorStatement2 = new CodeConstructor { Name = "name", Attributes = MemberAttributes.Abstract }; invalidConstructorStatement2.Statements.Add(new CodeStatement()); yield return new object[] { invalidConstructorStatement2 }; var invalidConstructorParameter1 = new CodeConstructor { Name = "name" }; invalidConstructorParameter1.Parameters.Add(new CodeParameterDeclarationExpression()); yield return new object[] { invalidConstructorParameter1 }; var invalidConstructorParameter2 = new CodeConstructor { Name = "name" }; invalidConstructorParameter2.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference(), "name")); yield return new object[] { invalidConstructorParameter2 }; var invalidConstructorParameter3 = new CodeConstructor { Name = "name" }; invalidConstructorParameter3.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("0"), "name")); yield return new object[] { invalidConstructorParameter3 }; var invalidConstructorParameter4 = new CodeConstructor { Name = "name" }; invalidConstructorParameter4.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("type"), null)); yield return new object[] { invalidConstructorParameter4 }; var invalidConstructorParameter5 = new CodeConstructor { Name = "name" }; invalidConstructorParameter5.Parameters.Add(new CodeParameterDeclarationExpression(new 
CodeTypeReference("type"), string.Empty)); yield return new object[] { invalidConstructorParameter5 }; var invalidConstructorParameter6 = new CodeConstructor { Name = "name" }; invalidConstructorParameter6.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("type"), "0")); yield return new object[] { invalidConstructorParameter6 }; var invalidConstructorParameterAttribute1 = new CodeConstructor { Name = "name" }; invalidConstructorParameterAttribute1.Parameters.Add(invalidParameterAttribute1); yield return new object[] { invalidConstructorParameterAttribute1 }; var invalidConstructorParameterAttribute2 = new CodeConstructor { Name = "name" }; invalidConstructorParameterAttribute2.Parameters.Add(invalidParameterAttribute2); yield return new object[] { invalidConstructorParameterAttribute2 }; var invalidConstructorParameterAttribute3 = new CodeConstructor { Name = "name" }; invalidConstructorParameterAttribute3.Parameters.Add(invalidParameterAttribute3); yield return new object[] { invalidConstructorParameterAttribute3 }; var invalidConstructorParameterAttribute4 = new CodeConstructor { Name = "name" }; invalidConstructorParameterAttribute4.Parameters.Add(invalidParameterAttribute4); yield return new object[] { invalidConstructorParameterAttribute4 }; var invalidConstructorParameterAttribute5 = new CodeConstructor { Name = "name" }; invalidConstructorParameterAttribute5.Parameters.Add(invalidParameterAttribute5); yield return new object[] { invalidConstructorParameterAttribute5 }; var invalidConstructorParameterAttribute6 = new CodeConstructor { Name = "name" }; invalidConstructorParameterAttribute6.Parameters.Add(invalidParameterAttribute6); yield return new object[] { invalidConstructorParameterAttribute6 }; var invalidConstructorTypeParameter1 = new CodeConstructor { Name = "name" }; invalidConstructorTypeParameter1.TypeParameters.Add(new CodeTypeParameter()); yield return new object[] { invalidConstructorTypeParameter1 }; var 
invalidConstructorTypeParameter2 = new CodeConstructor { Name = "name" }; invalidConstructorTypeParameter2.TypeParameters.Add(new CodeTypeParameter(null)); yield return new object[] { invalidConstructorTypeParameter2 }; var invalidConstructorTypeParameter3 = new CodeConstructor { Name = "name" }; invalidConstructorTypeParameter3.TypeParameters.Add(new CodeTypeParameter(string.Empty)); yield return new object[] { invalidConstructorTypeParameter3 }; var invalidConstructorTypeParameter4 = new CodeConstructor { Name = "name" }; invalidConstructorTypeParameter4.TypeParameters.Add(new CodeTypeParameter("0")); yield return new object[] { invalidConstructorTypeParameter4 }; var invalidConstructorTypeParameterAttribute1 = new CodeConstructor { Name = "name" }; invalidConstructorTypeParameterAttribute1.TypeParameters.Add(invalidTypeParameterAttribute1); yield return new object[] { invalidConstructorTypeParameterAttribute1 }; var invalidConstructorTypeParameterAttribute2 = new CodeConstructor { Name = "name" }; invalidConstructorTypeParameterAttribute2.TypeParameters.Add(invalidTypeParameterAttribute2); yield return new object[] { invalidConstructorTypeParameterAttribute2 }; var invalidConstructorTypeParameterAttribute3 = new CodeConstructor { Name = "name" }; invalidConstructorTypeParameterAttribute3.TypeParameters.Add(invalidTypeParameterAttribute3); yield return new object[] { invalidConstructorTypeParameterAttribute3 }; var invalidConstructorTypeParameterAttribute4 = new CodeConstructor { Name = "name" }; invalidConstructorTypeParameterAttribute4.TypeParameters.Add(invalidTypeParameterAttribute4); yield return new object[] { invalidConstructorTypeParameterAttribute4 }; var invalidConstructorTypeParameterAttribute5 = new CodeConstructor { Name = "name" }; invalidConstructorTypeParameterAttribute5.TypeParameters.Add(invalidTypeParameterAttribute5); yield return new object[] { invalidConstructorTypeParameterAttribute5 }; var invalidConstructorTypeParameterAttribute6 = new 
CodeConstructor { Name = "name" }; invalidConstructorTypeParameterAttribute6.TypeParameters.Add(invalidTypeParameterAttribute6); yield return new object[] { invalidConstructorTypeParameterAttribute6 }; var invalidConstructorBaseConstructorArg = new CodeConstructor { Name = "name" }; invalidConstructorBaseConstructorArg.BaseConstructorArgs.Add(new CodeExpression()); yield return new object[] { invalidConstructorBaseConstructorArg }; var invalidConstructorChainedConstructorArg = new CodeConstructor { Name = "name" }; invalidConstructorChainedConstructorArg.ChainedConstructorArgs.Add(new CodeExpression()); yield return new object[] { invalidConstructorChainedConstructorArg }; // CodeTypeConstructor. var invalidTypeConstructorStartDirective1 = new CodeTypeConstructor { Name = "name" }; invalidTypeConstructorStartDirective1.StartDirectives.Add(new CodeDirective()); yield return new object[] { invalidTypeConstructorStartDirective1 }; var invalidTypeConstructorStartDirective2 = new CodeTypeConstructor { Name = "name" }; invalidTypeConstructorStartDirective2.StartDirectives.Add(new CodeChecksumPragma("\0", Guid.NewGuid(), new byte[0])); yield return new object[] { invalidTypeConstructorStartDirective2 }; foreach (char newLineChar in new char[] { '\r', '\n', '\u2028', '\u2029', '\u0085' }) { var invalidTypeConstructorStartDirective3 = new CodeTypeConstructor { Name = "name" }; invalidTypeConstructorStartDirective3.StartDirectives.Add(new CodeRegionDirective(CodeRegionMode.None, $"te{newLineChar}xt")); yield return new object[] { invalidTypeConstructorStartDirective3 }; } var invalidTypeConstructorEndDirective1 = new CodeTypeConstructor { Name = "name" }; invalidTypeConstructorEndDirective1.EndDirectives.Add(new CodeDirective()); yield return new object[] { invalidTypeConstructorEndDirective1 }; var invalidTypeConstructorEndDirective2 = new CodeTypeConstructor { Name = "name" }; invalidTypeConstructorEndDirective2.EndDirectives.Add(new CodeChecksumPragma("\0", 
Guid.NewGuid(), new byte[0])); yield return new object[] { invalidTypeConstructorEndDirective2 }; foreach (char newLineChar in new char[] { '\r', '\n', '\u2028', '\u2029', '\u0085' }) { var invalidTypeConstructorEndDirective3 = new CodeTypeConstructor { Name = "name" }; invalidTypeConstructorEndDirective3.EndDirectives.Add(new CodeRegionDirective(CodeRegionMode.None, $"te{newLineChar}xt")); yield return new object[] { invalidTypeConstructorEndDirective3 }; } var invalidTypeConstructorImplementationType1 = new CodeTypeConstructor { Name = "name" }; invalidTypeConstructorImplementationType1.ImplementationTypes.Add(new CodeTypeReference()); yield return new object[] { invalidTypeConstructorImplementationType1 }; var invalidTypeConstructorImplementationType2 = new CodeTypeConstructor { Name = "name" }; invalidTypeConstructorImplementationType2.ImplementationTypes.Add(new CodeTypeReference("0")); yield return new object[] { invalidTypeConstructorImplementationType2 }; var invalidTypeConstructorImplementationType3 = new CodeTypeConstructor { Name = "name" }; invalidTypeConstructorImplementationType3.ImplementationTypes.Add(invalidTypeReference1); yield return new object[] { invalidTypeConstructorImplementationType3 }; var invalidTypeConstructorImplementationType4 = new CodeTypeConstructor { Name = "name" }; invalidTypeConstructorImplementationType4.ImplementationTypes.Add(invalidTypeReference2); yield return new object[] { invalidTypeConstructorImplementationType4 }; var invalidTypeConstructorImplementationType5 = new CodeTypeConstructor { Name = "name" }; invalidTypeConstructorImplementationType5.ImplementationTypes.Add(invalidTypeReference3); yield return new object[] { invalidTypeConstructorImplementationType5 }; var invalidTypeConstructorStatement1 = new CodeTypeConstructor { Name = "name" }; invalidTypeConstructorStatement1.Statements.Add(new CodeStatement()); yield return new object[] { invalidTypeConstructorStatement1 }; var invalidTypeConstructorStatement2 = new 
CodeTypeConstructor { Name = "name", Attributes = MemberAttributes.Abstract }; invalidTypeConstructorStatement2.Statements.Add(new CodeStatement()); yield return new object[] { invalidTypeConstructorStatement2 }; var invalidTypeConstructorTypeParameter1 = new CodeTypeConstructor { Name = "name" }; invalidTypeConstructorTypeParameter1.TypeParameters.Add(new CodeTypeParameter()); yield return new object[] { invalidTypeConstructorTypeParameter1 }; var invalidTypeConstructorTypeParameter2 = new CodeTypeConstructor { Name = "name" }; invalidTypeConstructorTypeParameter2.TypeParameters.Add(new CodeTypeParameter(null)); yield return new object[] { invalidTypeConstructorTypeParameter2 }; var invalidTypeConstructorTypeParameter3 = new CodeTypeConstructor { Name = "name" }; invalidTypeConstructorTypeParameter3.TypeParameters.Add(new CodeTypeParameter(string.Empty)); yield return new object[] { invalidTypeConstructorTypeParameter3 }; var invalidTypeConstructorTypeParameter4 = new CodeTypeConstructor { Name = "name" }; invalidTypeConstructorTypeParameter4.TypeParameters.Add(new CodeTypeParameter("0")); yield return new object[] { invalidTypeConstructorTypeParameter4 }; var invalidTypeConstructorTypeParameterAttribute1 = new CodeTypeConstructor { Name = "name" }; invalidTypeConstructorTypeParameterAttribute1.TypeParameters.Add(invalidTypeParameterAttribute1); yield return new object[] { invalidTypeConstructorTypeParameterAttribute1 }; var invalidTypeConstructorTypeParameterAttribute2 = new CodeTypeConstructor { Name = "name" }; invalidTypeConstructorTypeParameterAttribute2.TypeParameters.Add(invalidTypeParameterAttribute2); yield return new object[] { invalidTypeConstructorTypeParameterAttribute2 }; var invalidTypeConstructorTypeParameterAttribute3 = new CodeTypeConstructor { Name = "name" }; invalidTypeConstructorTypeParameterAttribute3.TypeParameters.Add(invalidTypeParameterAttribute3); yield return new object[] { invalidTypeConstructorTypeParameterAttribute3 }; var 
invalidTypeConstructorTypeParameterAttribute4 = new CodeTypeConstructor { Name = "name" }; invalidTypeConstructorTypeParameterAttribute4.TypeParameters.Add(invalidTypeParameterAttribute4); yield return new object[] { invalidTypeConstructorTypeParameterAttribute4 }; var invalidTypeConstructorTypeParameterAttribute5 = new CodeTypeConstructor { Name = "name" }; invalidTypeConstructorTypeParameterAttribute5.TypeParameters.Add(invalidTypeParameterAttribute5); yield return new object[] { invalidTypeConstructorTypeParameterAttribute5 }; var invalidTypeConstructorTypeParameterAttribute6 = new CodeTypeConstructor { Name = "name" }; invalidTypeConstructorTypeParameterAttribute6.TypeParameters.Add(invalidTypeParameterAttribute6); yield return new object[] { invalidTypeConstructorTypeParameterAttribute6 }; // CodeMemberProperty. yield return new object[] { new CodeMemberProperty() }; yield return new object[] { new CodeMemberProperty { Name = null } }; yield return new object[] { new CodeMemberProperty { Name = string.Empty } }; yield return new object[] { new CodeMemberProperty { Name = "0" } }; yield return new object[] { new CodeMemberProperty { Name = "name", PrivateImplementationType = new CodeTypeReference() } }; yield return new object[] { new CodeMemberProperty { Name = "name", PrivateImplementationType = new CodeTypeReference("0") } }; yield return new object[] { new CodeMemberProperty { Name = "name", PrivateImplementationType = invalidTypeReference1 } }; yield return new object[] { new CodeMemberProperty { Name = "name", PrivateImplementationType = invalidTypeReference2 } }; yield return new object[] { new CodeMemberProperty { Name = "name", PrivateImplementationType = invalidTypeReference3 } }; var invalidPropertyStartDirective1 = new CodeMemberProperty { Name = "name" }; invalidPropertyStartDirective1.StartDirectives.Add(new CodeDirective()); yield return new object[] { invalidPropertyStartDirective1 }; var invalidPropertyStartDirective2 = new CodeMemberProperty { 
Name = "name" }; invalidPropertyStartDirective2.StartDirectives.Add(new CodeChecksumPragma("\0", Guid.NewGuid(), new byte[0])); yield return new object[] { invalidPropertyStartDirective2 }; foreach (char newLineChar in new char[] { '\r', '\n', '\u2028', '\u2029', '\u0085' }) { var invalidPropertyStartDirective3 = new CodeMemberProperty { Name = "name" }; invalidPropertyStartDirective3.StartDirectives.Add(new CodeRegionDirective(CodeRegionMode.None, $"te{newLineChar}xt")); yield return new object[] { invalidPropertyStartDirective3 }; } var invalidPropertyEndDirective1 = new CodeMemberProperty { Name = "name" }; invalidPropertyEndDirective1.EndDirectives.Add(new CodeDirective()); yield return new object[] { invalidPropertyEndDirective1 }; var invalidPropertyEndDirective2 = new CodeMemberProperty { Name = "name" }; invalidPropertyEndDirective2.EndDirectives.Add(new CodeChecksumPragma("\0", Guid.NewGuid(), new byte[0])); yield return new object[] { invalidPropertyEndDirective2 }; foreach (char newLineChar in new char[] { '\r', '\n', '\u2028', '\u2029', '\u0085' }) { var invalidPropertyEndDirective3 = new CodeMemberProperty { Name = "name" }; invalidPropertyEndDirective3.EndDirectives.Add(new CodeRegionDirective(CodeRegionMode.None, $"te{newLineChar}xt")); yield return new object[] { invalidPropertyEndDirective3 }; } foreach (string name in new string[] { "item", "Item" }) { var invalidPropertyParameter1 = new CodeMemberProperty { Name = name }; invalidPropertyParameter1.Parameters.Add(new CodeParameterDeclarationExpression()); yield return new object[] { invalidPropertyParameter1 }; var invalidPropertyParameter2 = new CodeMemberProperty { Name = name }; invalidPropertyParameter2.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference(), "name")); yield return new object[] { invalidPropertyParameter2 }; var invalidPropertyParameter3 = new CodeMemberProperty { Name = name }; invalidPropertyParameter3.Parameters.Add(new 
CodeParameterDeclarationExpression(new CodeTypeReference("0"), "name")); yield return new object[] { invalidPropertyParameter3 }; var invalidPropertyParameter4 = new CodeMemberProperty { Name = name }; invalidPropertyParameter4.Parameters.Add(new CodeParameterDeclarationExpression(invalidTypeReference1, "name")); yield return new object[] { invalidPropertyParameter4 }; var invalidPropertyParameter5 = new CodeMemberProperty { Name = name }; invalidPropertyParameter5.Parameters.Add(new CodeParameterDeclarationExpression(invalidTypeReference2, "name")); yield return new object[] { invalidPropertyParameter5 }; var invalidPropertyParameter6 = new CodeMemberProperty { Name = name }; invalidPropertyParameter6.Parameters.Add(new CodeParameterDeclarationExpression(invalidTypeReference3, "name")); yield return new object[] { invalidPropertyParameter6 }; var invalidPropertyParameter7 = new CodeMemberProperty { Name = name }; invalidPropertyParameter7.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("type"), null)); yield return new object[] { invalidPropertyParameter7 }; var invalidPropertyParameter8 = new CodeMemberProperty { Name = name }; invalidPropertyParameter8.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("type"), string.Empty)); yield return new object[] { invalidPropertyParameter8 }; var invalidPropertyParameter9 = new CodeMemberProperty { Name = name }; invalidPropertyParameter9.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("type"), "0")); yield return new object[] { invalidPropertyParameter9 }; var invalidPropertyParameterAttribute1 = new CodeMemberProperty { Name = name }; invalidPropertyParameterAttribute1.Parameters.Add(invalidParameterAttribute1); yield return new object[] { invalidPropertyParameterAttribute1 }; var invalidPropertyParameterAttribute2 = new CodeMemberProperty { Name = name }; invalidPropertyParameterAttribute2.Parameters.Add(invalidParameterAttribute2); yield 
return new object[] { invalidPropertyParameterAttribute2 }; var invalidPropertyParameterAttribute3 = new CodeMemberProperty { Name = name }; invalidPropertyParameterAttribute3.Parameters.Add(invalidParameterAttribute3); yield return new object[] { invalidPropertyParameterAttribute3 }; var invalidPropertyParameterAttribute4 = new CodeMemberProperty { Name = name }; invalidPropertyParameterAttribute4.Parameters.Add(invalidParameterAttribute4); yield return new object[] { invalidPropertyParameterAttribute4 }; var invalidPropertyParameterAttribute5 = new CodeMemberProperty { Name = name }; invalidPropertyParameterAttribute5.Parameters.Add(invalidParameterAttribute5); yield return new object[] { invalidPropertyParameterAttribute5 }; var invalidPropertyParameterAttribute6 = new CodeMemberProperty { Name = name }; invalidPropertyParameterAttribute6.Parameters.Add(invalidParameterAttribute6); yield return new object[] { invalidPropertyParameterAttribute6 }; } var invalidPropertyImplementationType1 = new CodeMemberProperty { Name = "name" }; invalidPropertyImplementationType1.ImplementationTypes.Add(new CodeTypeReference()); yield return new object[] { invalidPropertyImplementationType1 }; var invalidPropertyImplementationType2 = new CodeMemberProperty { Name = "name" }; invalidPropertyImplementationType2.ImplementationTypes.Add(new CodeTypeReference("0")); yield return new object[] { invalidPropertyImplementationType2 }; var invalidPropertyImplementationType3 = new CodeMemberProperty { Name = "name" }; invalidPropertyImplementationType3.ImplementationTypes.Add(invalidTypeReference1); yield return new object[] { invalidPropertyImplementationType3 }; var invalidPropertyImplementationType4 = new CodeMemberProperty { Name = "name" }; invalidPropertyImplementationType4.ImplementationTypes.Add(invalidTypeReference2); yield return new object[] { invalidPropertyImplementationType4 }; var invalidPropertyImplementationType5 = new CodeMemberProperty { Name = "name" }; 
invalidPropertyImplementationType5.ImplementationTypes.Add(invalidTypeReference3); yield return new object[] { invalidPropertyImplementationType5 }; var invalidPropertyGetStatement = new CodeMemberProperty { Name = "name" }; invalidPropertyGetStatement.GetStatements.Add(new CodeStatement()); yield return new object[] { invalidPropertyGetStatement }; var invalidPropertySetStatement = new CodeMemberProperty { Name = "name" }; invalidPropertySetStatement.SetStatements.Add(new CodeStatement()); yield return new object[] { invalidPropertySetStatement }; // CodeTypeDeclaration. yield return new object[] { new CodeTypeDeclaration() }; yield return new object[] { new CodeTypeDeclaration(null) }; yield return new object[] { new CodeTypeDeclaration(string.Empty) }; yield return new object[] { new CodeTypeDeclaration("0") }; var invalidTypeAttribute1 = new CodeTypeDeclaration("name"); invalidTypeAttribute1.CustomAttributes.Add(new CodeAttributeDeclaration()); yield return new object[] { invalidTypeAttribute1 }; var invalidTypeAttribute2 = new CodeTypeDeclaration("name"); invalidTypeAttribute2.CustomAttributes.Add(new CodeAttributeDeclaration((string)null)); yield return new object[] { invalidTypeAttribute2 }; var invalidTypeAttribute3 = new CodeTypeDeclaration("name"); invalidTypeAttribute3.CustomAttributes.Add(new CodeAttributeDeclaration(string.Empty)); yield return new object[] { invalidTypeAttribute3 }; var invalidTypeAttribute4 = new CodeTypeDeclaration("name"); invalidTypeAttribute4.CustomAttributes.Add(new CodeAttributeDeclaration("0")); yield return new object[] { invalidTypeAttribute4 }; var invalidTypeAttribute5 = new CodeTypeDeclaration("name"); invalidTypeAttribute5.CustomAttributes.Add(new CodeAttributeDeclaration("name", new CodeAttributeArgument("0", new CodePrimitiveExpression(1)))); yield return new object[] { invalidTypeAttribute5 }; var invalidTypeAttribute6 = new CodeTypeDeclaration("name"); invalidTypeAttribute6.CustomAttributes.Add(new 
CodeAttributeDeclaration("name", new CodeAttributeArgument("name", new CodeExpression()))); yield return new object[] { invalidTypeAttribute6 }; var invalidTypeParameter1 = new CodeTypeDeclaration("name"); invalidTypeParameter1.TypeParameters.Add(new CodeTypeParameter()); yield return new object[] { invalidTypeParameter1 }; var invalidTypeParameter2 = new CodeTypeDeclaration("name"); invalidTypeParameter2.TypeParameters.Add(new CodeTypeParameter(null)); yield return new object[] { invalidTypeParameter2 }; var invalidTypeParameter3 = new CodeTypeDeclaration("name"); invalidTypeParameter3.TypeParameters.Add(new CodeTypeParameter(string.Empty)); yield return new object[] { invalidTypeParameter3 }; var invalidTypeParameter4 = new CodeTypeDeclaration("name"); invalidTypeParameter4.TypeParameters.Add(new CodeTypeParameter("0")); yield return new object[] { invalidTypeParameter4 }; var invalidTypeTypeParameterAttribute1 = new CodeTypeDeclaration("name"); invalidTypeTypeParameterAttribute1.TypeParameters.Add(invalidTypeParameterAttribute1); yield return new object[] { invalidTypeTypeParameterAttribute1 }; var invalidTypeTypeParameterAttribute2 = new CodeTypeDeclaration("name"); invalidTypeTypeParameterAttribute2.TypeParameters.Add(invalidTypeParameterAttribute2); yield return new object[] { invalidTypeTypeParameterAttribute2 }; var invalidTypeTypeParameterAttribute3 = new CodeTypeDeclaration("name"); invalidTypeTypeParameterAttribute3.TypeParameters.Add(invalidTypeParameterAttribute3); yield return new object[] { invalidTypeTypeParameterAttribute3 }; var invalidTypeTypeParameterAttribute4 = new CodeTypeDeclaration("name"); invalidTypeTypeParameterAttribute4.TypeParameters.Add(invalidTypeParameterAttribute4); yield return new object[] { invalidTypeTypeParameterAttribute4 }; var invalidTypeTypeParameterAttribute5 = new CodeTypeDeclaration("name"); invalidTypeTypeParameterAttribute5.TypeParameters.Add(invalidTypeParameterAttribute5); yield return new object[] { 
invalidTypeTypeParameterAttribute5 }; var invalidTypeTypeParameterAttribute6 = new CodeTypeDeclaration("name"); invalidTypeTypeParameterAttribute6.TypeParameters.Add(invalidTypeParameterAttribute6); yield return new object[] { invalidTypeTypeParameterAttribute6 }; var invalidParameterConstraint1 = new CodeTypeParameter("parameter"); invalidParameterConstraint1.Constraints.Add(new CodeTypeReference()); var invalidTypeParameterConstraint1 = new CodeTypeDeclaration("name"); invalidTypeParameterConstraint1.TypeParameters.Add(invalidParameterConstraint1); yield return new object[] { invalidTypeParameterConstraint1 }; var invalidParameterConstraint2 = new CodeTypeParameter("parameter"); invalidParameterConstraint2.Constraints.Add(new CodeTypeReference("0")); var invalidTypeParameterConstraint2 = new CodeTypeDeclaration("name"); invalidTypeParameterConstraint2.TypeParameters.Add(invalidParameterConstraint2); yield return new object[] { invalidTypeParameterConstraint2 }; var invalidParameterConstraint3 = new CodeTypeParameter("parameter"); invalidParameterConstraint3.Constraints.Add(invalidTypeReference1); var invalidTypeParameterConstraint3 = new CodeTypeDeclaration("name"); invalidTypeParameterConstraint3.TypeParameters.Add(invalidParameterConstraint3); yield return new object[] { invalidTypeParameterConstraint3 }; var invalidParameterConstraint4 = new CodeTypeParameter("parameter"); invalidParameterConstraint4.Constraints.Add(invalidTypeReference2); var invalidTypeParameterConstraint4 = new CodeTypeDeclaration("name"); invalidTypeParameterConstraint4.TypeParameters.Add(invalidParameterConstraint4); yield return new object[] { invalidTypeParameterConstraint4 }; var invalidParameterConstraint5 = new CodeTypeParameter("parameter"); invalidParameterConstraint5.Constraints.Add(invalidTypeReference3); var invalidTypeParameterConstraint5 = new CodeTypeDeclaration("name"); invalidTypeParameterConstraint5.TypeParameters.Add(invalidParameterConstraint5); yield return new object[] 
{ invalidTypeParameterConstraint5 }; var invalidParameterConstraint6 = new CodeTypeParameter("parameter"); invalidParameterConstraint6.Constraints.Add(new CodeTypeReference("constraint`2", new CodeTypeReference("name"))); var invalidTypeParameterConstraint6 = new CodeTypeDeclaration("name"); invalidTypeParameterConstraint6.TypeParameters.Add(invalidParameterConstraint6); yield return new object[] { invalidTypeParameterConstraint6 }; var invalidParameterConstraint7 = new CodeTypeParameter("parameter"); invalidParameterConstraint7.Constraints.Add(new CodeTypeReference("constraint", new CodeTypeReference(), new CodeTypeReference("name"))); var invalidTypeParameterConstraint7 = new CodeTypeDeclaration("name"); invalidTypeParameterConstraint7.TypeParameters.Add(invalidParameterConstraint7); yield return new object[] { invalidTypeParameterConstraint7 }; var invalidParameterConstraint8 = new CodeTypeParameter("parameter"); invalidParameterConstraint8.Constraints.Add(new CodeTypeReference("constraint", new CodeTypeReference("0"), new CodeTypeReference("name"))); var invalidTypeParameterConstraint8 = new CodeTypeDeclaration("name"); invalidTypeParameterConstraint8.TypeParameters.Add(invalidParameterConstraint8); yield return new object[] { invalidTypeParameterConstraint8 }; var invalidParameterConstraint9 = new CodeTypeParameter("parameter"); invalidParameterConstraint9.Constraints.Add(new CodeTypeReference("constraint", invalidTypeReference1, new CodeTypeReference("name"))); var invalidTypeParameterConstraint9 = new CodeTypeDeclaration("name"); invalidTypeParameterConstraint9.TypeParameters.Add(invalidParameterConstraint9); yield return new object[] { invalidTypeParameterConstraint9 }; var invalidParameterConstraint10 = new CodeTypeParameter("parameter"); invalidParameterConstraint10.Constraints.Add(new CodeTypeReference("constraint", invalidTypeReference2, new CodeTypeReference("name"))); var invalidTypeParameterConstraint10 = new CodeTypeDeclaration("name"); 
invalidTypeParameterConstraint10.TypeParameters.Add(invalidParameterConstraint10); yield return new object[] { invalidTypeParameterConstraint10 }; var invalidParameterConstraint11 = new CodeTypeParameter("parameter"); invalidParameterConstraint11.Constraints.Add(new CodeTypeReference("constraint", invalidTypeReference3, new CodeTypeReference("name"))); var invalidTypeParameterConstraint11 = new CodeTypeDeclaration("name"); invalidTypeParameterConstraint11.TypeParameters.Add(invalidParameterConstraint11); yield return new object[] { invalidTypeParameterConstraint11 }; var invalidTypeBaseType1 = new CodeTypeDeclaration("name"); invalidTypeBaseType1.BaseTypes.Add(new CodeTypeReference()); yield return new object[] { invalidTypeBaseType1 }; var invalidTypeBaseType2 = new CodeTypeDeclaration("name"); invalidTypeBaseType2.BaseTypes.Add(new CodeTypeReference("0")); yield return new object[] { invalidTypeBaseType2 }; var invalidTypeBaseType3 = new CodeTypeDeclaration("name"); invalidTypeBaseType3.BaseTypes.Add(invalidTypeReference1); yield return new object[] { invalidTypeBaseType3 }; var invalidTypeBaseType4 = new CodeTypeDeclaration("name"); invalidTypeBaseType4.BaseTypes.Add(invalidTypeReference2); yield return new object[] { invalidTypeBaseType4 }; var invalidTypeBaseType5 = new CodeTypeDeclaration("name"); invalidTypeBaseType5.BaseTypes.Add(invalidTypeReference3); yield return new object[] { invalidTypeBaseType5 }; // CodeTypeDelegate. 
yield return new object[] { new CodeTypeDelegate() }; yield return new object[] { new CodeTypeDelegate(null) }; yield return new object[] { new CodeTypeDelegate(string.Empty) }; yield return new object[] { new CodeTypeDelegate("0") }; yield return new object[] { new CodeTypeDelegate("name") { ReturnType = new CodeTypeReference() } }; yield return new object[] { new CodeTypeDelegate("name") { ReturnType = new CodeTypeReference("0") } }; yield return new object[] { new CodeTypeDelegate("name") { ReturnType = invalidTypeReference1 } }; yield return new object[] { new CodeTypeDelegate("name") { ReturnType = invalidTypeReference2 } }; yield return new object[] { new CodeTypeDelegate("name") { ReturnType = invalidTypeReference3 } }; var invalidDelegateAttribute1 = new CodeTypeDelegate("name"); invalidDelegateAttribute1.CustomAttributes.Add(new CodeAttributeDeclaration()); yield return new object[] { invalidDelegateAttribute1 }; var invalidDelegateAttribute2 = new CodeTypeDelegate("name"); invalidDelegateAttribute2.CustomAttributes.Add(new CodeAttributeDeclaration((string)null)); yield return new object[] { invalidDelegateAttribute2 }; var invalidDelegateAttribute3 = new CodeTypeDelegate("name"); invalidDelegateAttribute3.CustomAttributes.Add(new CodeAttributeDeclaration(string.Empty)); yield return new object[] { invalidDelegateAttribute3 }; var invalidDelegateAttribute4 = new CodeTypeDelegate("name"); invalidDelegateAttribute4.CustomAttributes.Add(new CodeAttributeDeclaration("0")); yield return new object[] { invalidDelegateAttribute4 }; var invalidDelegateAttribute5 = new CodeTypeDelegate("name"); invalidDelegateAttribute5.CustomAttributes.Add(new CodeAttributeDeclaration("name", new CodeAttributeArgument("0", new CodePrimitiveExpression(1)))); yield return new object[] { invalidDelegateAttribute5 }; var invalidDelegateAttribute6 = new CodeTypeDelegate("name"); invalidDelegateAttribute6.CustomAttributes.Add(new CodeAttributeDeclaration("name", new 
CodeAttributeArgument("name", new CodeExpression()))); yield return new object[] { invalidDelegateAttribute6 }; var invalidDelegateTypeParameter1 = new CodeTypeDelegate("name"); invalidDelegateTypeParameter1.TypeParameters.Add(new CodeTypeParameter()); yield return new object[] { invalidDelegateTypeParameter1 }; var invalidDelegateTypeParameter2 = new CodeTypeDelegate("name"); invalidDelegateTypeParameter2.TypeParameters.Add(new CodeTypeParameter(null)); yield return new object[] { invalidDelegateTypeParameter2 }; var invalidDelegateTypeParameter3 = new CodeTypeDelegate("name"); invalidDelegateTypeParameter3.TypeParameters.Add(new CodeTypeParameter(string.Empty)); yield return new object[] { invalidDelegateTypeParameter3 }; var invalidDelegateTypeParameter4 = new CodeTypeDelegate("name"); invalidDelegateTypeParameter4.TypeParameters.Add(new CodeTypeParameter("0")); yield return new object[] { invalidDelegateTypeParameter4 }; var invalidDelegateTypeParameterAttribute1 = new CodeTypeDelegate("name"); invalidDelegateTypeParameterAttribute1.TypeParameters.Add(invalidTypeParameterAttribute1); yield return new object[] { invalidDelegateTypeParameterAttribute1 }; var invalidDelegateTypeParameterAttribute2 = new CodeTypeDelegate("name"); invalidDelegateTypeParameterAttribute2.TypeParameters.Add(invalidTypeParameterAttribute2); yield return new object[] { invalidDelegateTypeParameterAttribute2 }; var invalidDelegateTypeParameterAttribute3 = new CodeTypeDelegate("name"); invalidDelegateTypeParameterAttribute3.TypeParameters.Add(invalidTypeParameterAttribute3); yield return new object[] { invalidDelegateTypeParameterAttribute3 }; var invalidDelegateTypeParameterAttribute4 = new CodeTypeDelegate("name"); invalidDelegateTypeParameterAttribute4.TypeParameters.Add(invalidTypeParameterAttribute4); yield return new object[] { invalidDelegateTypeParameterAttribute4 }; var invalidDelegateTypeParameterAttribute5 = new CodeTypeDelegate("name"); 
invalidDelegateTypeParameterAttribute5.TypeParameters.Add(invalidTypeParameterAttribute5); yield return new object[] { invalidDelegateTypeParameterAttribute5 }; var invalidDelegateTypeParameterAttribute6 = new CodeTypeDelegate("name"); invalidDelegateTypeParameterAttribute6.TypeParameters.Add(invalidTypeParameterAttribute6); yield return new object[] { invalidDelegateTypeParameterAttribute6 }; var invalidDelegateTypeParameterConstraint1 = new CodeTypeDelegate("name"); invalidDelegateTypeParameterConstraint1.TypeParameters.Add(invalidParameterConstraint1); yield return new object[] { invalidDelegateTypeParameterConstraint1 }; var invalidDelegateTypeParameterConstraint2 = new CodeTypeDelegate("name"); invalidDelegateTypeParameterConstraint2.TypeParameters.Add(invalidParameterConstraint2); yield return new object[] { invalidDelegateTypeParameterConstraint2 }; var invalidDelegateTypeParameterConstraint3 = new CodeTypeDelegate("name"); invalidDelegateTypeParameterConstraint3.TypeParameters.Add(invalidParameterConstraint6); yield return new object[] { invalidDelegateTypeParameterConstraint3 }; var invalidDelegateTypeParameterConstraint4 = new CodeTypeDelegate("name"); invalidDelegateTypeParameterConstraint4.TypeParameters.Add(invalidParameterConstraint7); yield return new object[] { invalidDelegateTypeParameterConstraint4 }; var invalidDelegateTypeParameterConstraint5 = new CodeTypeDelegate("name"); invalidDelegateTypeParameterConstraint5.TypeParameters.Add(invalidParameterConstraint8); yield return new object[] { invalidDelegateTypeParameterConstraint5 }; var invalidDelegateBaseType1 = new CodeTypeDelegate("name"); invalidDelegateBaseType1.BaseTypes.Add(new CodeTypeReference()); yield return new object[] { invalidDelegateBaseType1 }; var invalidDelegateBaseType2 = new CodeTypeDelegate("name"); invalidDelegateBaseType2.BaseTypes.Add(new CodeTypeReference("0")); yield return new object[] { invalidDelegateBaseType2 }; var invalidDelegateBaseType3 = new 
CodeTypeDelegate("name"); invalidDelegateBaseType3.BaseTypes.Add(invalidTypeReference1); yield return new object[] { invalidDelegateBaseType3 }; var invalidDelegateBaseType4 = new CodeTypeDelegate("name"); invalidDelegateBaseType4.BaseTypes.Add(invalidTypeReference2); yield return new object[] { invalidDelegateBaseType4 }; var invalidDelegateBaseType5 = new CodeTypeDelegate("name"); invalidDelegateBaseType5.BaseTypes.Add(invalidTypeReference3); yield return new object[] { invalidDelegateBaseType5 }; var invalidDelegateParameter1 = new CodeTypeDelegate { Name = "name" }; invalidDelegateParameter1.Parameters.Add(new CodeParameterDeclarationExpression()); yield return new object[] { invalidDelegateParameter1 }; var invalidDelegateParameter2 = new CodeTypeDelegate { Name = "name" }; invalidDelegateParameter2.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference(), "name")); yield return new object[] { invalidDelegateParameter2 }; var invalidDelegateParameter3 = new CodeTypeDelegate { Name = "name" }; invalidDelegateParameter3.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("0"), "name")); yield return new object[] { invalidDelegateParameter3 }; var invalidDelegateParameter4 = new CodeTypeDelegate { Name = "name" }; invalidDelegateParameter4.Parameters.Add(new CodeParameterDeclarationExpression(invalidTypeReference1, "name")); yield return new object[] { invalidDelegateParameter4 }; var invalidDelegateParameter5 = new CodeTypeDelegate { Name = "name" }; invalidDelegateParameter5.Parameters.Add(new CodeParameterDeclarationExpression(invalidTypeReference2, "name")); yield return new object[] { invalidDelegateParameter5 }; var invalidDelegateParameter6 = new CodeTypeDelegate { Name = "name" }; invalidDelegateParameter6.Parameters.Add(new CodeParameterDeclarationExpression(invalidTypeReference3, "name")); yield return new object[] { invalidDelegateParameter6 }; var invalidDelegateParameter7 = new CodeTypeDelegate { Name = 
"name" }; invalidDelegateParameter7.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("type"), null)); yield return new object[] { invalidDelegateParameter7 }; var invalidDelegateParameter8 = new CodeTypeDelegate { Name = "name" }; invalidDelegateParameter8.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("type"), string.Empty)); yield return new object[] { invalidDelegateParameter8 }; var invalidDelegateParameter9 = new CodeTypeDelegate { Name = "name" }; invalidDelegateParameter9.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("type"), "0")); yield return new object[] { invalidDelegateParameter9 }; var invalidDelegateParameterAttribute1 = new CodeTypeDelegate { Name = "name" }; invalidDelegateParameterAttribute1.Parameters.Add(invalidParameterAttribute1); yield return new object[] { invalidDelegateParameterAttribute1 }; var invalidDelegateParameterAttribute2 = new CodeTypeDelegate { Name = "name" }; invalidDelegateParameterAttribute2.Parameters.Add(invalidParameterAttribute2); yield return new object[] { invalidDelegateParameterAttribute2 }; var invalidDelegateParameterAttribute3 = new CodeTypeDelegate { Name = "name" }; invalidDelegateParameterAttribute3.Parameters.Add(invalidParameterAttribute3); yield return new object[] { invalidDelegateParameterAttribute3 }; var invalidDelegateParameterAttribute4 = new CodeTypeDelegate { Name = "name" }; invalidDelegateParameterAttribute4.Parameters.Add(invalidParameterAttribute4); yield return new object[] { invalidDelegateParameterAttribute4 }; var invalidDelegateParameterAttribute5 = new CodeTypeDelegate { Name = "name" }; invalidDelegateParameterAttribute5.Parameters.Add(invalidParameterAttribute5); yield return new object[] { invalidDelegateParameterAttribute5 }; var invalidDelegateParameterAttribute6 = new CodeTypeDelegate { Name = "name" }; invalidDelegateParameterAttribute6.Parameters.Add(invalidParameterAttribute6); yield return new 
object[] { invalidDelegateParameterAttribute6 }; // CodeNamespace. yield return new object[] { new CodeNamespace("0") }; var invalidNamespaceType1 = new CodeNamespace("name"); invalidNamespaceType1.Types.Add(new CodeTypeDeclaration()); yield return new object[] { invalidNamespaceType1 }; var invalidNamespaceType2 = new CodeNamespace("name"); invalidNamespaceType2.Types.Add(new CodeTypeDeclaration(null)); yield return new object[] { invalidNamespaceType2 }; var invalidNamespaceType3 = new CodeNamespace("name"); invalidNamespaceType3.Types.Add(new CodeTypeDeclaration(string.Empty)); yield return new object[] { invalidNamespaceType3 }; var invalidNamespaceType4 = new CodeNamespace("name"); invalidNamespaceType4.Types.Add(new CodeTypeDeclaration("0")); yield return new object[] { invalidNamespaceType4 }; // CodeCompileUnit. var invalidCompileUnitAttribute1 = new CodeCompileUnit(); invalidCompileUnitAttribute1.AssemblyCustomAttributes.Add(new CodeAttributeDeclaration()); yield return new object[] { invalidCompileUnitAttribute1 }; var invalidCompileUnitAttribute2 = new CodeCompileUnit(); invalidCompileUnitAttribute2.AssemblyCustomAttributes.Add(new CodeAttributeDeclaration((string)null)); yield return new object[] { invalidCompileUnitAttribute2 }; var invalidCompileUnitAttribute3 = new CodeCompileUnit(); invalidCompileUnitAttribute3.AssemblyCustomAttributes.Add(new CodeAttributeDeclaration(string.Empty)); yield return new object[] { invalidCompileUnitAttribute3 }; var invalidCompileUnitAttribute4 = new CodeCompileUnit(); invalidCompileUnitAttribute4.AssemblyCustomAttributes.Add(new CodeAttributeDeclaration("0")); yield return new object[] { invalidCompileUnitAttribute4 }; var invalidCompileUnitAttribute5 = new CodeCompileUnit(); invalidCompileUnitAttribute5.AssemblyCustomAttributes.Add(new CodeAttributeDeclaration("name", new CodeAttributeArgument("0", new CodePrimitiveExpression(1)))); yield return new object[] { invalidCompileUnitAttribute5 }; var 
invalidCompileUnitAttribute6 = new CodeCompileUnit(); invalidCompileUnitAttribute6.AssemblyCustomAttributes.Add(new CodeAttributeDeclaration("name", new CodeAttributeArgument("name", new CodeExpression()))); yield return new object[] { invalidCompileUnitAttribute6 }; var invalidCompileUnitStartDirective1 = new CodeCompileUnit(); invalidCompileUnitStartDirective1.StartDirectives.Add(new CodeDirective()); yield return new object[] { invalidCompileUnitStartDirective1 }; var invalidCompileUnitStartDirective2 = new CodeCompileUnit(); invalidCompileUnitStartDirective2.StartDirectives.Add(new CodeChecksumPragma("\0", Guid.NewGuid(), new byte[0])); yield return new object[] { invalidCompileUnitStartDirective2 }; foreach (char newLineChar in new char[] { '\r', '\n', '\u2028', '\u2029', '\u0085' }) { var invalidCompileUnitStartDirective3 = new CodeCompileUnit(); invalidCompileUnitStartDirective3.StartDirectives.Add(new CodeRegionDirective(CodeRegionMode.None, $"te{newLineChar}xt")); yield return new object[] { invalidCompileUnitStartDirective3 }; } var invalidCompileUnitEndDirective1 = new CodeCompileUnit(); invalidCompileUnitEndDirective1.EndDirectives.Add(new CodeDirective()); yield return new object[] { invalidCompileUnitEndDirective1 }; var invalidCompileUnitEndDirective2 = new CodeCompileUnit(); invalidCompileUnitEndDirective2.EndDirectives.Add(new CodeChecksumPragma("\0", Guid.NewGuid(), new byte[0])); yield return new object[] { invalidCompileUnitEndDirective2 }; foreach (char newLineChar in new char[] { '\r', '\n', '\u2028', '\u2029', '\u0085' }) { var invalidCompileUnitEndDirective3 = new CodeCompileUnit(); invalidCompileUnitEndDirective3.EndDirectives.Add(new CodeRegionDirective(CodeRegionMode.None, $"te{newLineChar}xt")); yield return new object[] { invalidCompileUnitEndDirective3 }; } // CodeSnippetCompileUnit. 
var invalidSnippetCompileUnitStartDirective1 = new CodeSnippetCompileUnit(); invalidSnippetCompileUnitStartDirective1.StartDirectives.Add(new CodeDirective()); yield return new object[] { invalidSnippetCompileUnitStartDirective1 }; var invalidSnippetCompileUnitStartDirective2 = new CodeSnippetCompileUnit(); invalidSnippetCompileUnitStartDirective2.StartDirectives.Add(new CodeChecksumPragma("\0", Guid.NewGuid(), new byte[0])); yield return new object[] { invalidSnippetCompileUnitStartDirective2 }; foreach (char newLineChar in new char[] { '\r', '\n', '\u2028', '\u2029', '\u0085' }) { var invalidSnippetCompileUnitStartDirective3 = new CodeSnippetCompileUnit(); invalidSnippetCompileUnitStartDirective3.StartDirectives.Add(new CodeRegionDirective(CodeRegionMode.None, $"te{newLineChar}xt")); yield return new object[] { invalidSnippetCompileUnitStartDirective3 }; } var invalidSnippetCompileUnitEndDirective1 = new CodeSnippetCompileUnit(); invalidSnippetCompileUnitEndDirective1.EndDirectives.Add(new CodeDirective()); yield return new object[] { invalidSnippetCompileUnitEndDirective1 }; var invalidSnippetCompileUnitEndDirective2 = new CodeSnippetCompileUnit(); invalidSnippetCompileUnitEndDirective2.EndDirectives.Add(new CodeChecksumPragma("\0", Guid.NewGuid(), new byte[0])); yield return new object[] { invalidSnippetCompileUnitEndDirective2 }; foreach (char newLineChar in new char[] { '\r', '\n', '\u2028', '\u2029', '\u0085' }) { var invalidSnippetCompileUnitEndDirective3 = new CodeSnippetCompileUnit(); invalidSnippetCompileUnitEndDirective3.EndDirectives.Add(new CodeRegionDirective(CodeRegionMode.None, $"te{newLineChar}xt")); yield return new object[] { invalidSnippetCompileUnitEndDirective3 }; } // CodeArrayCreateExpression. 
yield return new object[] { new CodeArrayCreateExpression(new CodeTypeReference()) }; yield return new object[] { new CodeArrayCreateExpression(new CodeTypeReference("0")) }; yield return new object[] { new CodeArrayCreateExpression(invalidTypeReference1) }; yield return new object[] { new CodeArrayCreateExpression(invalidTypeReference2) }; yield return new object[] { new CodeArrayCreateExpression(invalidTypeReference3) { SizeExpression = new CodeExpression() } } ; yield return new object[] { new CodeArrayCreateExpression(new CodeTypeReference("name"), new CodeExpression[] { new CodeExpression() }) }; // CodeBinaryOperatorExpression. yield return new object[] { new CodeBinaryOperatorExpression(new CodeExpression(), CodeBinaryOperatorType.Add, new CodePrimitiveExpression(2)) }; yield return new object[] { new CodeBinaryOperatorExpression(new CodePrimitiveExpression(1), CodeBinaryOperatorType.Add, new CodeExpression()) }; // CodeCastExpression. yield return new object[] { new CodeCastExpression(new CodeTypeReference(), new CodePrimitiveExpression(2)) }; yield return new object[] { new CodeCastExpression(new CodeTypeReference("0"), new CodePrimitiveExpression(2)) }; yield return new object[] { new CodeCastExpression(invalidTypeReference1, new CodePrimitiveExpression(2)) }; yield return new object[] { new CodeCastExpression(invalidTypeReference2, new CodePrimitiveExpression(2)) }; yield return new object[] { new CodeCastExpression(invalidTypeReference3, new CodePrimitiveExpression(2)) }; yield return new object[] { new CodeCastExpression(new CodeTypeReference("name"), new CodeExpression()) }; // CodeDefaultValueExpression. 
yield return new object[] { new CodeDefaultValueExpression(new CodeTypeReference()) }; yield return new object[] { new CodeDefaultValueExpression(new CodeTypeReference("0")) }; yield return new object[] { new CodeDefaultValueExpression(invalidTypeReference1) }; yield return new object[] { new CodeDefaultValueExpression(invalidTypeReference2) }; yield return new object[] { new CodeDefaultValueExpression(invalidTypeReference3) }; // CodeDelegateCreateExpression. yield return new object[] { new CodeDelegateCreateExpression(new CodeTypeReference(), new CodePrimitiveExpression(1), "name") }; yield return new object[] { new CodeDelegateCreateExpression(new CodeTypeReference("0"), new CodePrimitiveExpression(1), "name") }; yield return new object[] { new CodeDelegateCreateExpression(invalidTypeReference1, new CodePrimitiveExpression(1), "name") }; yield return new object[] { new CodeDelegateCreateExpression(invalidTypeReference2, new CodePrimitiveExpression(1), "name") }; yield return new object[] { new CodeDelegateCreateExpression(invalidTypeReference3, new CodePrimitiveExpression(1), "name") }; yield return new object[] { new CodeDelegateCreateExpression(new CodeTypeReference("name"), new CodeExpression(), "name") }; yield return new object[] { new CodeDelegateCreateExpression(new CodeTypeReference("name"), new CodePrimitiveExpression(1), null) }; yield return new object[] { new CodeDelegateCreateExpression(new CodeTypeReference("name"), new CodePrimitiveExpression(1), string.Empty) }; yield return new object[] { new CodeDelegateCreateExpression(new CodeTypeReference("name"), new CodePrimitiveExpression(1), "0") }; // CodeFieldReferenceExpression. 
yield return new object[] { new CodeFieldReferenceExpression() }; yield return new object[] { new CodeFieldReferenceExpression(null, null) }; yield return new object[] { new CodeFieldReferenceExpression(null, string.Empty) }; yield return new object[] { new CodeFieldReferenceExpression(null, "0") }; yield return new object[] { new CodeFieldReferenceExpression(new CodeExpression(), "name") }; // CodeArgumentReferenceExpression. yield return new object[] { new CodeArgumentReferenceExpression() }; yield return new object[] { new CodeArgumentReferenceExpression(null) }; yield return new object[] { new CodeArgumentReferenceExpression(string.Empty) }; yield return new object[] { new CodeArgumentReferenceExpression("0") }; // CodeVariableReferenceExpression. yield return new object[] { new CodeVariableReferenceExpression() }; yield return new object[] { new CodeVariableReferenceExpression(null) }; yield return new object[] { new CodeVariableReferenceExpression(string.Empty) }; yield return new object[] { new CodeVariableReferenceExpression("0") }; // CodeIndexerExpression. yield return new object[] { new CodeIndexerExpression(new CodeExpression()) }; yield return new object[] { new CodeIndexerExpression(new CodePrimitiveExpression(1), new CodeExpression()) }; // CodeArrayIndexerExpression. yield return new object[] { new CodeArrayIndexerExpression(new CodeExpression()) }; yield return new object[] { new CodeArrayIndexerExpression(new CodePrimitiveExpression(1), new CodeExpression()) }; // CodeMethodInvokeExpression. 
yield return new object[] { new CodeMethodInvokeExpression(new CodeMethodReferenceExpression()) }; yield return new object[] { new CodeMethodInvokeExpression(new CodeMethodReferenceExpression(null, null)) }; yield return new object[] { new CodeMethodInvokeExpression(new CodeMethodReferenceExpression(null, string.Empty)) }; yield return new object[] { new CodeMethodInvokeExpression(new CodeMethodReferenceExpression(null, "0")) }; yield return new object[] { new CodeMethodInvokeExpression(new CodeMethodReferenceExpression(new CodeExpression(), "name")) }; yield return new object[] { new CodeMethodInvokeExpression(new CodeMethodReferenceExpression(new CodePrimitiveExpression(1), "name", new CodeTypeReference[] { new CodeTypeReference() })) }; yield return new object[] { new CodeMethodInvokeExpression(new CodeMethodReferenceExpression(new CodePrimitiveExpression(1), "name", new CodeTypeReference[] { new CodeTypeReference("0") })) }; yield return new object[] { new CodeMethodInvokeExpression(new CodeMethodReferenceExpression(new CodePrimitiveExpression(1), "name", new CodeTypeReference[] { invalidTypeReference1 })) }; yield return new object[] { new CodeMethodInvokeExpression(new CodeMethodReferenceExpression(new CodePrimitiveExpression(1), "name", new CodeTypeReference[] { invalidTypeReference2 })) }; yield return new object[] { new CodeMethodInvokeExpression(new CodeMethodReferenceExpression(new CodePrimitiveExpression(1), "name", new CodeTypeReference[] { invalidTypeReference3 })) }; yield return new object[] { new CodeMethodInvokeExpression(new CodeMethodReferenceExpression(null, "name"), new CodeExpression()) }; // CodeMethodReferenceExpression. 
yield return new object[] { new CodeMethodReferenceExpression() }; yield return new object[] { new CodeMethodReferenceExpression(null, null) }; yield return new object[] { new CodeMethodReferenceExpression(null, string.Empty) }; yield return new object[] { new CodeMethodReferenceExpression(null, "0") }; yield return new object[] { new CodeMethodReferenceExpression(new CodeExpression(), "name") }; yield return new object[] { new CodeMethodReferenceExpression(new CodePrimitiveExpression(1), "name", new CodeTypeReference[] { new CodeTypeReference() }) }; yield return new object[] { new CodeMethodReferenceExpression(new CodePrimitiveExpression(1), "name", new CodeTypeReference[] { new CodeTypeReference("0") }) }; yield return new object[] { new CodeMethodReferenceExpression(new CodePrimitiveExpression(1), "name", new CodeTypeReference[] { invalidTypeReference1 }) }; yield return new object[] { new CodeMethodReferenceExpression(new CodePrimitiveExpression(1), "name", new CodeTypeReference[] { invalidTypeReference2 }) }; yield return new object[] { new CodeMethodReferenceExpression(new CodePrimitiveExpression(1), "name", new CodeTypeReference[] { invalidTypeReference3 }) }; // CodeEventReferenceExpression. yield return new object[] { new CodeEventReferenceExpression() }; yield return new object[] { new CodeEventReferenceExpression(null, null) }; yield return new object[] { new CodeEventReferenceExpression(null, string.Empty) }; yield return new object[] { new CodeEventReferenceExpression(null, "0") }; yield return new object[] { new CodeEventReferenceExpression(new CodeExpression(), "name") }; // CodeDelegateInvokeExpression. yield return new object[] { new CodeDelegateInvokeExpression(new CodeExpression()) }; yield return new object[] { new CodeDelegateInvokeExpression(new CodePrimitiveExpression(1), new CodeExpression()) }; // CodeObjectCreateExpression. 
yield return new object[] { new CodeObjectCreateExpression(new CodeTypeReference()) }; yield return new object[] { new CodeObjectCreateExpression(new CodeTypeReference("0")) }; yield return new object[] { new CodeObjectCreateExpression(invalidTypeReference1) }; yield return new object[] { new CodeObjectCreateExpression(invalidTypeReference2) }; yield return new object[] { new CodeObjectCreateExpression(invalidTypeReference3) }; yield return new object[] { new CodeObjectCreateExpression(new CodeTypeReference("name"), new CodeExpression()) }; // CodeDirectionExpression. yield return new object[] { new CodeDirectionExpression(FieldDirection.In, new CodeExpression()) }; // CodePropertyReferenceExpression. yield return new object[] { new CodePropertyReferenceExpression() }; yield return new object[] { new CodePropertyReferenceExpression(null, null) }; yield return new object[] { new CodePropertyReferenceExpression(null, string.Empty) }; yield return new object[] { new CodePropertyReferenceExpression(null, "0") }; yield return new object[] { new CodePropertyReferenceExpression(new CodeExpression(), "name") }; // CodeTypeReferenceExpression. yield return new object[] { new CodeTypeReferenceExpression(new CodeTypeReference()) }; yield return new object[] { new CodeTypeReferenceExpression(new CodeTypeReference("0")) }; yield return new object[] { new CodeTypeReferenceExpression(invalidTypeReference1) }; yield return new object[] { new CodeTypeReferenceExpression(invalidTypeReference2) }; yield return new object[] { new CodeTypeReferenceExpression(invalidTypeReference3) }; // CodeTypeOfExpression. 
yield return new object[] { new CodeTypeOfExpression(new CodeTypeReference()) }; yield return new object[] { new CodeTypeOfExpression(new CodeTypeReference("0")) }; yield return new object[] { new CodeTypeOfExpression(invalidTypeReference1) }; yield return new object[] { new CodeTypeOfExpression(invalidTypeReference2) }; yield return new object[] { new CodeTypeOfExpression(invalidTypeReference3) }; // CodeMethodReturnStatement. yield return new object[] { new CodeMethodReturnStatement(new CodeExpression()) }; // CodeConditionStatement. yield return new object[] { new CodeConditionStatement(new CodePrimitiveExpression("1"), new CodeStatement[] { new CodeStatement() }, new CodeStatement[] { new CodeMethodReturnStatement(), new CodeMethodReturnStatement { LinePragma = new CodeLinePragma() } }) }; yield return new object[] { new CodeConditionStatement(new CodePrimitiveExpression("1"), new CodeStatement[] { new CodeMethodReturnStatement() }, new CodeStatement[] { new CodeStatement() }) }; // CodeTryCatchFinallyStatement. 
yield return new object[] { new CodeTryCatchFinallyStatement( new CodeStatement[] { new CodeStatement() }, new CodeCatchClause[] { new CodeCatchClause("localName") }, new CodeStatement[] { new CodeMethodReturnStatement() } ) }; yield return new object[] { new CodeTryCatchFinallyStatement( new CodeStatement[] { new CodeMethodReturnStatement() }, new CodeCatchClause[] { new CodeCatchClause() }, new CodeStatement[] { new CodeMethodReturnStatement() } ) }; yield return new object[] { new CodeTryCatchFinallyStatement( new CodeStatement[] { new CodeMethodReturnStatement() }, new CodeCatchClause[] { new CodeCatchClause(null) }, new CodeStatement[] { new CodeMethodReturnStatement() } ) }; yield return new object[] { new CodeTryCatchFinallyStatement( new CodeStatement[] { new CodeMethodReturnStatement() }, new CodeCatchClause[] { new CodeCatchClause(string.Empty) }, new CodeStatement[] { new CodeMethodReturnStatement() } ) }; yield return new object[] { new CodeTryCatchFinallyStatement( new CodeStatement[] { new CodeMethodReturnStatement() }, new CodeCatchClause[] { new CodeCatchClause("0") }, new CodeStatement[] { new CodeMethodReturnStatement() } ) }; yield return new object[] { new CodeTryCatchFinallyStatement( new CodeStatement[] { new CodeMethodReturnStatement() }, new CodeCatchClause[] { new CodeCatchClause("localName", new CodeTypeReference()) }, new CodeStatement[] { new CodeMethodReturnStatement() } ) }; yield return new object[] { new CodeTryCatchFinallyStatement( new CodeStatement[] { new CodeMethodReturnStatement() }, new CodeCatchClause[] { new CodeCatchClause("localName", new CodeTypeReference("0")) }, new CodeStatement[] { new CodeMethodReturnStatement() } ) }; yield return new object[] { new CodeTryCatchFinallyStatement( new CodeStatement[] { new CodeMethodReturnStatement() }, new CodeCatchClause[] { new CodeCatchClause("localName", new CodeTypeReference("exceptionType"), new CodeStatement()) }, new CodeStatement[] { new CodeMethodReturnStatement() } ) }; 
yield return new object[] { new CodeTryCatchFinallyStatement( new CodeStatement[] { new CodeMethodReturnStatement() }, new CodeCatchClause[] { new CodeCatchClause("localName") }, new CodeStatement[] { new CodeStatement() } ) }; // CodeAssignStatement. yield return new object[] { new CodeAssignStatement(new CodeExpression(), new CodePrimitiveExpression(1)) }; yield return new object[] { new CodeAssignStatement(new CodePrimitiveExpression(1), new CodeExpression()) }; // CodeExpressionStatement. yield return new object[] { new CodeExpressionStatement(new CodeExpression()) }; // CodeIterationStatement. yield return new object[] { new CodeIterationStatement(new CodeStatement(), new CodePrimitiveExpression(1), new CodeMethodReturnStatement()) }; yield return new object[] { new CodeIterationStatement(new CodeMethodReturnStatement(), new CodeExpression(), new CodeMethodReturnStatement()) }; yield return new object[] { new CodeIterationStatement(new CodeMethodReturnStatement(), new CodePrimitiveExpression(1), new CodeStatement()) }; yield return new object[] { new CodeIterationStatement(new CodeMethodReturnStatement(), new CodePrimitiveExpression(1), new CodeMethodReturnStatement(), new CodeStatement()) }; // CodeThrowExceptionStatement. yield return new object[] { new CodeThrowExceptionStatement(new CodeExpression()) }; // CodeVariableDeclarationStatement. 
yield return new object[] { new CodeVariableDeclarationStatement() }; yield return new object[] { new CodeVariableDeclarationStatement(new CodeTypeReference(), "name") }; yield return new object[] { new CodeVariableDeclarationStatement(new CodeTypeReference("0"), "name") }; yield return new object[] { new CodeVariableDeclarationStatement(invalidTypeReference1, "name") }; yield return new object[] { new CodeVariableDeclarationStatement(invalidTypeReference2, "name") }; yield return new object[] { new CodeVariableDeclarationStatement(invalidTypeReference3, "name") }; yield return new object[] { new CodeVariableDeclarationStatement(new CodeTypeReference("name"), null) }; yield return new object[] { new CodeVariableDeclarationStatement(new CodeTypeReference("name"), string.Empty) }; yield return new object[] { new CodeVariableDeclarationStatement(new CodeTypeReference("name"), "0") }; yield return new object[] { new CodeVariableDeclarationStatement(new CodeTypeReference("name"), "name", new CodeExpression()) }; // CodeAttachEventStatement. yield return new object[] { new CodeAttachEventStatement() }; yield return new object[] { new CodeAttachEventStatement(new CodeEventReferenceExpression(), new CodePrimitiveExpression(1)) }; yield return new object[] { new CodeAttachEventStatement(new CodeEventReferenceExpression(null, null), new CodePrimitiveExpression(1)) }; yield return new object[] { new CodeAttachEventStatement(new CodeEventReferenceExpression(null, string.Empty), new CodePrimitiveExpression(1)) }; yield return new object[] { new CodeAttachEventStatement(new CodeEventReferenceExpression(null, "0"), new CodePrimitiveExpression(1)) }; yield return new object[] { new CodeAttachEventStatement(new CodeEventReferenceExpression(new CodeExpression(), "name"), new CodePrimitiveExpression(1)) }; yield return new object[] { new CodeAttachEventStatement(null, new CodeExpression()) }; // CodeRemoveEventStatement. 
yield return new object[] { new CodeRemoveEventStatement() }; yield return new object[] { new CodeRemoveEventStatement(new CodeEventReferenceExpression(), new CodePrimitiveExpression(1)) }; yield return new object[] { new CodeRemoveEventStatement(new CodeEventReferenceExpression(null, null), new CodePrimitiveExpression(1)) }; yield return new object[] { new CodeRemoveEventStatement(new CodeEventReferenceExpression(null, string.Empty), new CodePrimitiveExpression(1)) }; yield return new object[] { new CodeRemoveEventStatement(new CodeEventReferenceExpression(null, "0"), new CodePrimitiveExpression(1)) }; yield return new object[] { new CodeRemoveEventStatement(new CodeEventReferenceExpression(new CodeExpression(), "name"), new CodePrimitiveExpression(1)) }; yield return new object[] { new CodeRemoveEventStatement(null, new CodeExpression()) }; // CodeGotoStatement. yield return new object[] { new CodeGotoStatement() }; yield return new object[] { new CodeGotoStatement("0") }; // CodeLabeledStatement. yield return new object[] { new CodeLabeledStatement() }; yield return new object[] { new CodeLabeledStatement(null) }; yield return new object[] { new CodeLabeledStatement(string.Empty) }; yield return new object[] { new CodeLabeledStatement("0") }; yield return new object[] { new CodeLabeledStatement("name", new CodeStatement()) }; // Misc. 
yield return new object[] { new CodeStatement() }; yield return new object[] { new CustomCodeStatement() }; yield return new object[] { new CodeExpression() }; yield return new object[] { new CustomCodeExpression() }; yield return new object[] { new CodeDirective() }; yield return new object[] { new CustomCodeDirective() }; yield return new object[] { new CodeTypeParameter() }; yield return new object[] { new CodeTypeParameter("name") }; yield return new object[] { new CodeObject() }; yield return new object[] { new CustomCodeObject() }; yield return new object[] { new CodeTypeMember() }; yield return new object[] { new CustomCodeTypeMember() }; yield return new object[] { new CodeTypeReference(";") }; yield return new object[] { new CodeTypeReference("/") }; yield return new object[] { new CodeTypeReference("#") }; yield return new object[] { new CodeTypeReference("%") }; yield return new object[] { new CodeTypeReference("=") }; yield return new object[] { new CodeTypeReference("?") }; yield return new object[] { new CodeTypeReference("\\") }; yield return new object[] { new CodeTypeReference("^") }; yield return new object[] { new CodeTypeReference("'") }; yield return new object[] { new CodeTypeReference(")") }; yield return new object[] { new CodeTypeReference("(") }; } public static IEnumerable<object[]> ValidIdentifier_InvalidMemberInType_TestData() { foreach (object[] testData in ValidateIdentifiers_Invalid_TestData()) { if (testData[0] is CodeTypeMember member) { var t = new CodeTypeDeclaration("name"); t.Members.Add(member); yield return new object[] { t }; var n = new CodeNamespace("namespace"); n.Types.Add(t); yield return new object[] { n }; } else if (testData[0] is CodeTypeDeclaration type) { var n = new CodeNamespace(); n.Types.Add(type); yield return new object[] { n }; } } } [Theory] [MemberData(nameof(ValidateIdentifiers_Invalid_TestData))] [MemberData(nameof(ValidIdentifier_InvalidMemberInType_TestData))] 
[SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Fixed incorrect param name in some situations")]
public void ValidateIdentifiers_InvalidE_ThrowsArgumentException(CodeObject e)
{
    // The thrown ArgumentException must report "e" as the offending parameter
    // name (the .NET Framework implementation reported other names in some cases).
    AssertExtensions.Throws<ArgumentException>("e", () => CodeGenerator.ValidateIdentifiers(e));
}

/// <summary>
/// Graphs (or a null root) containing a null constituent node. ValidateIdentifiers
/// is expected to surface these as ArgumentNullException rather than crash with a
/// NullReferenceException.
/// </summary>
public static IEnumerable<object[]> ValidateIdentifiers_NullE_TestData()
{
    // A null root object.
    yield return new object[] { null };

    // Type whose custom attribute carries a default-constructed (null-expression) argument.
    var typeWithDefaultAttributeArgument = new CodeTypeDeclaration("name");
    typeWithDefaultAttributeArgument.CustomAttributes.Add(new CodeAttributeDeclaration("name", new CodeAttributeArgument()));
    yield return new object[] { typeWithDefaultAttributeArgument };

    // Type whose generic type parameter carries a default-constructed attribute argument.
    var parameterWithDefaultAttributeArgument = new CodeTypeParameter("parameter");
    parameterWithDefaultAttributeArgument.CustomAttributes.Add(new CodeAttributeDeclaration("attribute", new CodeAttributeArgument()));
    var typeWithInvalidTypeParameter = new CodeTypeDeclaration("name");
    typeWithInvalidTypeParameter.TypeParameters.Add(parameterWithDefaultAttributeArgument);
    yield return new object[] { typeWithInvalidTypeParameter };

    // Expressions with missing (null) operands or targets.
    yield return new object[] { new CodeBinaryOperatorExpression() };
    yield return new object[] { new CodeBinaryOperatorExpression(null, CodeBinaryOperatorType.Add, new CodePrimitiveExpression(2)) };
    yield return new object[] { new CodeBinaryOperatorExpression(new CodePrimitiveExpression(1), CodeBinaryOperatorType.Add, null) };
    yield return new object[] { new CodeCastExpression() };
    yield return new object[] { new CodeCastExpression(new CodeTypeReference("name"), null) };
    yield return new object[] { new CodeDelegateCreateExpression() };
    yield return new object[] { new CodeDelegateCreateExpression(new CodeTypeReference("name"), null, "methodName") };
    yield return new object[] { new CodeIndexerExpression() };
    yield return new object[] { new CodeIndexerExpression(null) };
    yield return new object[] { new CodeArrayIndexerExpression() };
    yield return new object[] { new CodeArrayIndexerExpression(null) };
    yield return new object[] { new CodeDirectionExpression() };
    yield return new object[] { new CodeDirectionExpression(FieldDirection.In, null) };

    // Statements with missing (null) constituent expressions or statements.
    yield return new object[] { new CodeExpressionStatement() };
    yield return new object[] { new CodeExpressionStatement(null) };
    yield return new object[] { new CodeConditionStatement() };
    yield return new object[] { new CodeConditionStatement(null) };
    yield return new object[] { new CodeAssignStatement() };
    yield return new object[] { new CodeAssignStatement(null, new CodePrimitiveExpression(1)) };
    yield return new object[] { new CodeAssignStatement(new CodePrimitiveExpression(1), null) };
    yield return new object[] { new CodeIterationStatement() };
    yield return new object[] { new CodeIterationStatement(null, new CodePrimitiveExpression(1), new CodeMethodReturnStatement()) };
    yield return new object[] { new CodeIterationStatement(new CodeMethodReturnStatement(), null, new CodeMethodReturnStatement()) };
    yield return new object[] { new CodeIterationStatement(new CodeMethodReturnStatement(), new CodePrimitiveExpression(1), null) };
    yield return new object[] { new CodeAttachEventStatement(new CodeEventReferenceExpression(new CodePrimitiveExpression(1), "name"), null) };
    yield return new object[] { new CodeRemoveEventStatement(new CodeEventReferenceExpression(new CodePrimitiveExpression(1), "name"), null) };
}

[Theory]
[MemberData(nameof(ValidateIdentifiers_NullE_TestData))]
[SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Fixed NullReferenceException")]
public void ValidateIdentifiers_NullE_ThrowsArgumentNullException(CodeObject e)
{
    AssertExtensions.Throws<ArgumentNullException>("e", () => CodeGenerator.ValidateIdentifiers(e));
}

// Minimal subclasses used elsewhere in this file to exercise validation of
// user-defined (non-built-in) CodeDom node types.
private class CustomCodeExpression : CodeExpression { }
private class CustomCodeStatement : CodeStatement { }
private class CustomCodeTypeMember : CodeTypeMember { }
private class CustomCodeDirective : CodeDirective { }
private class CustomCodeObject : CodeObject { }
}
}
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using Xunit; namespace System.CodeDom.Compiler.Tests { public class CodeValidatorTests { public static IEnumerable<object[]> ValidateIdentifiers_Valid_TestData() { // CodeComment. yield return new object[] { new CodeComment() }; yield return new object[] { new CodeCommentStatement() }; yield return new object[] { new CodeCommentStatement((string)null) }; yield return new object[] { new CodeCommentStatement(string.Empty) }; yield return new object[] { new CodeCommentStatement("text") }; // CodeChecksumPragma yield return new object[] { new CodeChecksumPragma() }; yield return new object[] { new CodeChecksumPragma(null, Guid.NewGuid(), new byte[0]) }; yield return new object[] { new CodeChecksumPragma(string.Empty, Guid.NewGuid(), new byte[0]) }; yield return new object[] { new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0]) }; // CodeRegionDirective. yield return new object[] { new CodeRegionDirective() }; yield return new object[] { new CodeRegionDirective(CodeRegionMode.None, null) }; yield return new object[] { new CodeRegionDirective(CodeRegionMode.None, string.Empty) }; yield return new object[] { new CodeRegionDirective(CodeRegionMode.None, "name") }; // CodeNamespaceImport. yield return new object[] { new CodeNamespaceImport("nameSpace") }; var fullNamespaceImport = new CodeNamespaceImport("nameSpace") { LinePragma = new CodeLinePragma() }; yield return new object[] { fullNamespaceImport }; // CodeMemberEvent. 
yield return new object[] { new CodeMemberEvent() }; yield return new object[] { new CodeMemberEvent { Name = "0" } }; yield return new object[] { new CodeMemberEvent { Name = "name", PrivateImplementationType = new CodeTypeReference() } }; yield return new object[] { new CodeMemberEvent { Name = "name", PrivateImplementationType = new CodeTypeReference("0") } }; var fullEvent = new CodeMemberEvent { Name = "name", LinePragma = new CodeLinePragma(), PrivateImplementationType = new CodeTypeReference("type") }; fullEvent.Comments.Add(new CodeCommentStatement()); fullEvent.Comments.Add(new CodeCommentStatement("0")); fullEvent.Comments.Add(new CodeCommentStatement("text")); fullEvent.StartDirectives.Add(new CodeChecksumPragma()); fullEvent.StartDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); fullEvent.EndDirectives.Add(new CodeChecksumPragma()); fullEvent.EndDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); fullEvent.CustomAttributes.Add(new CodeAttributeDeclaration("attribute1")); fullEvent.CustomAttributes.Add(new CodeAttributeDeclaration("attribute2", new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullEvent.CustomAttributes.Add(new CodeAttributeDeclaration("attribute3", new CodeAttributeArgument(null, new CodePrimitiveExpression(1)), new CodeAttributeArgument(string.Empty, new CodePrimitiveExpression(1)), new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullEvent.ImplementationTypes.Add(new CodeTypeReference((string)null)); fullEvent.ImplementationTypes.Add(new CodeTypeReference(string.Empty)); fullEvent.ImplementationTypes.Add(new CodeTypeReference("constraint1")); fullEvent.ImplementationTypes.Add(new CodeTypeReference("constraint2`2", new CodeTypeReference("parameter1"), new CodeTypeReference("parameter2"))); yield return new object[] { fullEvent }; // CodeMemberField. 
yield return new object[] { new CodeMemberField(new CodeTypeReference("type"), "name") }; var fullField = new CodeMemberField(new CodeTypeReference("type"), "name") { LinePragma = new CodeLinePragma(), InitExpression = new CodePrimitiveExpression(1) }; fullField.Comments.Add(new CodeCommentStatement()); fullField.Comments.Add(new CodeCommentStatement("0")); fullField.Comments.Add(new CodeCommentStatement("text")); fullField.StartDirectives.Add(new CodeChecksumPragma()); fullField.StartDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); fullField.EndDirectives.Add(new CodeChecksumPragma()); fullField.EndDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); fullField.CustomAttributes.Add(new CodeAttributeDeclaration("attribute1")); fullField.CustomAttributes.Add(new CodeAttributeDeclaration("attribute2", new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullField.CustomAttributes.Add(new CodeAttributeDeclaration("attribute3", new CodeAttributeArgument(null, new CodePrimitiveExpression(1)), new CodeAttributeArgument(string.Empty, new CodePrimitiveExpression(1)), new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); yield return new object[] { fullField }; // CodeParameterDeclarationExpression. 
yield return new object[] { new CodeParameterDeclarationExpression(new CodeTypeReference("type"), "name") }; var fullParameter = new CodeParameterDeclarationExpression(new CodeTypeReference("type"), "name"); fullParameter.CustomAttributes.Add(new CodeAttributeDeclaration("attribute1")); fullParameter.CustomAttributes.Add(new CodeAttributeDeclaration("attribute2", new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullParameter.CustomAttributes.Add(new CodeAttributeDeclaration("attribute3", new CodeAttributeArgument(null, new CodePrimitiveExpression(1)), new CodeAttributeArgument(string.Empty, new CodePrimitiveExpression(1)), new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); yield return new object[] { fullParameter }; var fullTypeParameter = new CodeTypeParameter("parameter"); fullTypeParameter.CustomAttributes.Add(new CodeAttributeDeclaration("attribute1")); fullTypeParameter.CustomAttributes.Add(new CodeAttributeDeclaration("attribute2", new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullTypeParameter.CustomAttributes.Add(new CodeAttributeDeclaration("attribute3", new CodeAttributeArgument(null, new CodePrimitiveExpression(1)), new CodeAttributeArgument(string.Empty, new CodePrimitiveExpression(1)), new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullTypeParameter.Constraints.Add(new CodeTypeReference((string)null)); fullTypeParameter.Constraints.Add(new CodeTypeReference(string.Empty)); fullTypeParameter.Constraints.Add(new CodeTypeReference("constraint1")); fullTypeParameter.Constraints.Add(new CodeTypeReference("constraint2`2", new CodeTypeReference("parameter1"), new CodeTypeReference("parameter2"))); var invalidParameterAttribute1 = new CodeParameterDeclarationExpression(new CodeTypeReference("type"), "name"); invalidParameterAttribute1.CustomAttributes.Add(new CodeAttributeDeclaration()); var invalidParameterAttribute2 = new CodeParameterDeclarationExpression(new 
CodeTypeReference("type"), "name"); invalidParameterAttribute2.CustomAttributes.Add(new CodeAttributeDeclaration((string)null)); var invalidParameterAttribute3 = new CodeParameterDeclarationExpression(new CodeTypeReference("type"), "name"); invalidParameterAttribute3.CustomAttributes.Add(new CodeAttributeDeclaration(string.Empty)); var invalidParameterAttribute4 = new CodeParameterDeclarationExpression(new CodeTypeReference("type"), "name"); invalidParameterAttribute4.CustomAttributes.Add(new CodeAttributeDeclaration("0")); var invalidParameterAttribute5 = new CodeParameterDeclarationExpression(new CodeTypeReference("type"), "name"); invalidParameterAttribute5.CustomAttributes.Add(new CodeAttributeDeclaration("name", new CodeAttributeArgument("0", new CodePrimitiveExpression(1)))); var invalidParameterAttribute6 = new CodeParameterDeclarationExpression(new CodeTypeReference("type"), "name"); invalidParameterAttribute6.CustomAttributes.Add(new CodeAttributeDeclaration("name", new CodeAttributeArgument("name", new CodeExpression()))); // CodeMemberMethod. 
yield return new object[] { new CodeMemberMethod { Name = "name" } }; var abstractMethod = new CodeMemberMethod { Name = "name", Attributes = MemberAttributes.Abstract }; abstractMethod.Statements.Add(new CodeStatement()); yield return new object[] { abstractMethod }; var fullMethod = new CodeMemberMethod { Name = "name", LinePragma = new CodeLinePragma(), ReturnType = new CodeTypeReference("returnType"), PrivateImplementationType = new CodeTypeReference("privateImplementationType") }; fullMethod.Comments.Add(new CodeCommentStatement()); fullMethod.Comments.Add(new CodeCommentStatement("0")); fullMethod.Comments.Add(new CodeCommentStatement("text")); fullMethod.StartDirectives.Add(new CodeChecksumPragma()); fullMethod.StartDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); fullMethod.EndDirectives.Add(new CodeChecksumPragma()); fullMethod.EndDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); fullMethod.CustomAttributes.Add(new CodeAttributeDeclaration("attribute1")); fullMethod.CustomAttributes.Add(new CodeAttributeDeclaration("attribute2", new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullMethod.CustomAttributes.Add(new CodeAttributeDeclaration("attribute3", new CodeAttributeArgument(null, new CodePrimitiveExpression(1)), new CodeAttributeArgument(string.Empty, new CodePrimitiveExpression(1)), new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullMethod.ImplementationTypes.Add(new CodeTypeReference((string)null)); fullMethod.ImplementationTypes.Add(new CodeTypeReference(string.Empty)); fullMethod.ImplementationTypes.Add(new CodeTypeReference("constraint1")); fullMethod.ImplementationTypes.Add(new CodeTypeReference("constraint2`2", new CodeTypeReference("parameter1"), new CodeTypeReference("parameter2"))); fullMethod.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration("attribute1")); fullMethod.ReturnTypeCustomAttributes.Add(new 
CodeAttributeDeclaration("attribute2", new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullMethod.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration("attribute3", new CodeAttributeArgument(null, new CodePrimitiveExpression(1)), new CodeAttributeArgument(string.Empty, new CodePrimitiveExpression(1)), new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullMethod.Statements.Add(new CodeMethodReturnStatement()); fullMethod.Statements.Add(new CodeMethodReturnStatement { LinePragma = new CodeLinePragma() }); fullMethod.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("type"), "name")); fullMethod.Parameters.Add(fullParameter); fullMethod.TypeParameters.Add(new CodeTypeParameter("parameter1")); fullMethod.TypeParameters.Add(fullTypeParameter); yield return new object[] { fullMethod }; // CodeEntryPointMethod. yield return new object[] { new CodeEntryPointMethod() }; yield return new object[] { new CodeEntryPointMethod { Name = null } }; yield return new object[] { new CodeEntryPointMethod { Name = string.Empty } }; yield return new object[] { new CodeEntryPointMethod { Name = "name" } }; yield return new object[] { new CodeEntryPointMethod { Name = "0" } }; yield return new object[] { new CodeEntryPointMethod { Name = "name", ReturnType = new CodeTypeReference() } }; yield return new object[] { new CodeEntryPointMethod { Name = "name", ReturnType = new CodeTypeReference("0") } }; yield return new object[] { new CodeEntryPointMethod { Name = "name", PrivateImplementationType = new CodeTypeReference() } }; yield return new object[] { new CodeEntryPointMethod { Name = "name", PrivateImplementationType = new CodeTypeReference("0") } }; var abstractEntryPointMethod = new CodeEntryPointMethod { Name = "name", Attributes = MemberAttributes.Abstract }; abstractEntryPointMethod.Statements.Add(new CodeMethodReturnStatement()); yield return new object[] { abstractEntryPointMethod }; var fullEntryPointMethod = 
new CodeEntryPointMethod { Name = "name", LinePragma = new CodeLinePragma(), ReturnType = new CodeTypeReference("returnType"), PrivateImplementationType = new CodeTypeReference("privateImplementationType") }; fullEntryPointMethod.Comments.Add(new CodeCommentStatement()); fullEntryPointMethod.Comments.Add(new CodeCommentStatement("0")); fullEntryPointMethod.Comments.Add(new CodeCommentStatement("text")); fullEntryPointMethod.StartDirectives.Add(new CodeChecksumPragma()); fullEntryPointMethod.StartDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); fullEntryPointMethod.EndDirectives.Add(new CodeChecksumPragma()); fullEntryPointMethod.EndDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); fullEntryPointMethod.CustomAttributes.Add(new CodeAttributeDeclaration("attribute1")); fullEntryPointMethod.CustomAttributes.Add(new CodeAttributeDeclaration("attribute2", new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullEntryPointMethod.CustomAttributes.Add(new CodeAttributeDeclaration("attribute3", new CodeAttributeArgument(null, new CodePrimitiveExpression(1)), new CodeAttributeArgument(string.Empty, new CodePrimitiveExpression(1)), new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullEntryPointMethod.CustomAttributes.Add(new CodeAttributeDeclaration()); fullEntryPointMethod.CustomAttributes.Add(new CodeAttributeDeclaration((string)null)); fullEntryPointMethod.CustomAttributes.Add(new CodeAttributeDeclaration(string.Empty)); fullEntryPointMethod.CustomAttributes.Add(new CodeAttributeDeclaration("0")); fullEntryPointMethod.CustomAttributes.Add(new CodeAttributeDeclaration("name", new CodeAttributeArgument("0", new CodePrimitiveExpression(1)))); fullEntryPointMethod.CustomAttributes.Add(new CodeAttributeDeclaration("name", new CodeAttributeArgument("name", new CodeExpression()))); fullEntryPointMethod.ImplementationTypes.Add(new CodeTypeReference((string)null)); 
fullEntryPointMethod.ImplementationTypes.Add(new CodeTypeReference(string.Empty)); fullEntryPointMethod.ImplementationTypes.Add(new CodeTypeReference("constraint1")); fullEntryPointMethod.ImplementationTypes.Add(new CodeTypeReference("constraint2`2", new CodeTypeReference("parameter1"), new CodeTypeReference("parameter2"))); fullEntryPointMethod.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration("attribute1")); fullEntryPointMethod.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration("attribute2", new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullEntryPointMethod.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration("attribute3", new CodeAttributeArgument(null, new CodePrimitiveExpression(1)), new CodeAttributeArgument(string.Empty, new CodePrimitiveExpression(1)), new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullEntryPointMethod.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration()); fullEntryPointMethod.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration((string)null)); fullEntryPointMethod.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration(string.Empty)); fullEntryPointMethod.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration("0")); fullEntryPointMethod.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration("name", new CodeAttributeArgument("0", new CodePrimitiveExpression(1)))); fullEntryPointMethod.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration("name", new CodeAttributeArgument("name", new CodeExpression()))); fullEntryPointMethod.Statements.Add(new CodeMethodReturnStatement()); fullEntryPointMethod.Statements.Add(new CodeMethodReturnStatement { LinePragma = new CodeLinePragma() }); fullEntryPointMethod.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("type"), "name")); fullEntryPointMethod.Parameters.Add(fullParameter); fullEntryPointMethod.Parameters.Add(new CodeParameterDeclarationExpression()); 
fullEntryPointMethod.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference(), "name")); fullEntryPointMethod.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("0"), "name")); fullEntryPointMethod.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("type"), null)); fullEntryPointMethod.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("type"), string.Empty)); fullEntryPointMethod.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("type"), "0")); fullEntryPointMethod.Parameters.Add(invalidParameterAttribute1); fullEntryPointMethod.Parameters.Add(invalidParameterAttribute2); fullEntryPointMethod.Parameters.Add(invalidParameterAttribute3); fullEntryPointMethod.Parameters.Add(invalidParameterAttribute4); fullEntryPointMethod.Parameters.Add(invalidParameterAttribute5); fullEntryPointMethod.Parameters.Add(invalidParameterAttribute6); fullEntryPointMethod.TypeParameters.Add(new CodeTypeParameter("parameter1")); fullEntryPointMethod.TypeParameters.Add(fullTypeParameter); yield return new object[] { fullEntryPointMethod }; // CodeConstructor. 
yield return new object[] { new CodeConstructor() }; yield return new object[] { new CodeConstructor { Name = null } }; yield return new object[] { new CodeConstructor { Name = string.Empty } }; yield return new object[] { new CodeConstructor { Name = "0" } }; yield return new object[] { new CodeConstructor { Name = "name", ReturnType = new CodeTypeReference() } }; yield return new object[] { new CodeConstructor { Name = "name", ReturnType = new CodeTypeReference("0") } }; yield return new object[] { new CodeConstructor { Name = "name", PrivateImplementationType = new CodeTypeReference() } }; yield return new object[] { new CodeConstructor { Name = "name", PrivateImplementationType = new CodeTypeReference("0") } }; var fullConstructor = new CodeConstructor { Name = "name", LinePragma = new CodeLinePragma(), ReturnType = new CodeTypeReference("returnType"), PrivateImplementationType = new CodeTypeReference("privateImplementationType") }; fullConstructor.Comments.Add(new CodeCommentStatement()); fullConstructor.Comments.Add(new CodeCommentStatement("0")); fullConstructor.Comments.Add(new CodeCommentStatement("text")); fullConstructor.StartDirectives.Add(new CodeChecksumPragma()); fullConstructor.StartDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); fullConstructor.EndDirectives.Add(new CodeChecksumPragma()); fullConstructor.EndDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); fullConstructor.CustomAttributes.Add(new CodeAttributeDeclaration("attribute1")); fullConstructor.CustomAttributes.Add(new CodeAttributeDeclaration("attribute2", new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullConstructor.CustomAttributes.Add(new CodeAttributeDeclaration("attribute3", new CodeAttributeArgument(null, new CodePrimitiveExpression(1)), new CodeAttributeArgument(string.Empty, new CodePrimitiveExpression(1)), new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); 
fullConstructor.ImplementationTypes.Add(new CodeTypeReference((string)null)); fullConstructor.ImplementationTypes.Add(new CodeTypeReference(string.Empty)); fullConstructor.ImplementationTypes.Add(new CodeTypeReference("constraint1")); fullConstructor.ImplementationTypes.Add(new CodeTypeReference("constraint2`2", new CodeTypeReference("parameter1"), new CodeTypeReference("parameter2"))); fullConstructor.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration("attribute1")); fullConstructor.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration("attribute2", new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullConstructor.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration("attribute3", new CodeAttributeArgument(null, new CodePrimitiveExpression(1)), new CodeAttributeArgument(string.Empty, new CodePrimitiveExpression(1)), new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullConstructor.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration()); fullConstructor.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration((string)null)); fullConstructor.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration(string.Empty)); fullConstructor.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration("0")); fullConstructor.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration("name", new CodeAttributeArgument("0", new CodePrimitiveExpression(1)))); fullConstructor.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration("name", new CodeAttributeArgument("name", new CodeExpression()))); fullConstructor.Statements.Add(new CodeMethodReturnStatement()); fullConstructor.Statements.Add(new CodeMethodReturnStatement { LinePragma = new CodeLinePragma() }); fullConstructor.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("type"), "name")); fullConstructor.Parameters.Add(fullParameter); fullConstructor.TypeParameters.Add(new CodeTypeParameter("parameter1")); 
fullConstructor.TypeParameters.Add(fullTypeParameter); fullConstructor.BaseConstructorArgs.Add(new CodePrimitiveExpression(1)); fullConstructor.ChainedConstructorArgs.Add(new CodePrimitiveExpression(1)); yield return new object[] { fullConstructor }; // CodeTypeConstructor. yield return new object[] { new CodeTypeConstructor() }; yield return new object[] { new CodeTypeConstructor { Name = null } }; yield return new object[] { new CodeTypeConstructor { Name = string.Empty } }; yield return new object[] { new CodeTypeConstructor { Name = "name" } }; yield return new object[] { new CodeTypeConstructor { Name = "0" } }; yield return new object[] { new CodeTypeConstructor { Name = "name", ReturnType = new CodeTypeReference() } }; yield return new object[] { new CodeTypeConstructor { Name = "name", ReturnType = new CodeTypeReference("0") } }; yield return new object[] { new CodeTypeConstructor { Name = "name", PrivateImplementationType = new CodeTypeReference() } }; yield return new object[] { new CodeTypeConstructor { Name = "name", PrivateImplementationType = new CodeTypeReference("0") } }; var abstractTypeConstructor = new CodeTypeConstructor { Name = "name", Attributes = MemberAttributes.Abstract }; abstractTypeConstructor.Statements.Add(new CodeMethodReturnStatement()); yield return new object[] { abstractTypeConstructor }; var fullTypeConstructor = new CodeTypeConstructor { Name = "name", LinePragma = new CodeLinePragma(), ReturnType = new CodeTypeReference("returnType"), PrivateImplementationType = new CodeTypeReference("privateImplementationType") }; fullTypeConstructor.Comments.Add(new CodeCommentStatement()); fullTypeConstructor.Comments.Add(new CodeCommentStatement("0")); fullTypeConstructor.Comments.Add(new CodeCommentStatement("text")); fullTypeConstructor.StartDirectives.Add(new CodeChecksumPragma()); fullTypeConstructor.StartDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); fullTypeConstructor.EndDirectives.Add(new 
CodeChecksumPragma()); fullTypeConstructor.EndDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); fullTypeConstructor.CustomAttributes.Add(new CodeAttributeDeclaration("attribute1")); fullTypeConstructor.CustomAttributes.Add(new CodeAttributeDeclaration("attribute2", new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullTypeConstructor.CustomAttributes.Add(new CodeAttributeDeclaration("attribute3", new CodeAttributeArgument(null, new CodePrimitiveExpression(1)), new CodeAttributeArgument(string.Empty, new CodePrimitiveExpression(1)), new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullTypeConstructor.CustomAttributes.Add(new CodeAttributeDeclaration()); fullTypeConstructor.CustomAttributes.Add(new CodeAttributeDeclaration((string)null)); fullTypeConstructor.CustomAttributes.Add(new CodeAttributeDeclaration(string.Empty)); fullTypeConstructor.CustomAttributes.Add(new CodeAttributeDeclaration("0")); fullTypeConstructor.CustomAttributes.Add(new CodeAttributeDeclaration("name", new CodeAttributeArgument("0", new CodePrimitiveExpression(1)))); fullTypeConstructor.CustomAttributes.Add(new CodeAttributeDeclaration("name", new CodeAttributeArgument("name", new CodeExpression()))); fullTypeConstructor.ImplementationTypes.Add(new CodeTypeReference((string)null)); fullTypeConstructor.ImplementationTypes.Add(new CodeTypeReference(string.Empty)); fullTypeConstructor.ImplementationTypes.Add(new CodeTypeReference("constraint1")); fullTypeConstructor.ImplementationTypes.Add(new CodeTypeReference("constraint2`2", new CodeTypeReference("parameter1"), new CodeTypeReference("parameter2"))); fullTypeConstructor.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration("attribute1")); fullTypeConstructor.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration("attribute2", new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullTypeConstructor.ReturnTypeCustomAttributes.Add(new 
CodeAttributeDeclaration("attribute3", new CodeAttributeArgument(null, new CodePrimitiveExpression(1)), new CodeAttributeArgument(string.Empty, new CodePrimitiveExpression(1)), new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullTypeConstructor.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration()); fullTypeConstructor.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration((string)null)); fullTypeConstructor.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration(string.Empty)); fullTypeConstructor.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration("0")); fullTypeConstructor.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration("name", new CodeAttributeArgument("0", new CodePrimitiveExpression(1)))); fullTypeConstructor.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration("name", new CodeAttributeArgument("name", new CodeExpression()))); fullTypeConstructor.Statements.Add(new CodeMethodReturnStatement()); fullTypeConstructor.Statements.Add(new CodeMethodReturnStatement { LinePragma = new CodeLinePragma() }); fullTypeConstructor.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("type"), "name")); fullTypeConstructor.Parameters.Add(fullParameter); fullTypeConstructor.Parameters.Add(new CodeParameterDeclarationExpression()); fullTypeConstructor.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference(), "name")); fullTypeConstructor.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("0"), "name")); fullTypeConstructor.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("type"), null)); fullTypeConstructor.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("type"), string.Empty)); fullTypeConstructor.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("type"), "0")); fullTypeConstructor.Parameters.Add(invalidParameterAttribute1); 
fullTypeConstructor.Parameters.Add(invalidParameterAttribute2); fullTypeConstructor.Parameters.Add(invalidParameterAttribute3); fullTypeConstructor.Parameters.Add(invalidParameterAttribute4); fullTypeConstructor.Parameters.Add(invalidParameterAttribute5); fullTypeConstructor.Parameters.Add(invalidParameterAttribute6); fullTypeConstructor.TypeParameters.Add(new CodeTypeParameter("parameter1")); fullTypeConstructor.TypeParameters.Add(fullTypeParameter); yield return new object[] { fullTypeConstructor }; // CodeMemberProperty. yield return new object[] { new CodeMemberProperty { Name = "name" } }; yield return new object[] { new CodeMemberProperty { Name = "item" } }; yield return new object[] { new CodeMemberProperty { Name = "Item" } }; var abstractProperty = new CodeMemberProperty { Name = "name", Attributes = MemberAttributes.Abstract }; abstractProperty.GetStatements.Add(new CodeStatement()); abstractProperty.SetStatements.Add(new CodeStatement()); yield return new object[] { abstractProperty }; var fullItemPropertyUpper = new CodeMemberProperty { Name = "Item", PrivateImplementationType = new CodeTypeReference("implementationType") }; fullItemPropertyUpper.Comments.Add(new CodeCommentStatement()); fullItemPropertyUpper.Comments.Add(new CodeCommentStatement("0")); fullItemPropertyUpper.Comments.Add(new CodeCommentStatement("text")); fullItemPropertyUpper.StartDirectives.Add(new CodeChecksumPragma()); fullItemPropertyUpper.StartDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); fullItemPropertyUpper.EndDirectives.Add(new CodeChecksumPragma()); fullItemPropertyUpper.EndDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); fullItemPropertyUpper.CustomAttributes.Add(new CodeAttributeDeclaration("attribute1")); fullItemPropertyUpper.CustomAttributes.Add(new CodeAttributeDeclaration("attribute2", new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullItemPropertyUpper.CustomAttributes.Add(new 
CodeAttributeDeclaration("attribute3", new CodeAttributeArgument(null, new CodePrimitiveExpression(1)), new CodeAttributeArgument(string.Empty, new CodePrimitiveExpression(1)), new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullItemPropertyUpper.GetStatements.Add(new CodeMethodReturnStatement()); fullItemPropertyUpper.GetStatements.Add(new CodeMethodReturnStatement { LinePragma = new CodeLinePragma() }); fullItemPropertyUpper.SetStatements.Add(new CodeMethodReturnStatement()); fullItemPropertyUpper.SetStatements.Add(new CodeMethodReturnStatement { LinePragma = new CodeLinePragma() }); fullItemPropertyUpper.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("type"), "name")); fullItemPropertyUpper.Parameters.Add(fullParameter); yield return new object[] { fullItemPropertyUpper }; var fullItemPropertyLower = new CodeMemberProperty { Name = "Item", PrivateImplementationType = new CodeTypeReference("implementationType") }; fullItemPropertyLower.Comments.Add(new CodeCommentStatement()); fullItemPropertyLower.Comments.Add(new CodeCommentStatement("0")); fullItemPropertyLower.Comments.Add(new CodeCommentStatement("text")); fullItemPropertyLower.StartDirectives.Add(new CodeChecksumPragma()); fullItemPropertyLower.StartDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); fullItemPropertyLower.EndDirectives.Add(new CodeChecksumPragma()); fullItemPropertyLower.EndDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); fullItemPropertyLower.CustomAttributes.Add(new CodeAttributeDeclaration("attribute1")); fullItemPropertyLower.CustomAttributes.Add(new CodeAttributeDeclaration("attribute2", new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullItemPropertyLower.CustomAttributes.Add(new CodeAttributeDeclaration("attribute3", new CodeAttributeArgument(null, new CodePrimitiveExpression(1)), new CodeAttributeArgument(string.Empty, new CodePrimitiveExpression(1)), 
new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullItemPropertyLower.GetStatements.Add(new CodeMethodReturnStatement()); fullItemPropertyLower.GetStatements.Add(new CodeMethodReturnStatement { LinePragma = new CodeLinePragma() }); fullItemPropertyLower.SetStatements.Add(new CodeMethodReturnStatement()); fullItemPropertyLower.SetStatements.Add(new CodeMethodReturnStatement { LinePragma = new CodeLinePragma() }); fullItemPropertyLower.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("type"), "name")); fullItemPropertyLower.Parameters.Add(fullParameter); yield return new object[] { fullItemPropertyLower }; var fullProperty = new CodeMemberProperty { Name = "name", PrivateImplementationType = new CodeTypeReference("implementationType") }; fullProperty.Comments.Add(new CodeCommentStatement()); fullProperty.Comments.Add(new CodeCommentStatement("0")); fullProperty.Comments.Add(new CodeCommentStatement("text")); fullProperty.StartDirectives.Add(new CodeChecksumPragma()); fullProperty.StartDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); fullProperty.EndDirectives.Add(new CodeChecksumPragma()); fullProperty.EndDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); fullProperty.CustomAttributes.Add(new CodeAttributeDeclaration("attribute1")); fullProperty.CustomAttributes.Add(new CodeAttributeDeclaration("attribute2", new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullProperty.CustomAttributes.Add(new CodeAttributeDeclaration("attribute3", new CodeAttributeArgument(null, new CodePrimitiveExpression(1)), new CodeAttributeArgument(string.Empty, new CodePrimitiveExpression(1)), new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullProperty.GetStatements.Add(new CodeMethodReturnStatement()); fullProperty.GetStatements.Add(new CodeMethodReturnStatement { LinePragma = new CodeLinePragma() }); fullProperty.SetStatements.Add(new 
CodeMethodReturnStatement()); fullProperty.SetStatements.Add(new CodeMethodReturnStatement { LinePragma = new CodeLinePragma() }); fullProperty.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("type"), "name")); fullProperty.Parameters.Add(fullParameter); fullProperty.Parameters.Add(new CodeParameterDeclarationExpression()); fullProperty.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference(), "name")); fullProperty.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("0"), "name")); fullProperty.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("type"), null)); fullProperty.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("type"), string.Empty)); fullProperty.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("type"), "0")); fullProperty.Parameters.Add(invalidParameterAttribute1); fullProperty.Parameters.Add(invalidParameterAttribute2); fullProperty.Parameters.Add(invalidParameterAttribute3); fullProperty.Parameters.Add(invalidParameterAttribute4); fullProperty.Parameters.Add(invalidParameterAttribute5); fullProperty.Parameters.Add(invalidParameterAttribute6); yield return new object[] { fullProperty }; // CodeSnippetTypeMember. 
yield return new object[] { new CodeSnippetTypeMember() }; yield return new object[] { new CodeSnippetTypeMember(null) }; yield return new object[] { new CodeSnippetTypeMember(string.Empty) }; yield return new object[] { new CodeSnippetTypeMember("0") }; yield return new object[] { new CodeSnippetTypeMember("text") }; var fullSnippetTypeMember = new CodeSnippetTypeMember("text"); fullSnippetTypeMember.Comments.Add(new CodeCommentStatement()); fullSnippetTypeMember.Comments.Add(new CodeCommentStatement("0")); fullSnippetTypeMember.Comments.Add(new CodeCommentStatement("text")); fullSnippetTypeMember.StartDirectives.Add(new CodeChecksumPragma()); fullSnippetTypeMember.StartDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); fullSnippetTypeMember.EndDirectives.Add(new CodeChecksumPragma()); fullSnippetTypeMember.EndDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); fullSnippetTypeMember.CustomAttributes.Add(new CodeAttributeDeclaration("attribute1")); fullSnippetTypeMember.CustomAttributes.Add(new CodeAttributeDeclaration("attribute2", new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullSnippetTypeMember.CustomAttributes.Add(new CodeAttributeDeclaration("attribute3", new CodeAttributeArgument(null, new CodePrimitiveExpression(1)), new CodeAttributeArgument(string.Empty, new CodePrimitiveExpression(1)), new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); yield return new object[] { fullSnippetTypeMember }; // CodeTypeDeclaration. 
yield return new object[] { new CodeTypeDeclaration("name") }; var interfaceTypeDeclaration = new CodeTypeDeclaration("name") { IsInterface = true }; var interfaceMethod = new CodeMemberMethod { Name = "name" }; interfaceMethod.Statements.Add(new CodeStatement()); var interfaceProperty = new CodeMemberProperty { Name = "name", PrivateImplementationType = new CodeTypeReference("0") }; interfaceProperty.GetStatements.Add(new CodeStatement()); interfaceProperty.SetStatements.Add(new CodeStatement()); interfaceTypeDeclaration.Members.Add(interfaceMethod); interfaceTypeDeclaration.Members.Add(interfaceProperty); yield return new object[] { interfaceTypeDeclaration }; var fullTypeDeclaration = new CodeTypeDeclaration("name"); fullTypeDeclaration.Comments.Add(new CodeCommentStatement()); fullTypeDeclaration.Comments.Add(new CodeCommentStatement("0")); fullTypeDeclaration.Comments.Add(new CodeCommentStatement("text")); fullTypeDeclaration.CustomAttributes.Add(new CodeAttributeDeclaration("attribute1")); fullTypeDeclaration.CustomAttributes.Add(new CodeAttributeDeclaration("attribute2", new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullTypeDeclaration.CustomAttributes.Add(new CodeAttributeDeclaration("attribute3", new CodeAttributeArgument(null, new CodePrimitiveExpression(1)), new CodeAttributeArgument(string.Empty, new CodePrimitiveExpression(1)), new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullTypeDeclaration.TypeParameters.Add(new CodeTypeParameter("parameter1")); fullTypeDeclaration.TypeParameters.Add(fullTypeParameter); fullTypeDeclaration.BaseTypes.Add(new CodeTypeReference((string)null)); fullTypeDeclaration.BaseTypes.Add(new CodeTypeReference(string.Empty)); fullTypeDeclaration.BaseTypes.Add(new CodeTypeReference("baseType1")); fullTypeDeclaration.BaseTypes.Add(new CodeTypeReference("baseType2`2", new CodeTypeReference("parameter1"), new CodeTypeReference("parameter2"))); fullTypeDeclaration.Members.Add(new 
CodeMemberEvent()); fullTypeDeclaration.Members.Add(new CodeMemberEvent { Name = "0" }); fullTypeDeclaration.Members.Add(new CodeMemberEvent { Name = "name", PrivateImplementationType = new CodeTypeReference() }); fullTypeDeclaration.Members.Add(new CodeMemberEvent { Name = "name", PrivateImplementationType = new CodeTypeReference("0") }); fullTypeDeclaration.Members.Add(fullEvent); fullTypeDeclaration.Members.Add(new CodeMemberField(new CodeTypeReference("type"), "name")); fullTypeDeclaration.Members.Add(fullField); fullTypeDeclaration.Members.Add(new CodeMemberMethod { Name = "name" }); fullTypeDeclaration.Members.Add(abstractMethod); fullTypeDeclaration.Members.Add(fullMethod); fullTypeDeclaration.Members.Add(new CodeEntryPointMethod()); fullTypeDeclaration.Members.Add(new CodeEntryPointMethod { Name = null }); fullTypeDeclaration.Members.Add(new CodeEntryPointMethod { Name = string.Empty }); fullTypeDeclaration.Members.Add(new CodeEntryPointMethod { Name = "name" }); fullTypeDeclaration.Members.Add(new CodeEntryPointMethod { Name = "0" }); fullTypeDeclaration.Members.Add(new CodeEntryPointMethod { Name = "name", ReturnType = new CodeTypeReference() }); fullTypeDeclaration.Members.Add(new CodeEntryPointMethod { Name = "name", ReturnType = new CodeTypeReference("0") }); fullTypeDeclaration.Members.Add(new CodeEntryPointMethod { Name = "name", PrivateImplementationType = new CodeTypeReference() }); fullTypeDeclaration.Members.Add(new CodeEntryPointMethod { Name = "name", PrivateImplementationType = new CodeTypeReference("0") }); fullTypeDeclaration.Members.Add(abstractEntryPointMethod); fullTypeDeclaration.Members.Add(fullEntryPointMethod); fullTypeDeclaration.Members.Add(new CodeConstructor()); fullTypeDeclaration.Members.Add(new CodeConstructor { Name = null }); fullTypeDeclaration.Members.Add(new CodeConstructor { Name = string.Empty }); fullTypeDeclaration.Members.Add(new CodeConstructor { Name = "0" }); fullTypeDeclaration.Members.Add(new CodeConstructor { 
Name = "name", ReturnType = new CodeTypeReference() }); fullTypeDeclaration.Members.Add(new CodeConstructor { Name = "name", ReturnType = new CodeTypeReference("0") }); fullTypeDeclaration.Members.Add(new CodeConstructor { Name = "name", PrivateImplementationType = new CodeTypeReference() }); fullTypeDeclaration.Members.Add(new CodeConstructor { Name = "name", PrivateImplementationType = new CodeTypeReference("0") }); fullTypeDeclaration.Members.Add(fullConstructor); fullTypeDeclaration.Members.Add(new CodeTypeConstructor()); fullTypeDeclaration.Members.Add(new CodeTypeConstructor { Name = null }); fullTypeDeclaration.Members.Add(new CodeTypeConstructor { Name = string.Empty }); fullTypeDeclaration.Members.Add(new CodeTypeConstructor { Name = "name" }); fullTypeDeclaration.Members.Add(new CodeTypeConstructor { Name = "0" }); fullTypeDeclaration.Members.Add(new CodeTypeConstructor { Name = "name", ReturnType = new CodeTypeReference() }); fullTypeDeclaration.Members.Add(new CodeTypeConstructor { Name = "name", ReturnType = new CodeTypeReference("0") }); fullTypeDeclaration.Members.Add(new CodeTypeConstructor { Name = "name", PrivateImplementationType = new CodeTypeReference() }); fullTypeDeclaration.Members.Add(new CodeTypeConstructor { Name = "name", PrivateImplementationType = new CodeTypeReference("0") }); fullTypeDeclaration.Members.Add(abstractTypeConstructor); fullTypeDeclaration.Members.Add(fullTypeConstructor); fullTypeDeclaration.Members.Add(new CodeMemberProperty { Name = "name" } ); fullTypeDeclaration.Members.Add(new CodeMemberProperty { Name = "item" } ); fullTypeDeclaration.Members.Add(abstractProperty); fullTypeDeclaration.Members.Add(fullItemPropertyLower); fullTypeDeclaration.Members.Add(fullItemPropertyUpper); fullTypeDeclaration.Members.Add(fullProperty); fullTypeDeclaration.Members.Add(new CodeSnippetTypeMember()); fullTypeDeclaration.Members.Add(new CodeSnippetTypeMember(null)); fullTypeDeclaration.Members.Add(new 
CodeSnippetTypeMember(string.Empty)); fullTypeDeclaration.Members.Add(new CodeSnippetTypeMember("text")); fullTypeDeclaration.Members.Add(fullSnippetTypeMember); yield return new object[] { fullTypeDeclaration }; // CodeTypeDelegate. yield return new object[] { new CodeTypeDelegate("name") }; var fullDelegate = new CodeTypeDelegate("name") { ReturnType = new CodeTypeReference("returnType") }; fullDelegate.Comments.Add(new CodeCommentStatement()); fullDelegate.Comments.Add(new CodeCommentStatement("0")); fullDelegate.Comments.Add(new CodeCommentStatement("text")); fullDelegate.CustomAttributes.Add(new CodeAttributeDeclaration("attribute1")); fullDelegate.CustomAttributes.Add(new CodeAttributeDeclaration("attribute2", new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullDelegate.CustomAttributes.Add(new CodeAttributeDeclaration("attribute3", new CodeAttributeArgument(null, new CodePrimitiveExpression(1)), new CodeAttributeArgument(string.Empty, new CodePrimitiveExpression(1)), new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullDelegate.TypeParameters.Add(new CodeTypeParameter("parameter1")); fullDelegate.TypeParameters.Add(fullTypeParameter); fullDelegate.BaseTypes.Add(new CodeTypeReference((string)null)); fullDelegate.BaseTypes.Add(new CodeTypeReference(string.Empty)); fullDelegate.BaseTypes.Add(new CodeTypeReference("baseType1")); fullDelegate.BaseTypes.Add(new CodeTypeReference("baseType2`2", new CodeTypeReference("parameter1"), new CodeTypeReference("parameter2"))); fullDelegate.Members.Add(new CodeMemberEvent()); fullDelegate.Members.Add(new CodeMemberEvent { Name = "0" }); fullDelegate.Members.Add(new CodeMemberEvent { Name = "name", PrivateImplementationType = new CodeTypeReference() }); fullDelegate.Members.Add(new CodeMemberEvent { Name = "name", PrivateImplementationType = new CodeTypeReference("0") }); fullDelegate.Members.Add(fullEvent); fullDelegate.Members.Add(new CodeMemberField(new CodeTypeReference("type"), 
"name")); fullDelegate.Members.Add(fullField); fullDelegate.Members.Add(new CodeMemberMethod { Name = "name" }); fullDelegate.Members.Add(abstractMethod); fullDelegate.Members.Add(fullMethod); fullDelegate.Members.Add(new CodeEntryPointMethod()); fullDelegate.Members.Add(new CodeEntryPointMethod { Name = null }); fullDelegate.Members.Add(new CodeEntryPointMethod { Name = string.Empty }); fullDelegate.Members.Add(new CodeEntryPointMethod { Name = "name" }); fullDelegate.Members.Add(new CodeEntryPointMethod { Name = "0" }); fullDelegate.Members.Add(new CodeEntryPointMethod { Name = "name", ReturnType = new CodeTypeReference() }); fullDelegate.Members.Add(new CodeEntryPointMethod { Name = "name", ReturnType = new CodeTypeReference("0") }); fullDelegate.Members.Add(new CodeEntryPointMethod { Name = "name", PrivateImplementationType = new CodeTypeReference() }); fullDelegate.Members.Add(new CodeEntryPointMethod { Name = "name", PrivateImplementationType = new CodeTypeReference("0") }); fullDelegate.Members.Add(abstractEntryPointMethod); fullDelegate.Members.Add(fullEntryPointMethod); fullDelegate.Members.Add(new CodeConstructor()); fullDelegate.Members.Add(new CodeConstructor { Name = null }); fullDelegate.Members.Add(new CodeConstructor { Name = string.Empty }); fullDelegate.Members.Add(new CodeConstructor { Name = "0" }); fullDelegate.Members.Add(new CodeConstructor { Name = "name", ReturnType = new CodeTypeReference() }); fullDelegate.Members.Add(new CodeConstructor { Name = "name", ReturnType = new CodeTypeReference("0") }); fullDelegate.Members.Add(new CodeConstructor { Name = "name", PrivateImplementationType = new CodeTypeReference() }); fullDelegate.Members.Add(new CodeConstructor { Name = "name", PrivateImplementationType = new CodeTypeReference("0") }); fullDelegate.Members.Add(fullConstructor); fullDelegate.Members.Add(new CodeTypeConstructor()); fullDelegate.Members.Add(new CodeTypeConstructor { Name = null }); fullDelegate.Members.Add(new 
CodeTypeConstructor { Name = string.Empty }); fullDelegate.Members.Add(new CodeTypeConstructor { Name = "name" }); fullDelegate.Members.Add(new CodeTypeConstructor { Name = "0" }); fullDelegate.Members.Add(new CodeTypeConstructor { Name = "name", ReturnType = new CodeTypeReference() }); fullDelegate.Members.Add(new CodeTypeConstructor { Name = "name", ReturnType = new CodeTypeReference("0") }); fullDelegate.Members.Add(new CodeTypeConstructor { Name = "name", PrivateImplementationType = new CodeTypeReference() }); fullDelegate.Members.Add(new CodeTypeConstructor { Name = "name", PrivateImplementationType = new CodeTypeReference("0") }); fullDelegate.Members.Add(abstractTypeConstructor); fullDelegate.Members.Add(fullTypeConstructor); fullDelegate.Members.Add(new CodeMemberProperty { Name = "name" } ); fullDelegate.Members.Add(new CodeMemberProperty { Name = "item" } ); fullDelegate.Members.Add(abstractProperty); fullDelegate.Members.Add(fullItemPropertyLower); fullDelegate.Members.Add(fullItemPropertyUpper); fullDelegate.Members.Add(fullProperty); fullDelegate.Members.Add(new CodeSnippetTypeMember()); fullDelegate.Members.Add(new CodeSnippetTypeMember(null)); fullDelegate.Members.Add(new CodeSnippetTypeMember(string.Empty)); fullDelegate.Members.Add(new CodeSnippetTypeMember("text")); fullDelegate.Members.Add(fullSnippetTypeMember); fullDelegate.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("type"), "name")); fullDelegate.Parameters.Add(fullParameter); yield return new object[] { fullDelegate }; // CodeNamespace. 
yield return new object[] { new CodeNamespace() }; yield return new object[] { new CodeNamespace(null) }; yield return new object[] { new CodeNamespace(string.Empty) }; yield return new object[] { new CodeNamespace("name") }; var fullNamespace = new CodeNamespace("name"); fullNamespace.Comments.Add(new CodeCommentStatement()); fullNamespace.Comments.Add(new CodeCommentStatement("0")); fullNamespace.Comments.Add(new CodeCommentStatement("text")); fullNamespace.Imports.Add(new CodeNamespaceImport("nameSpace1")); fullNamespace.Imports.Add(fullNamespaceImport); fullNamespace.Types.Add(new CodeTypeDeclaration("name")); fullNamespace.Types.Add(interfaceTypeDeclaration); fullNamespace.Types.Add(fullTypeDeclaration); yield return new object[] { fullNamespace }; // CodeCompileUnit. yield return new object[] { new CodeCompileUnit() }; var fullCompileUnit = new CodeCompileUnit(); fullCompileUnit.StartDirectives.Add(new CodeChecksumPragma()); fullCompileUnit.StartDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); fullCompileUnit.EndDirectives.Add(new CodeChecksumPragma()); fullCompileUnit.EndDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); fullCompileUnit.AssemblyCustomAttributes.Add(new CodeAttributeDeclaration("attribute1")); fullCompileUnit.AssemblyCustomAttributes.Add(new CodeAttributeDeclaration("attribute2", new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullCompileUnit.AssemblyCustomAttributes.Add(new CodeAttributeDeclaration("attribute3", new CodeAttributeArgument(null, new CodePrimitiveExpression(1)), new CodeAttributeArgument(string.Empty, new CodePrimitiveExpression(1)), new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullCompileUnit.Namespaces.Add(new CodeNamespace()); fullCompileUnit.Namespaces.Add(new CodeNamespace(null)); fullCompileUnit.Namespaces.Add(new CodeNamespace(string.Empty)); fullCompileUnit.Namespaces.Add(fullNamespace); 
fullCompileUnit.ReferencedAssemblies.Add(""); fullCompileUnit.ReferencedAssemblies.Add("0"); fullCompileUnit.ReferencedAssemblies.Add("assembly"); yield return new object[] { fullCompileUnit }; // CodeSnippetCompileUnit. yield return new object[] { new CodeSnippetCompileUnit() }; yield return new object[] { new CodeSnippetCompileUnit(null) }; yield return new object[] { new CodeSnippetCompileUnit("") }; yield return new object[] { new CodeSnippetCompileUnit("0") }; yield return new object[] { new CodeSnippetCompileUnit("value") }; var fullSnippetCompileUnit = new CodeSnippetCompileUnit("value") { LinePragma = new CodeLinePragma() }; fullSnippetCompileUnit.StartDirectives.Add(new CodeChecksumPragma()); fullSnippetCompileUnit.StartDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); fullSnippetCompileUnit.EndDirectives.Add(new CodeChecksumPragma()); fullSnippetCompileUnit.EndDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); fullSnippetCompileUnit.AssemblyCustomAttributes.Add(new CodeAttributeDeclaration("attribute1")); fullSnippetCompileUnit.AssemblyCustomAttributes.Add(new CodeAttributeDeclaration("attribute2", new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullSnippetCompileUnit.AssemblyCustomAttributes.Add(new CodeAttributeDeclaration("attribute3", new CodeAttributeArgument(null, new CodePrimitiveExpression(1)), new CodeAttributeArgument(string.Empty, new CodePrimitiveExpression(1)), new CodeAttributeArgument("name", new CodePrimitiveExpression(1)))); fullSnippetCompileUnit.AssemblyCustomAttributes.Add(new CodeAttributeDeclaration()); fullSnippetCompileUnit.AssemblyCustomAttributes.Add(new CodeAttributeDeclaration((string)null)); fullSnippetCompileUnit.AssemblyCustomAttributes.Add(new CodeAttributeDeclaration(string.Empty)); fullSnippetCompileUnit.AssemblyCustomAttributes.Add(new CodeAttributeDeclaration("0")); fullSnippetCompileUnit.AssemblyCustomAttributes.Add(new 
CodeAttributeDeclaration("name", new CodeAttributeArgument("0", new CodePrimitiveExpression(1)))); fullSnippetCompileUnit.AssemblyCustomAttributes.Add(new CodeAttributeDeclaration("name", new CodeAttributeArgument("name", new CodeExpression()))); fullSnippetCompileUnit.Namespaces.Add(new CodeNamespace()); fullSnippetCompileUnit.Namespaces.Add(new CodeNamespace(null)); fullSnippetCompileUnit.Namespaces.Add(new CodeNamespace(string.Empty)); fullSnippetCompileUnit.Namespaces.Add(new CodeNamespace("0")); fullSnippetCompileUnit.Namespaces.Add(fullNamespace); fullSnippetCompileUnit.ReferencedAssemblies.Add(""); fullSnippetCompileUnit.ReferencedAssemblies.Add("0"); fullSnippetCompileUnit.ReferencedAssemblies.Add("assembly"); yield return new object[] { fullSnippetCompileUnit }; // CodeTypeReference. yield return new object[] { new CodeTypeReference((string)null) }; yield return new object[] { new CodeTypeReference(string.Empty) }; yield return new object[] { new CodeTypeReference("name") }; yield return new object[] { new CodeTypeReference("name`") }; yield return new object[] { new CodeTypeReference("name`1") }; yield return new object[] { new CodeTypeReference("name`2[]") }; var fullTypeReference = new CodeTypeReference("name`2"); fullTypeReference.TypeArguments.Add("type1"); fullTypeReference.TypeArguments.Add("type2"); yield return new object[] { fullTypeReference }; // CodeArrayCreateExpression. 
yield return new object[] { new CodeArrayCreateExpression() }; yield return new object[] { new CodeArrayCreateExpression(new CodeTypeReference((string)null)) }; yield return new object[] { new CodeArrayCreateExpression(new CodeTypeReference(string.Empty)) }; yield return new object[] { new CodeArrayCreateExpression(new CodeTypeReference("name")) }; yield return new object[] { new CodeArrayCreateExpression(new CodeTypeReference("name")) { SizeExpression = new CodePrimitiveExpression(1) } } ; yield return new object[] { new CodeArrayCreateExpression(new CodeTypeReference("name"), new CodeExpression[] { new CodePrimitiveExpression() }) { SizeExpression = new CodePrimitiveExpression(1) } }; yield return new object[] { new CodeArrayCreateExpression(new CodeTypeReference("name"), new CodeExpression[] { new CodePrimitiveExpression(1) }) }; yield return new object[] { new CodeArrayCreateExpression(new CodeTypeReference("name"), new CodeExpression[] { new CodePrimitiveExpression(1) }) { SizeExpression = new CodeExpression() } }; // CodeBaseReferenceExpression. yield return new object[] { new CodeBaseReferenceExpression() }; // CodeBinaryOperatorExpression. yield return new object[] { new CodeBinaryOperatorExpression(new CodePrimitiveExpression(1), CodeBinaryOperatorType.Add, new CodePrimitiveExpression(2)) }; // CodeCastExpression. yield return new object[] { new CodeCastExpression(new CodeTypeReference((string)null), new CodePrimitiveExpression(1)) }; yield return new object[] { new CodeCastExpression(new CodeTypeReference(string.Empty), new CodePrimitiveExpression(1)) }; yield return new object[] { new CodeCastExpression(new CodeTypeReference("name"), new CodePrimitiveExpression(1)) }; // CodeDefaultValueExpression. 
yield return new object[] { new CodeDefaultValueExpression() }; yield return new object[] { new CodeDefaultValueExpression(new CodeTypeReference((string)null)) }; yield return new object[] { new CodeDefaultValueExpression(new CodeTypeReference(string.Empty)) }; yield return new object[] { new CodeDefaultValueExpression(new CodeTypeReference("name")) }; // CodeDelegateCreateExpression. yield return new object[] { new CodeDelegateCreateExpression(new CodeTypeReference("name"), new CodePrimitiveExpression(1), "methodName") }; // CodeFieldReferenceExpression. yield return new object[] { new CodeFieldReferenceExpression(null, "name") }; yield return new object[] { new CodeFieldReferenceExpression(new CodePrimitiveExpression(1), "name") }; // CodeArgumentReferenceExpression. yield return new object[] { new CodeArgumentReferenceExpression("name") }; // CodeVariableReferenceExpression. yield return new object[] { new CodeVariableReferenceExpression("name") }; // CodeIndexerExpression. yield return new object[] { new CodeIndexerExpression(new CodePrimitiveExpression(1)) }; yield return new object[] { new CodeIndexerExpression(new CodePrimitiveExpression(1), new CodePrimitiveExpression(2)) }; // CodeArrayIndexerExpression. yield return new object[] { new CodeArrayIndexerExpression(new CodePrimitiveExpression(1)) }; yield return new object[] { new CodeArrayIndexerExpression(new CodePrimitiveExpression(1), new CodePrimitiveExpression(2)) }; // CodeSnippetExpression. yield return new object[] { new CodeSnippetExpression() }; yield return new object[] { new CodeSnippetExpression(null) }; yield return new object[] { new CodeSnippetExpression(string.Empty) }; yield return new object[] { new CodeSnippetExpression("0") }; yield return new object[] { new CodeSnippetExpression("name") }; // CodeMethodInvokeExpression. 
yield return new object[] { new CodeMethodInvokeExpression(new CodeMethodReferenceExpression(null, "name")) }; yield return new object[] { new CodeMethodInvokeExpression(new CodeMethodReferenceExpression(new CodePrimitiveExpression(1), "name")) }; yield return new object[] { new CodeMethodInvokeExpression(new CodeMethodReferenceExpression(new CodePrimitiveExpression(1), "name", new CodeTypeReference[] { new CodeTypeReference((string)null), new CodeTypeReference(string.Empty), new CodeTypeReference("name") }), new CodePrimitiveExpression(1)) }; // CodeMethodReferenceExpression. yield return new object[] { new CodeMethodReferenceExpression(null, "name") }; yield return new object[] { new CodeMethodReferenceExpression(new CodePrimitiveExpression(1), "name") }; yield return new object[] { new CodeMethodReferenceExpression(new CodePrimitiveExpression(1), "name", new CodeTypeReference[] { new CodeTypeReference((string)null), new CodeTypeReference(string.Empty), new CodeTypeReference("name") }) }; // CodeEventReferenceExpression. yield return new object[] { new CodeEventReferenceExpression(null, "name") }; yield return new object[] { new CodeEventReferenceExpression(new CodePrimitiveExpression(1), "name") }; // CodeDelegateInvokeExpression. yield return new object[] { new CodeDelegateInvokeExpression() }; yield return new object[] { new CodeDelegateInvokeExpression(new CodePrimitiveExpression(1)) }; yield return new object[] { new CodeDelegateInvokeExpression(new CodePrimitiveExpression(1), new CodePrimitiveExpression(2)) }; // CodeObjectCreateExpression. yield return new object[] { new CodeObjectCreateExpression() }; yield return new object[] { new CodeObjectCreateExpression(new CodeTypeReference("name")) }; yield return new object[] { new CodeObjectCreateExpression(new CodeTypeReference("name"), new CodePrimitiveExpression(1)) }; // CodeDirectionExpression. 
yield return new object[] { new CodeDirectionExpression(FieldDirection.In, new CodePrimitiveExpression(1)) }; // CodePrimitiveExpression. yield return new object[] { new CodePrimitiveExpression() }; yield return new object[] { new CodePrimitiveExpression(1) }; yield return new object[] { new CodePrimitiveExpression(null) }; yield return new object[] { new CodePrimitiveExpression(string.Empty) }; yield return new object[] { new CodePrimitiveExpression("0") }; yield return new object[] { new CodePrimitiveExpression("name") }; // CodePropertyReferenceExpression. yield return new object[] { new CodePropertyReferenceExpression(null, "name") }; yield return new object[] { new CodePropertyReferenceExpression(new CodePrimitiveExpression(1), "name") }; // CodePropertySetValueReferenceExpression. yield return new object[] { new CodePropertySetValueReferenceExpression() }; // CodeThisReferenceExpression. yield return new object[] { new CodeThisReferenceExpression() }; // CodeTypeReferenceExpression. yield return new object[] { new CodeTypeReferenceExpression() }; yield return new object[] { new CodeTypeReferenceExpression(new CodeTypeReference("name")) }; // CodeTypeOfExpression. yield return new object[] { new CodeTypeOfExpression() }; yield return new object[] { new CodeTypeOfExpression(new CodeTypeReference("name")) }; // CodeMethodReturnStatement. yield return new object[] { new CodeMethodReturnStatement() }; yield return new object[] { new CodeMethodReturnStatement(null) }; yield return new object[] { new CodeMethodReturnStatement(new CodePrimitiveExpression("1")) }; // CodeConditionStatement. 
yield return new object[] { new CodeConditionStatement(new CodePrimitiveExpression("1")) }; yield return new object[] { new CodeConditionStatement(new CodePrimitiveExpression("1"), new CodeStatement[] { new CodeMethodReturnStatement(), new CodeMethodReturnStatement { LinePragma = new CodeLinePragma() } }, new CodeStatement[0]) }; yield return new object[] { new CodeConditionStatement(new CodePrimitiveExpression("1"), new CodeStatement[0], new CodeStatement[] { new CodeMethodReturnStatement(), new CodeMethodReturnStatement { LinePragma = new CodeLinePragma() } }) }; yield return new object[] { new CodeConditionStatement(new CodePrimitiveExpression("1"), new CodeStatement[] { new CodeMethodReturnStatement(), new CodeMethodReturnStatement { LinePragma = new CodeLinePragma() } }, new CodeStatement[] { new CodeMethodReturnStatement(), new CodeMethodReturnStatement { LinePragma = new CodeLinePragma() } }) }; // CodeTryCatchFinallyStatement. yield return new object[] { new CodeTryCatchFinallyStatement() }; yield return new object[] { new CodeTryCatchFinallyStatement( new CodeStatement[] { new CodeMethodReturnStatement(), new CodeMethodReturnStatement { LinePragma = new CodeLinePragma() } }, new CodeCatchClause[] { new CodeCatchClause("localName"), new CodeCatchClause("localName", new CodeTypeReference("exceptionType")), new CodeCatchClause("localName", new CodeTypeReference("exceptionType"), new CodeMethodReturnStatement(), new CodeMethodReturnStatement { LinePragma = new CodeLinePragma() }) }, new CodeStatement[] { new CodeMethodReturnStatement(), new CodeMethodReturnStatement { LinePragma = new CodeLinePragma() } } ) }; yield return new object[] { new CodeTryCatchFinallyStatement( new CodeStatement[0], new CodeCatchClause[] { new CodeCatchClause("localName"), new CodeCatchClause("localName", new CodeTypeReference("exceptionType")), new CodeCatchClause("localName", new CodeTypeReference("exceptionType"), new CodeMethodReturnStatement(), new CodeMethodReturnStatement { 
LinePragma = new CodeLinePragma() }) }, new CodeStatement[] { new CodeMethodReturnStatement(), new CodeMethodReturnStatement { LinePragma = new CodeLinePragma() } } ) }; yield return new object[] { new CodeTryCatchFinallyStatement( new CodeStatement[] { new CodeMethodReturnStatement(), new CodeMethodReturnStatement { LinePragma = new CodeLinePragma() } }, new CodeCatchClause[0], new CodeStatement[] { new CodeMethodReturnStatement(), new CodeMethodReturnStatement { LinePragma = new CodeLinePragma() } } ) }; yield return new object[] { new CodeTryCatchFinallyStatement( new CodeStatement[] { new CodeMethodReturnStatement(), new CodeMethodReturnStatement { LinePragma = new CodeLinePragma() } }, new CodeCatchClause[] { new CodeCatchClause("localName"), new CodeCatchClause("localName", new CodeTypeReference("exceptionType")), new CodeCatchClause("localName", new CodeTypeReference("exceptionType"), new CodeMethodReturnStatement(), new CodeMethodReturnStatement { LinePragma = new CodeLinePragma() }) }, new CodeStatement[0] ) }; // CodeAssignStatement. yield return new object[] { new CodeAssignStatement(new CodePrimitiveExpression(1), new CodePrimitiveExpression(1)) }; // CodeExpressionStatement. yield return new object[] { new CodeExpressionStatement(new CodePrimitiveExpression("1")) }; // CodeIterationStatement. yield return new object[] { new CodeIterationStatement(new CodeMethodReturnStatement(), new CodePrimitiveExpression(1), new CodeMethodReturnStatement(), new CodeMethodReturnStatement(), new CodeMethodReturnStatement { LinePragma = new CodeLinePragma() }) }; yield return new object[] { new CodeIterationStatement(new CodeMethodReturnStatement(), new CodePrimitiveExpression(1), new CodeMethodReturnStatement()) }; // CodeThrowExceptionStatement. 
yield return new object[] { new CodeThrowExceptionStatement() }; yield return new object[] { new CodeThrowExceptionStatement(null) }; yield return new object[] { new CodeThrowExceptionStatement(new CodePrimitiveExpression(1)) }; // CodeSnippetStatement. yield return new object[] { new CodeSnippetStatement() }; yield return new object[] { new CodeSnippetStatement(null) }; yield return new object[] { new CodeSnippetStatement(string.Empty) }; yield return new object[] { new CodeSnippetStatement("value") }; // CodeVariableDeclarationStatement. yield return new object[] { new CodeVariableDeclarationStatement(new CodeTypeReference("name"), "name") }; yield return new object[] { new CodeVariableDeclarationStatement(new CodeTypeReference("name"), "name", new CodePrimitiveExpression(1)) }; // CodeAttachEventStatement. yield return new object[] { new CodeAttachEventStatement(new CodeEventReferenceExpression(null, "name"), new CodePrimitiveExpression(1)) }; yield return new object[] { new CodeAttachEventStatement(new CodeEventReferenceExpression(new CodePrimitiveExpression(1), "name"), new CodePrimitiveExpression(1)) }; // CodeRemoveEventStatement. yield return new object[] { new CodeRemoveEventStatement(new CodeEventReferenceExpression(null, "name"), new CodePrimitiveExpression(1)) }; yield return new object[] { new CodeRemoveEventStatement(new CodeEventReferenceExpression(new CodePrimitiveExpression(1), "name"), new CodePrimitiveExpression(1)) }; // CodeGotoStatement. yield return new object[] { new CodeGotoStatement("name") }; // CodeLabeledStatement. yield return new object[] { new CodeLabeledStatement("name") }; yield return new object[] { new CodeLabeledStatement("name", null) }; yield return new object[] { new CodeLabeledStatement("name", new CodeMethodReturnStatement()) }; // Misc. 
            // Base names containing punctuation used by type syntax (nesting, generics,
            // arrays, pointers, by-ref) are accepted as-is by the identifier validator.
            yield return new object[] { new CodeTypeReference(":") };
            yield return new object[] { new CodeTypeReference(".") };
            yield return new object[] { new CodeTypeReference("$") };
            yield return new object[] { new CodeTypeReference("+") };
            yield return new object[] { new CodeTypeReference("<") };
            yield return new object[] { new CodeTypeReference(">") };
            yield return new object[] { new CodeTypeReference("-") };
            yield return new object[] { new CodeTypeReference("[") };
            yield return new object[] { new CodeTypeReference("]") };
            yield return new object[] { new CodeTypeReference(",") };
            yield return new object[] { new CodeTypeReference("&") };
            yield return new object[] { new CodeTypeReference("*") };
            yield return new object[] { new CodeTypeReference("_abc") };
        }

        /// <summary>
        /// <see cref="CodeGenerator.ValidateIdentifiers(CodeObject)"/> completes without
        /// throwing for every well-formed CodeDom graph in the valid test data.
        /// </summary>
        [Theory]
        [MemberData(nameof(ValidateIdentifiers_Valid_TestData))]
        public void ValidateIdentifiers_InvokeValid_Nop(CodeObject e)
        {
            CodeGenerator.ValidateIdentifiers(e);
        }

        /// <summary>
        /// Yields CodeDom object graphs containing invalid identifiers or malformed nodes
        /// (empty names, names starting with a digit, generic-arity mismatches, etc.).
        /// </summary>
        public static IEnumerable<object[]> ValidateIdentifiers_Invalid_TestData()
        {
            // CodeTypeReference.
            yield return new object[] { new CodeTypeReference() };
            yield return new object[] { new CodeTypeReference("0") };

            // Arity mismatch: "name`2" declares two generic arguments, only one supplied.
            var invalidTypeReference1 = new CodeTypeReference("name`2");
            invalidTypeReference1.TypeArguments.Add("type1");
            yield return new object[] { invalidTypeReference1 };

            // Correct arity, but the first type argument is an empty (invalid) reference.
            var invalidTypeReference2 = new CodeTypeReference("name`2");
            invalidTypeReference2.TypeArguments.Add(new CodeTypeReference());
            invalidTypeReference2.TypeArguments.Add("name");
            yield return new object[] { invalidTypeReference2 };

            // Correct arity, but the first type argument name "0" is not a valid identifier.
            var invalidTypeReference3 = new CodeTypeReference("name`2");
            invalidTypeReference3.TypeArguments.Add(new CodeTypeReference("0"));
            invalidTypeReference3.TypeArguments.Add("name");
            yield return new object[] { invalidTypeReference3 };

            // CodeChecksumPragma.
            // "\0" is not a valid file name.
            yield return new object[] { new CodeChecksumPragma("\0", Guid.NewGuid(), new byte[0]) };

            // CodeRegionDirective.
foreach (char newLineChar in new char[] { '\r', '\n', '\u2028', '\u2029', '\u0085' }) { yield return new object[] { new CodeRegionDirective(CodeRegionMode.None, $"te{newLineChar}xt") }; } // CodeNamespaceImport. yield return new object[] { new CodeNamespaceImport() }; yield return new object[] { new CodeNamespaceImport(null) }; yield return new object[] { new CodeNamespaceImport(string.Empty) }; yield return new object[] { new CodeNamespaceImport("0") }; var invalidNamespaceImport1 = new CodeNamespace(); invalidNamespaceImport1.Imports.Add(new CodeNamespaceImport()); yield return new object[] { invalidNamespaceImport1 }; var invalidNamespaceImport2 = new CodeNamespace(); invalidNamespaceImport2.Imports.Add(new CodeNamespaceImport(string.Empty)); yield return new object[] { invalidNamespaceImport2 }; var invalidNamespaceImport3 = new CodeNamespace(); invalidNamespaceImport3.Imports.Add(new CodeNamespaceImport(string.Empty)); yield return new object[] { invalidNamespaceImport3 }; // CodeMemberEvent. 
yield return new object[] { new CodeMemberEvent { PrivateImplementationType = new CodeTypeReference("name") } }; yield return new object[] { new CodeMemberEvent { Name = null, PrivateImplementationType = new CodeTypeReference("name") } }; yield return new object[] { new CodeMemberEvent { Name = string.Empty, PrivateImplementationType = new CodeTypeReference("name") } }; yield return new object[] { new CodeMemberEvent { Name = "0", PrivateImplementationType = new CodeTypeReference("name") } }; yield return new object[] { new CodeMemberEvent { Name = "name", PrivateImplementationType = new CodeTypeReference("name"), Type = new CodeTypeReference() } }; yield return new object[] { new CodeMemberEvent { Name = "name", PrivateImplementationType = new CodeTypeReference("name"), Type = new CodeTypeReference("0") } }; yield return new object[] { new CodeMemberEvent { Name = "name", PrivateImplementationType = new CodeTypeReference("name"), Type = invalidTypeReference1 } }; yield return new object[] { new CodeMemberEvent { Name = "name", PrivateImplementationType = new CodeTypeReference("name"), Type = invalidTypeReference2 } }; yield return new object[] { new CodeMemberEvent { Name = "name", PrivateImplementationType = new CodeTypeReference("name"), Type = invalidTypeReference3 } }; var invalidEventStartDirective1 = new CodeMemberEvent(); invalidEventStartDirective1.StartDirectives.Add(new CodeDirective()); yield return new object[] { invalidEventStartDirective1 }; var invalidEventStartDirective2 = new CodeMemberEvent(); invalidEventStartDirective2.StartDirectives.Add(new CodeChecksumPragma("\0", Guid.NewGuid(), new byte[0])); yield return new object[] { invalidEventStartDirective2 }; foreach (char newLineChar in new char[] { '\r', '\n', '\u2028', '\u2029', '\u0085' }) { var invalidEventStartDirective3 = new CodeMemberEvent(); invalidEventStartDirective3.StartDirectives.Add(new CodeRegionDirective(CodeRegionMode.None, $"te{newLineChar}xt")); yield return new object[] { 
invalidEventStartDirective3 }; } var invalidEventEndDirective1 = new CodeMemberEvent(); invalidEventEndDirective1.EndDirectives.Add(new CodeDirective()); yield return new object[] { invalidEventEndDirective1 }; var invalidEventEndDirective2 = new CodeMemberEvent(); invalidEventEndDirective2.EndDirectives.Add(new CodeChecksumPragma("\0", Guid.NewGuid(), new byte[0])); yield return new object[] { invalidEventEndDirective2 }; foreach (char newLineChar in new char[] { '\r', '\n', '\u2028', '\u2029', '\u0085' }) { var invalidEventEndDirective3 = new CodeMemberEvent(); invalidEventEndDirective3.EndDirectives.Add(new CodeRegionDirective(CodeRegionMode.None, $"te{newLineChar}xt")); yield return new object[] { invalidEventEndDirective3 }; } var invalidEventImplementationType1 = new CodeMemberEvent(); invalidEventImplementationType1.ImplementationTypes.Add(new CodeTypeReference()); yield return new object[] { invalidEventImplementationType1 }; var invalidEventImplementationType2 = new CodeMemberEvent(); invalidEventImplementationType2.ImplementationTypes.Add(new CodeTypeReference("0")); yield return new object[] { invalidEventImplementationType2 }; var invalidEventImplementationType3 = new CodeMemberEvent(); invalidEventImplementationType3.ImplementationTypes.Add(invalidTypeReference1); yield return new object[] { invalidEventImplementationType3 }; var invalidEventImplementationType4 = new CodeMemberEvent(); invalidEventImplementationType4.ImplementationTypes.Add(invalidTypeReference2); yield return new object[] { invalidEventImplementationType4 }; var invalidEventImplementationType5 = new CodeMemberEvent(); invalidEventImplementationType5.ImplementationTypes.Add(invalidTypeReference3); yield return new object[] { invalidEventImplementationType5 }; // CodeMemberField. 
yield return new object[] { new CodeMemberField() }; yield return new object[] { new CodeMemberField(new CodeTypeReference(), "name") }; yield return new object[] { new CodeMemberField(new CodeTypeReference("0"), "name") }; yield return new object[] { new CodeMemberField(invalidTypeReference1, "name") }; yield return new object[] { new CodeMemberField(invalidTypeReference2, "name") }; yield return new object[] { new CodeMemberField(invalidTypeReference3, "name") }; yield return new object[] { new CodeMemberField(new CodeTypeReference("type"), null) }; yield return new object[] { new CodeMemberField(new CodeTypeReference("type"), string.Empty) }; yield return new object[] { new CodeMemberField(new CodeTypeReference("type"), "0") }; var invalidFieldAttribute1 = new CodeMemberField(new CodeTypeReference("type"), "name"); invalidFieldAttribute1.CustomAttributes.Add(new CodeAttributeDeclaration()); yield return new object[] { invalidFieldAttribute1 }; var invalidFieldAttribute2 = new CodeMemberField(new CodeTypeReference("type"), "name"); invalidFieldAttribute2.CustomAttributes.Add(new CodeAttributeDeclaration((string)null)); yield return new object[] { invalidFieldAttribute2 }; var invalidFieldAttribute3 = new CodeMemberField(new CodeTypeReference("type"), "name"); invalidFieldAttribute3.CustomAttributes.Add(new CodeAttributeDeclaration(string.Empty)); yield return new object[] { invalidFieldAttribute3 }; var invalidFieldAttribute4 = new CodeMemberField(new CodeTypeReference("type"), "name"); invalidFieldAttribute4.CustomAttributes.Add(new CodeAttributeDeclaration("0")); yield return new object[] { invalidFieldAttribute4 }; var invalidFieldAttribute5 = new CodeMemberField(new CodeTypeReference("type"), "name"); invalidFieldAttribute5.CustomAttributes.Add(new CodeAttributeDeclaration("name", new CodeAttributeArgument("0", new CodePrimitiveExpression(1)))); yield return new object[] { invalidFieldAttribute5 }; var invalidFieldAttribute6 = new CodeMemberField(new 
CodeTypeReference("type"), "name"); invalidFieldAttribute6.CustomAttributes.Add(new CodeAttributeDeclaration("name", new CodeAttributeArgument("name", new CodeExpression()))); yield return new object[] { invalidFieldAttribute6 }; var invalidFieldStartDirective1 = new CodeMemberField(new CodeTypeReference("type"), "name"); invalidFieldStartDirective1.StartDirectives.Add(new CodeDirective()); yield return new object[] { invalidFieldStartDirective1 }; var invalidFieldStartDirective2 = new CodeMemberField(new CodeTypeReference("type"), "name"); invalidFieldStartDirective2.StartDirectives.Add(new CodeChecksumPragma("\0", Guid.NewGuid(), new byte[0])); yield return new object[] { invalidFieldStartDirective2 }; foreach (char newLineChar in new char[] { '\r', '\n', '\u2028', '\u2029', '\u0085' }) { var invalidFieldStartDirective3 = new CodeMemberField(new CodeTypeReference("type"), "name"); invalidFieldStartDirective3.StartDirectives.Add(new CodeRegionDirective(CodeRegionMode.None, $"te{newLineChar}xt")); yield return new object[] { invalidFieldStartDirective3 }; } var invalidFieldEndDirective1 = new CodeMemberField(new CodeTypeReference("type"), "name"); invalidFieldEndDirective1.EndDirectives.Add(new CodeDirective()); yield return new object[] { invalidFieldEndDirective1 }; var invalidFieldEndDirective2 = new CodeMemberField(new CodeTypeReference("type"), "name"); invalidFieldEndDirective2.EndDirectives.Add(new CodeChecksumPragma("\0", Guid.NewGuid(), new byte[0])); yield return new object[] { invalidFieldEndDirective2 }; foreach (char newLineChar in new char[] { '\r', '\n', '\u2028', '\u2029', '\u0085' }) { var invalidFieldEndDirective3 = new CodeMemberField(new CodeTypeReference("type"), "name"); invalidFieldEndDirective3.EndDirectives.Add(new CodeRegionDirective(CodeRegionMode.None, $"te{newLineChar}xt")); yield return new object[] { invalidFieldEndDirective3 }; } yield return new object[] { new CodeMemberField(new CodeTypeReference("type"), "name") { InitExpression = 
new CodeExpression() } }; // CodeParameterDeclarationExpression. yield return new object[] { new CodeParameterDeclarationExpression() }; yield return new object[] { new CodeParameterDeclarationExpression(new CodeTypeReference(), "name") }; yield return new object[] { new CodeParameterDeclarationExpression(new CodeTypeReference("0"), "name") }; yield return new object[] { new CodeParameterDeclarationExpression(invalidTypeReference1, "name") }; yield return new object[] { new CodeParameterDeclarationExpression(invalidTypeReference2, "name") }; yield return new object[] { new CodeParameterDeclarationExpression(invalidTypeReference3, "name") }; yield return new object[] { new CodeParameterDeclarationExpression(new CodeTypeReference("type"), null) }; yield return new object[] { new CodeParameterDeclarationExpression(new CodeTypeReference("type"), string.Empty) }; yield return new object[] { new CodeParameterDeclarationExpression(new CodeTypeReference("type"), "0") }; var invalidParameterAttribute1 = new CodeParameterDeclarationExpression(new CodeTypeReference("type"), "name"); invalidParameterAttribute1.CustomAttributes.Add(new CodeAttributeDeclaration()); yield return new object[] { invalidParameterAttribute1 }; var invalidParameterAttribute2 = new CodeParameterDeclarationExpression(new CodeTypeReference("type"), "name"); invalidParameterAttribute2.CustomAttributes.Add(new CodeAttributeDeclaration((string)null)); yield return new object[] { invalidParameterAttribute2 }; var invalidParameterAttribute3 = new CodeParameterDeclarationExpression(new CodeTypeReference("type"), "name"); invalidParameterAttribute3.CustomAttributes.Add(new CodeAttributeDeclaration(string.Empty)); yield return new object[] { invalidParameterAttribute3 }; var invalidParameterAttribute4 = new CodeParameterDeclarationExpression(new CodeTypeReference("type"), "name"); invalidParameterAttribute4.CustomAttributes.Add(new CodeAttributeDeclaration("0")); yield return new object[] { 
invalidParameterAttribute4 }; var invalidParameterAttribute5 = new CodeParameterDeclarationExpression(new CodeTypeReference("type"), "name"); invalidParameterAttribute5.CustomAttributes.Add(new CodeAttributeDeclaration("name", new CodeAttributeArgument("0", new CodePrimitiveExpression(1)))); yield return new object[] { invalidParameterAttribute5 }; var invalidParameterAttribute6 = new CodeParameterDeclarationExpression(new CodeTypeReference("type"), "name"); invalidParameterAttribute6.CustomAttributes.Add(new CodeAttributeDeclaration("name", new CodeAttributeArgument("name", new CodeExpression()))); yield return new object[] { invalidParameterAttribute6 }; // CodeMemberMethod. yield return new object[] { new CodeMemberMethod() }; yield return new object[] { new CodeMemberMethod { Name = null } }; yield return new object[] { new CodeMemberMethod { Name = string.Empty } }; yield return new object[] { new CodeMemberMethod { Name = "0" } }; yield return new object[] { new CodeMemberMethod { Name = "name", ReturnType = new CodeTypeReference() } }; yield return new object[] { new CodeMemberMethod { Name = "name", ReturnType = new CodeTypeReference("0") } }; yield return new object[] { new CodeMemberMethod { Name = "name", ReturnType = invalidTypeReference1 } }; yield return new object[] { new CodeMemberMethod { Name = "name", ReturnType = invalidTypeReference2 } }; yield return new object[] { new CodeMemberMethod { Name = "name", ReturnType = invalidTypeReference3 } }; yield return new object[] { new CodeMemberMethod { Name = "name", PrivateImplementationType = new CodeTypeReference() } }; yield return new object[] { new CodeMemberMethod { Name = "name", PrivateImplementationType = new CodeTypeReference("0") } }; yield return new object[] { new CodeMemberMethod { Name = "name", PrivateImplementationType = invalidTypeReference1 } }; yield return new object[] { new CodeMemberMethod { Name = "name", PrivateImplementationType = invalidTypeReference2 } }; yield return new 
object[] { new CodeMemberMethod { Name = "name", PrivateImplementationType = invalidTypeReference3 } }; var invalidMethodAttribute1 = new CodeMemberMethod { Name = "name" }; invalidMethodAttribute1.CustomAttributes.Add(new CodeAttributeDeclaration()); yield return new object[] { invalidMethodAttribute1 }; var invalidMethodAttribute2 = new CodeMemberMethod { Name = "name" }; invalidMethodAttribute2.CustomAttributes.Add(new CodeAttributeDeclaration((string)null)); yield return new object[] { invalidMethodAttribute2 }; var invalidMethodAttribute3 = new CodeMemberMethod { Name = "name" }; invalidMethodAttribute3.CustomAttributes.Add(new CodeAttributeDeclaration(string.Empty)); yield return new object[] { invalidMethodAttribute3 }; var invalidMethodAttribute4 = new CodeMemberMethod { Name = "name" }; invalidMethodAttribute4.CustomAttributes.Add(new CodeAttributeDeclaration("0")); yield return new object[] { invalidMethodAttribute4 }; var invalidMethodAttribute5 = new CodeMemberMethod { Name = "name" }; invalidMethodAttribute5.CustomAttributes.Add(new CodeAttributeDeclaration("name", new CodeAttributeArgument("0", new CodePrimitiveExpression(1)))); yield return new object[] { invalidMethodAttribute5 }; var invalidMethodAttribute6 = new CodeMemberMethod { Name = "name" }; invalidMethodAttribute6.CustomAttributes.Add(new CodeAttributeDeclaration("name", new CodeAttributeArgument("name", new CodeExpression()))); yield return new object[] { invalidMethodAttribute6 }; var invalidMethodStartDirective1 = new CodeMemberMethod { Name = "name" }; invalidMethodStartDirective1.StartDirectives.Add(new CodeDirective()); yield return new object[] { invalidMethodStartDirective1 }; var invalidMethodStartDirective2 = new CodeMemberMethod { Name = "name" }; invalidMethodStartDirective2.StartDirectives.Add(new CodeChecksumPragma("\0", Guid.NewGuid(), new byte[0])); yield return new object[] { invalidMethodStartDirective2 }; foreach (char newLineChar in new char[] { '\r', '\n', '\u2028', 
'\u2029', '\u0085' }) { var invalidMethodStartDirective3 = new CodeMemberMethod { Name = "name" }; invalidMethodStartDirective3.StartDirectives.Add(new CodeRegionDirective(CodeRegionMode.None, $"te{newLineChar}xt")); yield return new object[] { invalidMethodStartDirective3 }; } var invalidMethodEndDirective1 = new CodeMemberMethod { Name = "name" }; invalidMethodEndDirective1.EndDirectives.Add(new CodeDirective()); yield return new object[] { invalidMethodEndDirective1 }; var invalidMethodEndDirective2 = new CodeMemberMethod { Name = "name" }; invalidMethodEndDirective2.EndDirectives.Add(new CodeChecksumPragma("\0", Guid.NewGuid(), new byte[0])); yield return new object[] { invalidMethodEndDirective2 }; foreach (char newLineChar in new char[] { '\r', '\n', '\u2028', '\u2029', '\u0085' }) { var invalidMethodEndDirective3 = new CodeMemberMethod { Name = "name" }; invalidMethodEndDirective3.EndDirectives.Add(new CodeRegionDirective(CodeRegionMode.None, $"te{newLineChar}xt")); yield return new object[] { invalidMethodEndDirective3 }; } var invalidMethodImplementationType1 = new CodeMemberMethod { Name = "name" }; invalidMethodImplementationType1.ImplementationTypes.Add(new CodeTypeReference()); yield return new object[] { invalidMethodImplementationType1 }; var invalidMethodImplementationType2 = new CodeMemberMethod { Name = "name" }; invalidMethodImplementationType2.ImplementationTypes.Add(new CodeTypeReference("0")); yield return new object[] { invalidMethodImplementationType2 }; var invalidMethodImplementationType3 = new CodeMemberMethod { Name = "name" }; invalidMethodImplementationType3.ImplementationTypes.Add(invalidTypeReference1); yield return new object[] { invalidMethodImplementationType3 }; var invalidMethodImplementationType4 = new CodeMemberMethod { Name = "name" }; invalidMethodImplementationType4.ImplementationTypes.Add(invalidTypeReference2); yield return new object[] { invalidMethodImplementationType4 }; var invalidMethodImplementationType5 = new 
CodeMemberMethod { Name = "name" }; invalidMethodImplementationType5.ImplementationTypes.Add(invalidTypeReference3); yield return new object[] { invalidMethodImplementationType5 }; var invalidMethodReturnTypeAttribute1 = new CodeMemberMethod { Name = "name" }; invalidMethodReturnTypeAttribute1.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration()); yield return new object[] { invalidMethodReturnTypeAttribute1 }; var invalidMethodReturnTypeAttribute2 = new CodeMemberMethod { Name = "name" }; invalidMethodReturnTypeAttribute2.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration((string)null)); yield return new object[] { invalidMethodReturnTypeAttribute2 }; var invalidMethodReturnTypeAttribute3 = new CodeMemberMethod { Name = "name" }; invalidMethodReturnTypeAttribute3.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration(string.Empty)); yield return new object[] { invalidMethodReturnTypeAttribute3 }; var invalidMethodReturnTypeAttribute4 = new CodeMemberMethod { Name = "name" }; invalidMethodReturnTypeAttribute4.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration("0")); yield return new object[] { invalidMethodReturnTypeAttribute4 }; var invalidMethodReturnTypeAttribute5 = new CodeMemberMethod { Name = "name" }; invalidMethodReturnTypeAttribute5.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration("name", new CodeAttributeArgument("0", new CodePrimitiveExpression(1)))); yield return new object[] { invalidMethodReturnTypeAttribute5 }; var invalidMethodReturnTypeAttribute6 = new CodeMemberMethod { Name = "name" }; invalidMethodReturnTypeAttribute6.ReturnTypeCustomAttributes.Add(new CodeAttributeDeclaration("name", new CodeAttributeArgument("name", new CodeExpression()))); yield return new object[] { invalidMethodReturnTypeAttribute6 }; var invalidMethodStatement = new CodeMemberMethod { Name = "name" }; invalidMethodStatement.Statements.Add(new CodeStatement()); yield return new object[] { invalidMethodStatement }; var 
invalidMethodParameter1 = new CodeMemberMethod { Name = "name" }; invalidMethodParameter1.Parameters.Add(new CodeParameterDeclarationExpression()); yield return new object[] { invalidMethodParameter1 }; var invalidMethodParameter2 = new CodeMemberMethod { Name = "name" }; invalidMethodParameter2.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference(), "name")); yield return new object[] { invalidMethodParameter2 }; var invalidMethodParameter3 = new CodeMemberMethod { Name = "name" }; invalidMethodParameter3.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("0"), "name")); yield return new object[] { invalidMethodParameter3 }; var invalidMethodParameter4 = new CodeMemberMethod { Name = "name" }; invalidMethodParameter4.Parameters.Add(new CodeParameterDeclarationExpression(invalidTypeReference1, "name")); yield return new object[] { invalidMethodParameter4 }; var invalidMethodParameter5 = new CodeMemberMethod { Name = "name" }; invalidMethodParameter5.Parameters.Add(new CodeParameterDeclarationExpression(invalidTypeReference2, "name")); yield return new object[] { invalidMethodParameter5 }; var invalidMethodParameter6 = new CodeMemberMethod { Name = "name" }; invalidMethodParameter6.Parameters.Add(new CodeParameterDeclarationExpression(invalidTypeReference3, "name")); yield return new object[] { invalidMethodParameter6 }; var invalidMethodParameter7 = new CodeMemberMethod { Name = "name" }; invalidMethodParameter7.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("type"), null)); yield return new object[] { invalidMethodParameter7 }; var invalidMethodParameter8 = new CodeMemberMethod { Name = "name" }; invalidMethodParameter8.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("type"), string.Empty)); yield return new object[] { invalidMethodParameter8 }; var invalidMethodParameter9 = new CodeMemberMethod { Name = "name" }; invalidMethodParameter9.Parameters.Add(new 
CodeParameterDeclarationExpression(new CodeTypeReference("type"), "0")); yield return new object[] { invalidMethodParameter9 }; var invalidMethodParameterAttribute1 = new CodeMemberMethod { Name = "name" }; invalidMethodParameterAttribute1.Parameters.Add(invalidParameterAttribute1); yield return new object[] { invalidMethodParameterAttribute1 }; var invalidMethodParameterAttribute2 = new CodeMemberMethod { Name = "name" }; invalidMethodParameterAttribute2.Parameters.Add(invalidParameterAttribute2); yield return new object[] { invalidMethodParameterAttribute2 }; var invalidMethodParameterAttribute3 = new CodeMemberMethod { Name = "name" }; invalidMethodParameterAttribute3.Parameters.Add(invalidParameterAttribute3); yield return new object[] { invalidMethodParameterAttribute3 }; var invalidMethodParameterAttribute4 = new CodeMemberMethod { Name = "name" }; invalidMethodParameterAttribute4.Parameters.Add(invalidParameterAttribute4); yield return new object[] { invalidMethodParameterAttribute4 }; var invalidMethodParameterAttribute5 = new CodeMemberMethod { Name = "name" }; invalidMethodParameterAttribute5.Parameters.Add(invalidParameterAttribute5); yield return new object[] { invalidMethodParameterAttribute5 }; var invalidMethodParameterAttribute6 = new CodeMemberMethod { Name = "name" }; invalidMethodParameterAttribute6.Parameters.Add(invalidParameterAttribute6); yield return new object[] { invalidMethodParameterAttribute6 }; var invalidMethodTypeParameter1 = new CodeMemberMethod { Name = "name" }; invalidMethodTypeParameter1.TypeParameters.Add(new CodeTypeParameter()); yield return new object[] { invalidMethodTypeParameter1 }; var invalidMethodTypeParameter2 = new CodeMemberMethod { Name = "name" }; invalidMethodTypeParameter2.TypeParameters.Add(new CodeTypeParameter(null)); yield return new object[] { invalidMethodTypeParameter2 }; var invalidMethodTypeParameter3 = new CodeMemberMethod { Name = "name" }; invalidMethodTypeParameter3.TypeParameters.Add(new 
CodeTypeParameter(string.Empty)); yield return new object[] { invalidMethodTypeParameter3 }; var invalidMethodTypeParameter4 = new CodeMemberMethod { Name = "name" }; invalidMethodTypeParameter4.TypeParameters.Add(new CodeTypeParameter("0")); yield return new object[] { invalidMethodTypeParameter4 }; var invalidTypeParameterAttribute1 = new CodeTypeParameter("parameter"); invalidTypeParameterAttribute1.CustomAttributes.Add(new CodeAttributeDeclaration()); var invalidMethodTypeParameterAttribute1 = new CodeMemberMethod { Name = "name" }; invalidMethodTypeParameterAttribute1.TypeParameters.Add(invalidTypeParameterAttribute1); yield return new object[] { invalidMethodTypeParameterAttribute1 }; var invalidTypeParameterAttribute2 = new CodeTypeParameter("parameter"); invalidTypeParameterAttribute2.CustomAttributes.Add(new CodeAttributeDeclaration((string)null)); var invalidMethodTypeParameterAttribute2 = new CodeMemberMethod { Name = "name" }; invalidMethodTypeParameterAttribute2.TypeParameters.Add(invalidTypeParameterAttribute2); yield return new object[] { invalidMethodTypeParameterAttribute2 }; var invalidTypeParameterAttribute3 = new CodeTypeParameter("parameter"); invalidTypeParameterAttribute3.CustomAttributes.Add(new CodeAttributeDeclaration(string.Empty)); var invalidMethodTypeParameterAttribute3 = new CodeMemberMethod { Name = "name" }; invalidMethodTypeParameterAttribute3.TypeParameters.Add(invalidTypeParameterAttribute3); yield return new object[] { invalidMethodTypeParameterAttribute3 }; var invalidTypeParameterAttribute4 = new CodeTypeParameter("parameter"); invalidTypeParameterAttribute4.CustomAttributes.Add(new CodeAttributeDeclaration("0")); var invalidMethodTypeParameterAttribute4 = new CodeMemberMethod { Name = "name" }; invalidMethodTypeParameterAttribute4.TypeParameters.Add(invalidTypeParameterAttribute4); yield return new object[] { invalidMethodTypeParameterAttribute4 }; var invalidTypeParameterAttribute5 = new CodeTypeParameter("parameter"); 
invalidTypeParameterAttribute5.CustomAttributes.Add(new CodeAttributeDeclaration("attribute", new CodeAttributeArgument("0", new CodePrimitiveExpression(1)))); var invalidMethodTypeParameterAttribute5 = new CodeMemberMethod { Name = "name" }; invalidMethodTypeParameterAttribute5.TypeParameters.Add(invalidTypeParameterAttribute5); yield return new object[] { invalidMethodTypeParameterAttribute5 }; var invalidTypeParameterAttribute6 = new CodeTypeParameter("parameter"); invalidTypeParameterAttribute6.CustomAttributes.Add(new CodeAttributeDeclaration("attribute", new CodeAttributeArgument("ARG", new CodeExpression()))); var invalidMethodTypeParameterAttribute6 = new CodeMemberMethod { Name = "name" }; invalidMethodTypeParameterAttribute6.TypeParameters.Add(invalidTypeParameterAttribute6); yield return new object[] { invalidMethodTypeParameterAttribute6 }; // CodeEntryPointMethod. var invalidEntryPointMethodStartDirective1 = new CodeEntryPointMethod { Name = "name" }; invalidEntryPointMethodStartDirective1.StartDirectives.Add(new CodeDirective()); yield return new object[] { invalidEntryPointMethodStartDirective1 }; var invalidEntryPointMethodStartDirective2 = new CodeEntryPointMethod { Name = "name" }; invalidEntryPointMethodStartDirective2.StartDirectives.Add(new CodeChecksumPragma("\0", Guid.NewGuid(), new byte[0])); yield return new object[] { invalidEntryPointMethodStartDirective2 }; foreach (char newLineChar in new char[] { '\r', '\n', '\u2028', '\u2029', '\u0085' }) { var invalidEntryPointMethodStartDirective3 = new CodeEntryPointMethod { Name = "name" }; invalidEntryPointMethodStartDirective3.StartDirectives.Add(new CodeRegionDirective(CodeRegionMode.None, $"te{newLineChar}xt")); yield return new object[] { invalidEntryPointMethodStartDirective3 }; } var invalidEntryPointMethodEndDirective1 = new CodeEntryPointMethod { Name = "name" }; invalidEntryPointMethodEndDirective1.EndDirectives.Add(new CodeDirective()); yield return new object[] { 
invalidEntryPointMethodEndDirective1 }; var invalidEntryPointMethodEndDirective2 = new CodeEntryPointMethod { Name = "name" }; invalidEntryPointMethodEndDirective2.EndDirectives.Add(new CodeChecksumPragma("\0", Guid.NewGuid(), new byte[0])); yield return new object[] { invalidEntryPointMethodEndDirective2 }; foreach (char newLineChar in new char[] { '\r', '\n', '\u2028', '\u2029', '\u0085' }) { var invalidEntryPointMethodEndDirective3 = new CodeEntryPointMethod { Name = "name" }; invalidEntryPointMethodEndDirective3.EndDirectives.Add(new CodeRegionDirective(CodeRegionMode.None, $"te{newLineChar}xt")); yield return new object[] { invalidEntryPointMethodEndDirective3 }; } var invalidEntryPointMethodImplementationType1 = new CodeEntryPointMethod { Name = "name" }; invalidEntryPointMethodImplementationType1.ImplementationTypes.Add(new CodeTypeReference()); yield return new object[] { invalidEntryPointMethodImplementationType1 }; var invalidEntryPointMethodImplementationType2 = new CodeEntryPointMethod { Name = "name" }; invalidEntryPointMethodImplementationType2.ImplementationTypes.Add(new CodeTypeReference("0")); yield return new object[] { invalidEntryPointMethodImplementationType2 }; var invalidEntryPointMethodImplementationType3 = new CodeEntryPointMethod { Name = "name" }; invalidEntryPointMethodImplementationType3.ImplementationTypes.Add(invalidTypeReference1); yield return new object[] { invalidEntryPointMethodImplementationType3 }; var invalidEntryPointMethodImplementationType4 = new CodeEntryPointMethod { Name = "name" }; invalidEntryPointMethodImplementationType4.ImplementationTypes.Add(invalidTypeReference2); yield return new object[] { invalidEntryPointMethodImplementationType4 }; var invalidEntryPointMethodImplementationType5 = new CodeEntryPointMethod { Name = "name" }; invalidEntryPointMethodImplementationType5.ImplementationTypes.Add(invalidTypeReference3); yield return new object[] { invalidEntryPointMethodImplementationType5 }; var 
invalidEntryPointMethodStatement1 = new CodeEntryPointMethod { Name = "name" }; invalidEntryPointMethodStatement1.Statements.Add(new CodeStatement()); yield return new object[] { invalidEntryPointMethodStatement1 }; var invalidEntryPointMethodStatement2 = new CodeEntryPointMethod { Name = "name", Attributes = MemberAttributes.Abstract }; invalidEntryPointMethodStatement2.Statements.Add(new CodeStatement()); yield return new object[] { invalidEntryPointMethodStatement2 }; var invalidEntryPointMethodTypeParameter1 = new CodeEntryPointMethod { Name = "name" }; invalidEntryPointMethodTypeParameter1.TypeParameters.Add(new CodeTypeParameter()); yield return new object[] { invalidEntryPointMethodTypeParameter1 }; var invalidEntryPointMethodTypeParameter2 = new CodeEntryPointMethod { Name = "name" }; invalidEntryPointMethodTypeParameter2.TypeParameters.Add(new CodeTypeParameter(null)); yield return new object[] { invalidEntryPointMethodTypeParameter2 }; var invalidEntryPointMethodTypeParameter3 = new CodeEntryPointMethod { Name = "name" }; invalidEntryPointMethodTypeParameter3.TypeParameters.Add(new CodeTypeParameter(string.Empty)); yield return new object[] { invalidEntryPointMethodTypeParameter3 }; var invalidEntryPointMethodTypeParameter4 = new CodeEntryPointMethod { Name = "name" }; invalidEntryPointMethodTypeParameter4.TypeParameters.Add(new CodeTypeParameter("0")); yield return new object[] { invalidEntryPointMethodTypeParameter4 }; var invalidEntryPointMethodTypeParameterAttribute1 = new CodeEntryPointMethod { Name = "name" }; invalidEntryPointMethodTypeParameterAttribute1.TypeParameters.Add(invalidTypeParameterAttribute1); yield return new object[] { invalidEntryPointMethodTypeParameterAttribute1 }; var invalidEntryPointMethodTypeParameterAttribute2 = new CodeEntryPointMethod { Name = "name" }; invalidEntryPointMethodTypeParameterAttribute2.TypeParameters.Add(invalidTypeParameterAttribute2); yield return new object[] { invalidEntryPointMethodTypeParameterAttribute2 
}; var invalidEntryPointMethodTypeParameterAttribute3 = new CodeEntryPointMethod { Name = "name" }; invalidEntryPointMethodTypeParameterAttribute3.TypeParameters.Add(invalidTypeParameterAttribute3); yield return new object[] { invalidEntryPointMethodTypeParameterAttribute3 }; var invalidEntryPointMethodTypeParameterAttribute4 = new CodeEntryPointMethod { Name = "name" }; invalidEntryPointMethodTypeParameterAttribute4.TypeParameters.Add(invalidTypeParameterAttribute4); yield return new object[] { invalidEntryPointMethodTypeParameterAttribute4 }; var invalidEntryPointMethodTypeParameterAttribute5 = new CodeEntryPointMethod { Name = "name" }; invalidEntryPointMethodTypeParameterAttribute5.TypeParameters.Add(invalidTypeParameterAttribute5); yield return new object[] { invalidEntryPointMethodTypeParameterAttribute5 }; var invalidEntryPointMethodTypeParameterAttribute6 = new CodeEntryPointMethod { Name = "name" }; invalidEntryPointMethodTypeParameterAttribute6.TypeParameters.Add(invalidTypeParameterAttribute6); yield return new object[] { invalidEntryPointMethodTypeParameterAttribute6 }; // CodeConstructor. 
var invalidConstructorAttribute1 = new CodeConstructor { Name = "name" }; invalidConstructorAttribute1.CustomAttributes.Add(new CodeAttributeDeclaration()); yield return new object[] { invalidConstructorAttribute1 }; var invalidConstructorAttribute2 = new CodeConstructor { Name = "name" }; invalidConstructorAttribute2.CustomAttributes.Add(new CodeAttributeDeclaration((string)null)); yield return new object[] { invalidConstructorAttribute2 }; var invalidConstructorAttribute3 = new CodeConstructor { Name = "name" }; invalidConstructorAttribute3.CustomAttributes.Add(new CodeAttributeDeclaration(string.Empty)); yield return new object[] { invalidConstructorAttribute3 }; var invalidConstructorAttribute4 = new CodeConstructor { Name = "name" }; invalidConstructorAttribute4.CustomAttributes.Add(new CodeAttributeDeclaration("0")); yield return new object[] { invalidConstructorAttribute4 }; var invalidConstructorAttribute5 = new CodeConstructor { Name = "name" }; invalidConstructorAttribute5.CustomAttributes.Add(new CodeAttributeDeclaration("name", new CodeAttributeArgument("0", new CodePrimitiveExpression(1)))); yield return new object[] { invalidConstructorAttribute5 }; var invalidConstructorAttribute6 = new CodeConstructor { Name = "name" }; invalidConstructorAttribute6.CustomAttributes.Add(new CodeAttributeDeclaration("name", new CodeAttributeArgument("name", new CodeExpression()))); yield return new object[] { invalidConstructorAttribute6 }; var invalidConstructorStartDirective1 = new CodeConstructor { Name = "name" }; invalidConstructorStartDirective1.StartDirectives.Add(new CodeDirective()); yield return new object[] { invalidConstructorStartDirective1 }; var invalidConstructorStartDirective2 = new CodeConstructor { Name = "name" }; invalidConstructorStartDirective2.StartDirectives.Add(new CodeChecksumPragma("\0", Guid.NewGuid(), new byte[0])); yield return new object[] { invalidConstructorStartDirective2 }; foreach (char newLineChar in new char[] { '\r', '\n', 
'\u2028', '\u2029', '\u0085' }) { var invalidConstructorStartDirective3 = new CodeConstructor { Name = "name" }; invalidConstructorStartDirective3.StartDirectives.Add(new CodeRegionDirective(CodeRegionMode.None, $"te{newLineChar}xt")); yield return new object[] { invalidConstructorStartDirective3 }; } var invalidConstructorEndDirective1 = new CodeConstructor { Name = "name" }; invalidConstructorEndDirective1.EndDirectives.Add(new CodeDirective()); yield return new object[] { invalidConstructorEndDirective1 }; var invalidConstructorEndDirective2 = new CodeConstructor { Name = "name" }; invalidConstructorEndDirective2.EndDirectives.Add(new CodeChecksumPragma("\0", Guid.NewGuid(), new byte[0])); yield return new object[] { invalidConstructorEndDirective2 }; foreach (char newLineChar in new char[] { '\r', '\n', '\u2028', '\u2029', '\u0085' }) { var invalidConstructorEndDirective3 = new CodeConstructor { Name = "name" }; invalidConstructorEndDirective3.EndDirectives.Add(new CodeRegionDirective(CodeRegionMode.None, $"te{newLineChar}xt")); yield return new object[] { invalidConstructorEndDirective3 }; } var invalidConstructorImplementationType1 = new CodeConstructor { Name = "name" }; invalidConstructorImplementationType1.ImplementationTypes.Add(new CodeTypeReference()); yield return new object[] { invalidConstructorImplementationType1 }; var invalidConstructorImplementationType2 = new CodeConstructor { Name = "name" }; invalidConstructorImplementationType2.ImplementationTypes.Add(new CodeTypeReference("0")); yield return new object[] { invalidConstructorImplementationType2 }; var invalidConstructorImplementationType3 = new CodeConstructor { Name = "name" }; invalidConstructorImplementationType3.ImplementationTypes.Add(invalidTypeReference1); yield return new object[] { invalidConstructorImplementationType3 }; var invalidConstructorImplementationType4 = new CodeConstructor { Name = "name" }; 
invalidConstructorImplementationType4.ImplementationTypes.Add(invalidTypeReference2); yield return new object[] { invalidConstructorImplementationType4 }; var invalidConstructorImplementationType5 = new CodeConstructor { Name = "name" }; invalidConstructorImplementationType5.ImplementationTypes.Add(invalidTypeReference3); yield return new object[] { invalidConstructorImplementationType5 }; var invalidConstructorStatement1 = new CodeConstructor { Name = "name" }; invalidConstructorStatement1.Statements.Add(new CodeStatement()); yield return new object[] { invalidConstructorStatement1 }; var invalidConstructorStatement2 = new CodeConstructor { Name = "name", Attributes = MemberAttributes.Abstract }; invalidConstructorStatement2.Statements.Add(new CodeStatement()); yield return new object[] { invalidConstructorStatement2 }; var invalidConstructorParameter1 = new CodeConstructor { Name = "name" }; invalidConstructorParameter1.Parameters.Add(new CodeParameterDeclarationExpression()); yield return new object[] { invalidConstructorParameter1 }; var invalidConstructorParameter2 = new CodeConstructor { Name = "name" }; invalidConstructorParameter2.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference(), "name")); yield return new object[] { invalidConstructorParameter2 }; var invalidConstructorParameter3 = new CodeConstructor { Name = "name" }; invalidConstructorParameter3.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("0"), "name")); yield return new object[] { invalidConstructorParameter3 }; var invalidConstructorParameter4 = new CodeConstructor { Name = "name" }; invalidConstructorParameter4.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("type"), null)); yield return new object[] { invalidConstructorParameter4 }; var invalidConstructorParameter5 = new CodeConstructor { Name = "name" }; invalidConstructorParameter5.Parameters.Add(new CodeParameterDeclarationExpression(new 
CodeTypeReference("type"), string.Empty)); yield return new object[] { invalidConstructorParameter5 }; var invalidConstructorParameter6 = new CodeConstructor { Name = "name" }; invalidConstructorParameter6.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("type"), "0")); yield return new object[] { invalidConstructorParameter6 }; var invalidConstructorParameterAttribute1 = new CodeConstructor { Name = "name" }; invalidConstructorParameterAttribute1.Parameters.Add(invalidParameterAttribute1); yield return new object[] { invalidConstructorParameterAttribute1 }; var invalidConstructorParameterAttribute2 = new CodeConstructor { Name = "name" }; invalidConstructorParameterAttribute2.Parameters.Add(invalidParameterAttribute2); yield return new object[] { invalidConstructorParameterAttribute2 }; var invalidConstructorParameterAttribute3 = new CodeConstructor { Name = "name" }; invalidConstructorParameterAttribute3.Parameters.Add(invalidParameterAttribute3); yield return new object[] { invalidConstructorParameterAttribute3 }; var invalidConstructorParameterAttribute4 = new CodeConstructor { Name = "name" }; invalidConstructorParameterAttribute4.Parameters.Add(invalidParameterAttribute4); yield return new object[] { invalidConstructorParameterAttribute4 }; var invalidConstructorParameterAttribute5 = new CodeConstructor { Name = "name" }; invalidConstructorParameterAttribute5.Parameters.Add(invalidParameterAttribute5); yield return new object[] { invalidConstructorParameterAttribute5 }; var invalidConstructorParameterAttribute6 = new CodeConstructor { Name = "name" }; invalidConstructorParameterAttribute6.Parameters.Add(invalidParameterAttribute6); yield return new object[] { invalidConstructorParameterAttribute6 }; var invalidConstructorTypeParameter1 = new CodeConstructor { Name = "name" }; invalidConstructorTypeParameter1.TypeParameters.Add(new CodeTypeParameter()); yield return new object[] { invalidConstructorTypeParameter1 }; var 
invalidConstructorTypeParameter2 = new CodeConstructor { Name = "name" }; invalidConstructorTypeParameter2.TypeParameters.Add(new CodeTypeParameter(null)); yield return new object[] { invalidConstructorTypeParameter2 }; var invalidConstructorTypeParameter3 = new CodeConstructor { Name = "name" }; invalidConstructorTypeParameter3.TypeParameters.Add(new CodeTypeParameter(string.Empty)); yield return new object[] { invalidConstructorTypeParameter3 }; var invalidConstructorTypeParameter4 = new CodeConstructor { Name = "name" }; invalidConstructorTypeParameter4.TypeParameters.Add(new CodeTypeParameter("0")); yield return new object[] { invalidConstructorTypeParameter4 }; var invalidConstructorTypeParameterAttribute1 = new CodeConstructor { Name = "name" }; invalidConstructorTypeParameterAttribute1.TypeParameters.Add(invalidTypeParameterAttribute1); yield return new object[] { invalidConstructorTypeParameterAttribute1 }; var invalidConstructorTypeParameterAttribute2 = new CodeConstructor { Name = "name" }; invalidConstructorTypeParameterAttribute2.TypeParameters.Add(invalidTypeParameterAttribute2); yield return new object[] { invalidConstructorTypeParameterAttribute2 }; var invalidConstructorTypeParameterAttribute3 = new CodeConstructor { Name = "name" }; invalidConstructorTypeParameterAttribute3.TypeParameters.Add(invalidTypeParameterAttribute3); yield return new object[] { invalidConstructorTypeParameterAttribute3 }; var invalidConstructorTypeParameterAttribute4 = new CodeConstructor { Name = "name" }; invalidConstructorTypeParameterAttribute4.TypeParameters.Add(invalidTypeParameterAttribute4); yield return new object[] { invalidConstructorTypeParameterAttribute4 }; var invalidConstructorTypeParameterAttribute5 = new CodeConstructor { Name = "name" }; invalidConstructorTypeParameterAttribute5.TypeParameters.Add(invalidTypeParameterAttribute5); yield return new object[] { invalidConstructorTypeParameterAttribute5 }; var invalidConstructorTypeParameterAttribute6 = new 
CodeConstructor { Name = "name" }; invalidConstructorTypeParameterAttribute6.TypeParameters.Add(invalidTypeParameterAttribute6); yield return new object[] { invalidConstructorTypeParameterAttribute6 }; var invalidConstructorBaseConstructorArg = new CodeConstructor { Name = "name" }; invalidConstructorBaseConstructorArg.BaseConstructorArgs.Add(new CodeExpression()); yield return new object[] { invalidConstructorBaseConstructorArg }; var invalidConstructorChainedConstructorArg = new CodeConstructor { Name = "name" }; invalidConstructorChainedConstructorArg.ChainedConstructorArgs.Add(new CodeExpression()); yield return new object[] { invalidConstructorChainedConstructorArg }; // CodeTypeConstructor. var invalidTypeConstructorStartDirective1 = new CodeTypeConstructor { Name = "name" }; invalidTypeConstructorStartDirective1.StartDirectives.Add(new CodeDirective()); yield return new object[] { invalidTypeConstructorStartDirective1 }; var invalidTypeConstructorStartDirective2 = new CodeTypeConstructor { Name = "name" }; invalidTypeConstructorStartDirective2.StartDirectives.Add(new CodeChecksumPragma("\0", Guid.NewGuid(), new byte[0])); yield return new object[] { invalidTypeConstructorStartDirective2 }; foreach (char newLineChar in new char[] { '\r', '\n', '\u2028', '\u2029', '\u0085' }) { var invalidTypeConstructorStartDirective3 = new CodeTypeConstructor { Name = "name" }; invalidTypeConstructorStartDirective3.StartDirectives.Add(new CodeRegionDirective(CodeRegionMode.None, $"te{newLineChar}xt")); yield return new object[] { invalidTypeConstructorStartDirective3 }; } var invalidTypeConstructorEndDirective1 = new CodeTypeConstructor { Name = "name" }; invalidTypeConstructorEndDirective1.EndDirectives.Add(new CodeDirective()); yield return new object[] { invalidTypeConstructorEndDirective1 }; var invalidTypeConstructorEndDirective2 = new CodeTypeConstructor { Name = "name" }; invalidTypeConstructorEndDirective2.EndDirectives.Add(new CodeChecksumPragma("\0", 
Guid.NewGuid(), new byte[0])); yield return new object[] { invalidTypeConstructorEndDirective2 }; foreach (char newLineChar in new char[] { '\r', '\n', '\u2028', '\u2029', '\u0085' }) { var invalidTypeConstructorEndDirective3 = new CodeTypeConstructor { Name = "name" }; invalidTypeConstructorEndDirective3.EndDirectives.Add(new CodeRegionDirective(CodeRegionMode.None, $"te{newLineChar}xt")); yield return new object[] { invalidTypeConstructorEndDirective3 }; } var invalidTypeConstructorImplementationType1 = new CodeTypeConstructor { Name = "name" }; invalidTypeConstructorImplementationType1.ImplementationTypes.Add(new CodeTypeReference()); yield return new object[] { invalidTypeConstructorImplementationType1 }; var invalidTypeConstructorImplementationType2 = new CodeTypeConstructor { Name = "name" }; invalidTypeConstructorImplementationType2.ImplementationTypes.Add(new CodeTypeReference("0")); yield return new object[] { invalidTypeConstructorImplementationType2 }; var invalidTypeConstructorImplementationType3 = new CodeTypeConstructor { Name = "name" }; invalidTypeConstructorImplementationType3.ImplementationTypes.Add(invalidTypeReference1); yield return new object[] { invalidTypeConstructorImplementationType3 }; var invalidTypeConstructorImplementationType4 = new CodeTypeConstructor { Name = "name" }; invalidTypeConstructorImplementationType4.ImplementationTypes.Add(invalidTypeReference2); yield return new object[] { invalidTypeConstructorImplementationType4 }; var invalidTypeConstructorImplementationType5 = new CodeTypeConstructor { Name = "name" }; invalidTypeConstructorImplementationType5.ImplementationTypes.Add(invalidTypeReference3); yield return new object[] { invalidTypeConstructorImplementationType5 }; var invalidTypeConstructorStatement1 = new CodeTypeConstructor { Name = "name" }; invalidTypeConstructorStatement1.Statements.Add(new CodeStatement()); yield return new object[] { invalidTypeConstructorStatement1 }; var invalidTypeConstructorStatement2 = new 
CodeTypeConstructor { Name = "name", Attributes = MemberAttributes.Abstract }; invalidTypeConstructorStatement2.Statements.Add(new CodeStatement()); yield return new object[] { invalidTypeConstructorStatement2 }; var invalidTypeConstructorTypeParameter1 = new CodeTypeConstructor { Name = "name" }; invalidTypeConstructorTypeParameter1.TypeParameters.Add(new CodeTypeParameter()); yield return new object[] { invalidTypeConstructorTypeParameter1 }; var invalidTypeConstructorTypeParameter2 = new CodeTypeConstructor { Name = "name" }; invalidTypeConstructorTypeParameter2.TypeParameters.Add(new CodeTypeParameter(null)); yield return new object[] { invalidTypeConstructorTypeParameter2 }; var invalidTypeConstructorTypeParameter3 = new CodeTypeConstructor { Name = "name" }; invalidTypeConstructorTypeParameter3.TypeParameters.Add(new CodeTypeParameter(string.Empty)); yield return new object[] { invalidTypeConstructorTypeParameter3 }; var invalidTypeConstructorTypeParameter4 = new CodeTypeConstructor { Name = "name" }; invalidTypeConstructorTypeParameter4.TypeParameters.Add(new CodeTypeParameter("0")); yield return new object[] { invalidTypeConstructorTypeParameter4 }; var invalidTypeConstructorTypeParameterAttribute1 = new CodeTypeConstructor { Name = "name" }; invalidTypeConstructorTypeParameterAttribute1.TypeParameters.Add(invalidTypeParameterAttribute1); yield return new object[] { invalidTypeConstructorTypeParameterAttribute1 }; var invalidTypeConstructorTypeParameterAttribute2 = new CodeTypeConstructor { Name = "name" }; invalidTypeConstructorTypeParameterAttribute2.TypeParameters.Add(invalidTypeParameterAttribute2); yield return new object[] { invalidTypeConstructorTypeParameterAttribute2 }; var invalidTypeConstructorTypeParameterAttribute3 = new CodeTypeConstructor { Name = "name" }; invalidTypeConstructorTypeParameterAttribute3.TypeParameters.Add(invalidTypeParameterAttribute3); yield return new object[] { invalidTypeConstructorTypeParameterAttribute3 }; var 
invalidTypeConstructorTypeParameterAttribute4 = new CodeTypeConstructor { Name = "name" }; invalidTypeConstructorTypeParameterAttribute4.TypeParameters.Add(invalidTypeParameterAttribute4); yield return new object[] { invalidTypeConstructorTypeParameterAttribute4 }; var invalidTypeConstructorTypeParameterAttribute5 = new CodeTypeConstructor { Name = "name" }; invalidTypeConstructorTypeParameterAttribute5.TypeParameters.Add(invalidTypeParameterAttribute5); yield return new object[] { invalidTypeConstructorTypeParameterAttribute5 }; var invalidTypeConstructorTypeParameterAttribute6 = new CodeTypeConstructor { Name = "name" }; invalidTypeConstructorTypeParameterAttribute6.TypeParameters.Add(invalidTypeParameterAttribute6); yield return new object[] { invalidTypeConstructorTypeParameterAttribute6 }; // CodeMemberProperty. yield return new object[] { new CodeMemberProperty() }; yield return new object[] { new CodeMemberProperty { Name = null } }; yield return new object[] { new CodeMemberProperty { Name = string.Empty } }; yield return new object[] { new CodeMemberProperty { Name = "0" } }; yield return new object[] { new CodeMemberProperty { Name = "name", PrivateImplementationType = new CodeTypeReference() } }; yield return new object[] { new CodeMemberProperty { Name = "name", PrivateImplementationType = new CodeTypeReference("0") } }; yield return new object[] { new CodeMemberProperty { Name = "name", PrivateImplementationType = invalidTypeReference1 } }; yield return new object[] { new CodeMemberProperty { Name = "name", PrivateImplementationType = invalidTypeReference2 } }; yield return new object[] { new CodeMemberProperty { Name = "name", PrivateImplementationType = invalidTypeReference3 } }; var invalidPropertyStartDirective1 = new CodeMemberProperty { Name = "name" }; invalidPropertyStartDirective1.StartDirectives.Add(new CodeDirective()); yield return new object[] { invalidPropertyStartDirective1 }; var invalidPropertyStartDirective2 = new CodeMemberProperty { 
Name = "name" }; invalidPropertyStartDirective2.StartDirectives.Add(new CodeChecksumPragma("\0", Guid.NewGuid(), new byte[0])); yield return new object[] { invalidPropertyStartDirective2 }; foreach (char newLineChar in new char[] { '\r', '\n', '\u2028', '\u2029', '\u0085' }) { var invalidPropertyStartDirective3 = new CodeMemberProperty { Name = "name" }; invalidPropertyStartDirective3.StartDirectives.Add(new CodeRegionDirective(CodeRegionMode.None, $"te{newLineChar}xt")); yield return new object[] { invalidPropertyStartDirective3 }; } var invalidPropertyEndDirective1 = new CodeMemberProperty { Name = "name" }; invalidPropertyEndDirective1.EndDirectives.Add(new CodeDirective()); yield return new object[] { invalidPropertyEndDirective1 }; var invalidPropertyEndDirective2 = new CodeMemberProperty { Name = "name" }; invalidPropertyEndDirective2.EndDirectives.Add(new CodeChecksumPragma("\0", Guid.NewGuid(), new byte[0])); yield return new object[] { invalidPropertyEndDirective2 }; foreach (char newLineChar in new char[] { '\r', '\n', '\u2028', '\u2029', '\u0085' }) { var invalidPropertyEndDirective3 = new CodeMemberProperty { Name = "name" }; invalidPropertyEndDirective3.EndDirectives.Add(new CodeRegionDirective(CodeRegionMode.None, $"te{newLineChar}xt")); yield return new object[] { invalidPropertyEndDirective3 }; } foreach (string name in new string[] { "item", "Item" }) { var invalidPropertyParameter1 = new CodeMemberProperty { Name = name }; invalidPropertyParameter1.Parameters.Add(new CodeParameterDeclarationExpression()); yield return new object[] { invalidPropertyParameter1 }; var invalidPropertyParameter2 = new CodeMemberProperty { Name = name }; invalidPropertyParameter2.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference(), "name")); yield return new object[] { invalidPropertyParameter2 }; var invalidPropertyParameter3 = new CodeMemberProperty { Name = name }; invalidPropertyParameter3.Parameters.Add(new 
CodeParameterDeclarationExpression(new CodeTypeReference("0"), "name")); yield return new object[] { invalidPropertyParameter3 }; var invalidPropertyParameter4 = new CodeMemberProperty { Name = name }; invalidPropertyParameter4.Parameters.Add(new CodeParameterDeclarationExpression(invalidTypeReference1, "name")); yield return new object[] { invalidPropertyParameter4 }; var invalidPropertyParameter5 = new CodeMemberProperty { Name = name }; invalidPropertyParameter5.Parameters.Add(new CodeParameterDeclarationExpression(invalidTypeReference2, "name")); yield return new object[] { invalidPropertyParameter5 }; var invalidPropertyParameter6 = new CodeMemberProperty { Name = name }; invalidPropertyParameter6.Parameters.Add(new CodeParameterDeclarationExpression(invalidTypeReference3, "name")); yield return new object[] { invalidPropertyParameter6 }; var invalidPropertyParameter7 = new CodeMemberProperty { Name = name }; invalidPropertyParameter7.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("type"), null)); yield return new object[] { invalidPropertyParameter7 }; var invalidPropertyParameter8 = new CodeMemberProperty { Name = name }; invalidPropertyParameter8.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("type"), string.Empty)); yield return new object[] { invalidPropertyParameter8 }; var invalidPropertyParameter9 = new CodeMemberProperty { Name = name }; invalidPropertyParameter9.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("type"), "0")); yield return new object[] { invalidPropertyParameter9 }; var invalidPropertyParameterAttribute1 = new CodeMemberProperty { Name = name }; invalidPropertyParameterAttribute1.Parameters.Add(invalidParameterAttribute1); yield return new object[] { invalidPropertyParameterAttribute1 }; var invalidPropertyParameterAttribute2 = new CodeMemberProperty { Name = name }; invalidPropertyParameterAttribute2.Parameters.Add(invalidParameterAttribute2); yield 
return new object[] { invalidPropertyParameterAttribute2 }; var invalidPropertyParameterAttribute3 = new CodeMemberProperty { Name = name }; invalidPropertyParameterAttribute3.Parameters.Add(invalidParameterAttribute3); yield return new object[] { invalidPropertyParameterAttribute3 }; var invalidPropertyParameterAttribute4 = new CodeMemberProperty { Name = name }; invalidPropertyParameterAttribute4.Parameters.Add(invalidParameterAttribute4); yield return new object[] { invalidPropertyParameterAttribute4 }; var invalidPropertyParameterAttribute5 = new CodeMemberProperty { Name = name }; invalidPropertyParameterAttribute5.Parameters.Add(invalidParameterAttribute5); yield return new object[] { invalidPropertyParameterAttribute5 }; var invalidPropertyParameterAttribute6 = new CodeMemberProperty { Name = name }; invalidPropertyParameterAttribute6.Parameters.Add(invalidParameterAttribute6); yield return new object[] { invalidPropertyParameterAttribute6 }; } var invalidPropertyImplementationType1 = new CodeMemberProperty { Name = "name" }; invalidPropertyImplementationType1.ImplementationTypes.Add(new CodeTypeReference()); yield return new object[] { invalidPropertyImplementationType1 }; var invalidPropertyImplementationType2 = new CodeMemberProperty { Name = "name" }; invalidPropertyImplementationType2.ImplementationTypes.Add(new CodeTypeReference("0")); yield return new object[] { invalidPropertyImplementationType2 }; var invalidPropertyImplementationType3 = new CodeMemberProperty { Name = "name" }; invalidPropertyImplementationType3.ImplementationTypes.Add(invalidTypeReference1); yield return new object[] { invalidPropertyImplementationType3 }; var invalidPropertyImplementationType4 = new CodeMemberProperty { Name = "name" }; invalidPropertyImplementationType4.ImplementationTypes.Add(invalidTypeReference2); yield return new object[] { invalidPropertyImplementationType4 }; var invalidPropertyImplementationType5 = new CodeMemberProperty { Name = "name" }; 
invalidPropertyImplementationType5.ImplementationTypes.Add(invalidTypeReference3); yield return new object[] { invalidPropertyImplementationType5 }; var invalidPropertyGetStatement = new CodeMemberProperty { Name = "name" }; invalidPropertyGetStatement.GetStatements.Add(new CodeStatement()); yield return new object[] { invalidPropertyGetStatement }; var invalidPropertySetStatement = new CodeMemberProperty { Name = "name" }; invalidPropertySetStatement.SetStatements.Add(new CodeStatement()); yield return new object[] { invalidPropertySetStatement }; // CodeTypeDeclaration. yield return new object[] { new CodeTypeDeclaration() }; yield return new object[] { new CodeTypeDeclaration(null) }; yield return new object[] { new CodeTypeDeclaration(string.Empty) }; yield return new object[] { new CodeTypeDeclaration("0") }; var invalidTypeAttribute1 = new CodeTypeDeclaration("name"); invalidTypeAttribute1.CustomAttributes.Add(new CodeAttributeDeclaration()); yield return new object[] { invalidTypeAttribute1 }; var invalidTypeAttribute2 = new CodeTypeDeclaration("name"); invalidTypeAttribute2.CustomAttributes.Add(new CodeAttributeDeclaration((string)null)); yield return new object[] { invalidTypeAttribute2 }; var invalidTypeAttribute3 = new CodeTypeDeclaration("name"); invalidTypeAttribute3.CustomAttributes.Add(new CodeAttributeDeclaration(string.Empty)); yield return new object[] { invalidTypeAttribute3 }; var invalidTypeAttribute4 = new CodeTypeDeclaration("name"); invalidTypeAttribute4.CustomAttributes.Add(new CodeAttributeDeclaration("0")); yield return new object[] { invalidTypeAttribute4 }; var invalidTypeAttribute5 = new CodeTypeDeclaration("name"); invalidTypeAttribute5.CustomAttributes.Add(new CodeAttributeDeclaration("name", new CodeAttributeArgument("0", new CodePrimitiveExpression(1)))); yield return new object[] { invalidTypeAttribute5 }; var invalidTypeAttribute6 = new CodeTypeDeclaration("name"); invalidTypeAttribute6.CustomAttributes.Add(new 
CodeAttributeDeclaration("name", new CodeAttributeArgument("name", new CodeExpression()))); yield return new object[] { invalidTypeAttribute6 }; var invalidTypeParameter1 = new CodeTypeDeclaration("name"); invalidTypeParameter1.TypeParameters.Add(new CodeTypeParameter()); yield return new object[] { invalidTypeParameter1 }; var invalidTypeParameter2 = new CodeTypeDeclaration("name"); invalidTypeParameter2.TypeParameters.Add(new CodeTypeParameter(null)); yield return new object[] { invalidTypeParameter2 }; var invalidTypeParameter3 = new CodeTypeDeclaration("name"); invalidTypeParameter3.TypeParameters.Add(new CodeTypeParameter(string.Empty)); yield return new object[] { invalidTypeParameter3 }; var invalidTypeParameter4 = new CodeTypeDeclaration("name"); invalidTypeParameter4.TypeParameters.Add(new CodeTypeParameter("0")); yield return new object[] { invalidTypeParameter4 }; var invalidTypeTypeParameterAttribute1 = new CodeTypeDeclaration("name"); invalidTypeTypeParameterAttribute1.TypeParameters.Add(invalidTypeParameterAttribute1); yield return new object[] { invalidTypeTypeParameterAttribute1 }; var invalidTypeTypeParameterAttribute2 = new CodeTypeDeclaration("name"); invalidTypeTypeParameterAttribute2.TypeParameters.Add(invalidTypeParameterAttribute2); yield return new object[] { invalidTypeTypeParameterAttribute2 }; var invalidTypeTypeParameterAttribute3 = new CodeTypeDeclaration("name"); invalidTypeTypeParameterAttribute3.TypeParameters.Add(invalidTypeParameterAttribute3); yield return new object[] { invalidTypeTypeParameterAttribute3 }; var invalidTypeTypeParameterAttribute4 = new CodeTypeDeclaration("name"); invalidTypeTypeParameterAttribute4.TypeParameters.Add(invalidTypeParameterAttribute4); yield return new object[] { invalidTypeTypeParameterAttribute4 }; var invalidTypeTypeParameterAttribute5 = new CodeTypeDeclaration("name"); invalidTypeTypeParameterAttribute5.TypeParameters.Add(invalidTypeParameterAttribute5); yield return new object[] { 
invalidTypeTypeParameterAttribute5 }; var invalidTypeTypeParameterAttribute6 = new CodeTypeDeclaration("name"); invalidTypeTypeParameterAttribute6.TypeParameters.Add(invalidTypeParameterAttribute6); yield return new object[] { invalidTypeTypeParameterAttribute6 }; var invalidParameterConstraint1 = new CodeTypeParameter("parameter"); invalidParameterConstraint1.Constraints.Add(new CodeTypeReference()); var invalidTypeParameterConstraint1 = new CodeTypeDeclaration("name"); invalidTypeParameterConstraint1.TypeParameters.Add(invalidParameterConstraint1); yield return new object[] { invalidTypeParameterConstraint1 }; var invalidParameterConstraint2 = new CodeTypeParameter("parameter"); invalidParameterConstraint2.Constraints.Add(new CodeTypeReference("0")); var invalidTypeParameterConstraint2 = new CodeTypeDeclaration("name"); invalidTypeParameterConstraint2.TypeParameters.Add(invalidParameterConstraint2); yield return new object[] { invalidTypeParameterConstraint2 }; var invalidParameterConstraint3 = new CodeTypeParameter("parameter"); invalidParameterConstraint3.Constraints.Add(invalidTypeReference1); var invalidTypeParameterConstraint3 = new CodeTypeDeclaration("name"); invalidTypeParameterConstraint3.TypeParameters.Add(invalidParameterConstraint3); yield return new object[] { invalidTypeParameterConstraint3 }; var invalidParameterConstraint4 = new CodeTypeParameter("parameter"); invalidParameterConstraint4.Constraints.Add(invalidTypeReference2); var invalidTypeParameterConstraint4 = new CodeTypeDeclaration("name"); invalidTypeParameterConstraint4.TypeParameters.Add(invalidParameterConstraint4); yield return new object[] { invalidTypeParameterConstraint4 }; var invalidParameterConstraint5 = new CodeTypeParameter("parameter"); invalidParameterConstraint5.Constraints.Add(invalidTypeReference3); var invalidTypeParameterConstraint5 = new CodeTypeDeclaration("name"); invalidTypeParameterConstraint5.TypeParameters.Add(invalidParameterConstraint5); yield return new object[] 
{ invalidTypeParameterConstraint5 }; var invalidParameterConstraint6 = new CodeTypeParameter("parameter"); invalidParameterConstraint6.Constraints.Add(new CodeTypeReference("constraint`2", new CodeTypeReference("name"))); var invalidTypeParameterConstraint6 = new CodeTypeDeclaration("name"); invalidTypeParameterConstraint6.TypeParameters.Add(invalidParameterConstraint6); yield return new object[] { invalidTypeParameterConstraint6 }; var invalidParameterConstraint7 = new CodeTypeParameter("parameter"); invalidParameterConstraint7.Constraints.Add(new CodeTypeReference("constraint", new CodeTypeReference(), new CodeTypeReference("name"))); var invalidTypeParameterConstraint7 = new CodeTypeDeclaration("name"); invalidTypeParameterConstraint7.TypeParameters.Add(invalidParameterConstraint7); yield return new object[] { invalidTypeParameterConstraint7 }; var invalidParameterConstraint8 = new CodeTypeParameter("parameter"); invalidParameterConstraint8.Constraints.Add(new CodeTypeReference("constraint", new CodeTypeReference("0"), new CodeTypeReference("name"))); var invalidTypeParameterConstraint8 = new CodeTypeDeclaration("name"); invalidTypeParameterConstraint8.TypeParameters.Add(invalidParameterConstraint8); yield return new object[] { invalidTypeParameterConstraint8 }; var invalidParameterConstraint9 = new CodeTypeParameter("parameter"); invalidParameterConstraint9.Constraints.Add(new CodeTypeReference("constraint", invalidTypeReference1, new CodeTypeReference("name"))); var invalidTypeParameterConstraint9 = new CodeTypeDeclaration("name"); invalidTypeParameterConstraint9.TypeParameters.Add(invalidParameterConstraint9); yield return new object[] { invalidTypeParameterConstraint9 }; var invalidParameterConstraint10 = new CodeTypeParameter("parameter"); invalidParameterConstraint10.Constraints.Add(new CodeTypeReference("constraint", invalidTypeReference2, new CodeTypeReference("name"))); var invalidTypeParameterConstraint10 = new CodeTypeDeclaration("name"); 
invalidTypeParameterConstraint10.TypeParameters.Add(invalidParameterConstraint10); yield return new object[] { invalidTypeParameterConstraint10 }; var invalidParameterConstraint11 = new CodeTypeParameter("parameter"); invalidParameterConstraint11.Constraints.Add(new CodeTypeReference("constraint", invalidTypeReference3, new CodeTypeReference("name"))); var invalidTypeParameterConstraint11 = new CodeTypeDeclaration("name"); invalidTypeParameterConstraint11.TypeParameters.Add(invalidParameterConstraint11); yield return new object[] { invalidTypeParameterConstraint11 }; var invalidTypeBaseType1 = new CodeTypeDeclaration("name"); invalidTypeBaseType1.BaseTypes.Add(new CodeTypeReference()); yield return new object[] { invalidTypeBaseType1 }; var invalidTypeBaseType2 = new CodeTypeDeclaration("name"); invalidTypeBaseType2.BaseTypes.Add(new CodeTypeReference("0")); yield return new object[] { invalidTypeBaseType2 }; var invalidTypeBaseType3 = new CodeTypeDeclaration("name"); invalidTypeBaseType3.BaseTypes.Add(invalidTypeReference1); yield return new object[] { invalidTypeBaseType3 }; var invalidTypeBaseType4 = new CodeTypeDeclaration("name"); invalidTypeBaseType4.BaseTypes.Add(invalidTypeReference2); yield return new object[] { invalidTypeBaseType4 }; var invalidTypeBaseType5 = new CodeTypeDeclaration("name"); invalidTypeBaseType5.BaseTypes.Add(invalidTypeReference3); yield return new object[] { invalidTypeBaseType5 }; // CodeTypeDelegate. 
yield return new object[] { new CodeTypeDelegate() }; yield return new object[] { new CodeTypeDelegate(null) }; yield return new object[] { new CodeTypeDelegate(string.Empty) }; yield return new object[] { new CodeTypeDelegate("0") }; yield return new object[] { new CodeTypeDelegate("name") { ReturnType = new CodeTypeReference() } }; yield return new object[] { new CodeTypeDelegate("name") { ReturnType = new CodeTypeReference("0") } }; yield return new object[] { new CodeTypeDelegate("name") { ReturnType = invalidTypeReference1 } }; yield return new object[] { new CodeTypeDelegate("name") { ReturnType = invalidTypeReference2 } }; yield return new object[] { new CodeTypeDelegate("name") { ReturnType = invalidTypeReference3 } }; var invalidDelegateAttribute1 = new CodeTypeDelegate("name"); invalidDelegateAttribute1.CustomAttributes.Add(new CodeAttributeDeclaration()); yield return new object[] { invalidDelegateAttribute1 }; var invalidDelegateAttribute2 = new CodeTypeDelegate("name"); invalidDelegateAttribute2.CustomAttributes.Add(new CodeAttributeDeclaration((string)null)); yield return new object[] { invalidDelegateAttribute2 }; var invalidDelegateAttribute3 = new CodeTypeDelegate("name"); invalidDelegateAttribute3.CustomAttributes.Add(new CodeAttributeDeclaration(string.Empty)); yield return new object[] { invalidDelegateAttribute3 }; var invalidDelegateAttribute4 = new CodeTypeDelegate("name"); invalidDelegateAttribute4.CustomAttributes.Add(new CodeAttributeDeclaration("0")); yield return new object[] { invalidDelegateAttribute4 }; var invalidDelegateAttribute5 = new CodeTypeDelegate("name"); invalidDelegateAttribute5.CustomAttributes.Add(new CodeAttributeDeclaration("name", new CodeAttributeArgument("0", new CodePrimitiveExpression(1)))); yield return new object[] { invalidDelegateAttribute5 }; var invalidDelegateAttribute6 = new CodeTypeDelegate("name"); invalidDelegateAttribute6.CustomAttributes.Add(new CodeAttributeDeclaration("name", new 
CodeAttributeArgument("name", new CodeExpression()))); yield return new object[] { invalidDelegateAttribute6 }; var invalidDelegateTypeParameter1 = new CodeTypeDelegate("name"); invalidDelegateTypeParameter1.TypeParameters.Add(new CodeTypeParameter()); yield return new object[] { invalidDelegateTypeParameter1 }; var invalidDelegateTypeParameter2 = new CodeTypeDelegate("name"); invalidDelegateTypeParameter2.TypeParameters.Add(new CodeTypeParameter(null)); yield return new object[] { invalidDelegateTypeParameter2 }; var invalidDelegateTypeParameter3 = new CodeTypeDelegate("name"); invalidDelegateTypeParameter3.TypeParameters.Add(new CodeTypeParameter(string.Empty)); yield return new object[] { invalidDelegateTypeParameter3 }; var invalidDelegateTypeParameter4 = new CodeTypeDelegate("name"); invalidDelegateTypeParameter4.TypeParameters.Add(new CodeTypeParameter("0")); yield return new object[] { invalidDelegateTypeParameter4 }; var invalidDelegateTypeParameterAttribute1 = new CodeTypeDelegate("name"); invalidDelegateTypeParameterAttribute1.TypeParameters.Add(invalidTypeParameterAttribute1); yield return new object[] { invalidDelegateTypeParameterAttribute1 }; var invalidDelegateTypeParameterAttribute2 = new CodeTypeDelegate("name"); invalidDelegateTypeParameterAttribute2.TypeParameters.Add(invalidTypeParameterAttribute2); yield return new object[] { invalidDelegateTypeParameterAttribute2 }; var invalidDelegateTypeParameterAttribute3 = new CodeTypeDelegate("name"); invalidDelegateTypeParameterAttribute3.TypeParameters.Add(invalidTypeParameterAttribute3); yield return new object[] { invalidDelegateTypeParameterAttribute3 }; var invalidDelegateTypeParameterAttribute4 = new CodeTypeDelegate("name"); invalidDelegateTypeParameterAttribute4.TypeParameters.Add(invalidTypeParameterAttribute4); yield return new object[] { invalidDelegateTypeParameterAttribute4 }; var invalidDelegateTypeParameterAttribute5 = new CodeTypeDelegate("name"); 
invalidDelegateTypeParameterAttribute5.TypeParameters.Add(invalidTypeParameterAttribute5); yield return new object[] { invalidDelegateTypeParameterAttribute5 }; var invalidDelegateTypeParameterAttribute6 = new CodeTypeDelegate("name"); invalidDelegateTypeParameterAttribute6.TypeParameters.Add(invalidTypeParameterAttribute6); yield return new object[] { invalidDelegateTypeParameterAttribute6 }; var invalidDelegateTypeParameterConstraint1 = new CodeTypeDelegate("name"); invalidDelegateTypeParameterConstraint1.TypeParameters.Add(invalidParameterConstraint1); yield return new object[] { invalidDelegateTypeParameterConstraint1 }; var invalidDelegateTypeParameterConstraint2 = new CodeTypeDelegate("name"); invalidDelegateTypeParameterConstraint2.TypeParameters.Add(invalidParameterConstraint2); yield return new object[] { invalidDelegateTypeParameterConstraint2 }; var invalidDelegateTypeParameterConstraint3 = new CodeTypeDelegate("name"); invalidDelegateTypeParameterConstraint3.TypeParameters.Add(invalidParameterConstraint6); yield return new object[] { invalidDelegateTypeParameterConstraint3 }; var invalidDelegateTypeParameterConstraint4 = new CodeTypeDelegate("name"); invalidDelegateTypeParameterConstraint4.TypeParameters.Add(invalidParameterConstraint7); yield return new object[] { invalidDelegateTypeParameterConstraint4 }; var invalidDelegateTypeParameterConstraint5 = new CodeTypeDelegate("name"); invalidDelegateTypeParameterConstraint5.TypeParameters.Add(invalidParameterConstraint8); yield return new object[] { invalidDelegateTypeParameterConstraint5 }; var invalidDelegateBaseType1 = new CodeTypeDelegate("name"); invalidDelegateBaseType1.BaseTypes.Add(new CodeTypeReference()); yield return new object[] { invalidDelegateBaseType1 }; var invalidDelegateBaseType2 = new CodeTypeDelegate("name"); invalidDelegateBaseType2.BaseTypes.Add(new CodeTypeReference("0")); yield return new object[] { invalidDelegateBaseType2 }; var invalidDelegateBaseType3 = new 
CodeTypeDelegate("name"); invalidDelegateBaseType3.BaseTypes.Add(invalidTypeReference1); yield return new object[] { invalidDelegateBaseType3 }; var invalidDelegateBaseType4 = new CodeTypeDelegate("name"); invalidDelegateBaseType4.BaseTypes.Add(invalidTypeReference2); yield return new object[] { invalidDelegateBaseType4 }; var invalidDelegateBaseType5 = new CodeTypeDelegate("name"); invalidDelegateBaseType5.BaseTypes.Add(invalidTypeReference3); yield return new object[] { invalidDelegateBaseType5 }; var invalidDelegateParameter1 = new CodeTypeDelegate { Name = "name" }; invalidDelegateParameter1.Parameters.Add(new CodeParameterDeclarationExpression()); yield return new object[] { invalidDelegateParameter1 }; var invalidDelegateParameter2 = new CodeTypeDelegate { Name = "name" }; invalidDelegateParameter2.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference(), "name")); yield return new object[] { invalidDelegateParameter2 }; var invalidDelegateParameter3 = new CodeTypeDelegate { Name = "name" }; invalidDelegateParameter3.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("0"), "name")); yield return new object[] { invalidDelegateParameter3 }; var invalidDelegateParameter4 = new CodeTypeDelegate { Name = "name" }; invalidDelegateParameter4.Parameters.Add(new CodeParameterDeclarationExpression(invalidTypeReference1, "name")); yield return new object[] { invalidDelegateParameter4 }; var invalidDelegateParameter5 = new CodeTypeDelegate { Name = "name" }; invalidDelegateParameter5.Parameters.Add(new CodeParameterDeclarationExpression(invalidTypeReference2, "name")); yield return new object[] { invalidDelegateParameter5 }; var invalidDelegateParameter6 = new CodeTypeDelegate { Name = "name" }; invalidDelegateParameter6.Parameters.Add(new CodeParameterDeclarationExpression(invalidTypeReference3, "name")); yield return new object[] { invalidDelegateParameter6 }; var invalidDelegateParameter7 = new CodeTypeDelegate { Name = 
"name" }; invalidDelegateParameter7.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("type"), null)); yield return new object[] { invalidDelegateParameter7 }; var invalidDelegateParameter8 = new CodeTypeDelegate { Name = "name" }; invalidDelegateParameter8.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("type"), string.Empty)); yield return new object[] { invalidDelegateParameter8 }; var invalidDelegateParameter9 = new CodeTypeDelegate { Name = "name" }; invalidDelegateParameter9.Parameters.Add(new CodeParameterDeclarationExpression(new CodeTypeReference("type"), "0")); yield return new object[] { invalidDelegateParameter9 }; var invalidDelegateParameterAttribute1 = new CodeTypeDelegate { Name = "name" }; invalidDelegateParameterAttribute1.Parameters.Add(invalidParameterAttribute1); yield return new object[] { invalidDelegateParameterAttribute1 }; var invalidDelegateParameterAttribute2 = new CodeTypeDelegate { Name = "name" }; invalidDelegateParameterAttribute2.Parameters.Add(invalidParameterAttribute2); yield return new object[] { invalidDelegateParameterAttribute2 }; var invalidDelegateParameterAttribute3 = new CodeTypeDelegate { Name = "name" }; invalidDelegateParameterAttribute3.Parameters.Add(invalidParameterAttribute3); yield return new object[] { invalidDelegateParameterAttribute3 }; var invalidDelegateParameterAttribute4 = new CodeTypeDelegate { Name = "name" }; invalidDelegateParameterAttribute4.Parameters.Add(invalidParameterAttribute4); yield return new object[] { invalidDelegateParameterAttribute4 }; var invalidDelegateParameterAttribute5 = new CodeTypeDelegate { Name = "name" }; invalidDelegateParameterAttribute5.Parameters.Add(invalidParameterAttribute5); yield return new object[] { invalidDelegateParameterAttribute5 }; var invalidDelegateParameterAttribute6 = new CodeTypeDelegate { Name = "name" }; invalidDelegateParameterAttribute6.Parameters.Add(invalidParameterAttribute6); yield return new 
object[] { invalidDelegateParameterAttribute6 }; // CodeNamespace. yield return new object[] { new CodeNamespace("0") }; var invalidNamespaceType1 = new CodeNamespace("name"); invalidNamespaceType1.Types.Add(new CodeTypeDeclaration()); yield return new object[] { invalidNamespaceType1 }; var invalidNamespaceType2 = new CodeNamespace("name"); invalidNamespaceType2.Types.Add(new CodeTypeDeclaration(null)); yield return new object[] { invalidNamespaceType2 }; var invalidNamespaceType3 = new CodeNamespace("name"); invalidNamespaceType3.Types.Add(new CodeTypeDeclaration(string.Empty)); yield return new object[] { invalidNamespaceType3 }; var invalidNamespaceType4 = new CodeNamespace("name"); invalidNamespaceType4.Types.Add(new CodeTypeDeclaration("0")); yield return new object[] { invalidNamespaceType4 }; // CodeCompileUnit. var invalidCompileUnitAttribute1 = new CodeCompileUnit(); invalidCompileUnitAttribute1.AssemblyCustomAttributes.Add(new CodeAttributeDeclaration()); yield return new object[] { invalidCompileUnitAttribute1 }; var invalidCompileUnitAttribute2 = new CodeCompileUnit(); invalidCompileUnitAttribute2.AssemblyCustomAttributes.Add(new CodeAttributeDeclaration((string)null)); yield return new object[] { invalidCompileUnitAttribute2 }; var invalidCompileUnitAttribute3 = new CodeCompileUnit(); invalidCompileUnitAttribute3.AssemblyCustomAttributes.Add(new CodeAttributeDeclaration(string.Empty)); yield return new object[] { invalidCompileUnitAttribute3 }; var invalidCompileUnitAttribute4 = new CodeCompileUnit(); invalidCompileUnitAttribute4.AssemblyCustomAttributes.Add(new CodeAttributeDeclaration("0")); yield return new object[] { invalidCompileUnitAttribute4 }; var invalidCompileUnitAttribute5 = new CodeCompileUnit(); invalidCompileUnitAttribute5.AssemblyCustomAttributes.Add(new CodeAttributeDeclaration("name", new CodeAttributeArgument("0", new CodePrimitiveExpression(1)))); yield return new object[] { invalidCompileUnitAttribute5 }; var 
invalidCompileUnitAttribute6 = new CodeCompileUnit(); invalidCompileUnitAttribute6.AssemblyCustomAttributes.Add(new CodeAttributeDeclaration("name", new CodeAttributeArgument("name", new CodeExpression()))); yield return new object[] { invalidCompileUnitAttribute6 }; var invalidCompileUnitStartDirective1 = new CodeCompileUnit(); invalidCompileUnitStartDirective1.StartDirectives.Add(new CodeDirective()); yield return new object[] { invalidCompileUnitStartDirective1 }; var invalidCompileUnitStartDirective2 = new CodeCompileUnit(); invalidCompileUnitStartDirective2.StartDirectives.Add(new CodeChecksumPragma("\0", Guid.NewGuid(), new byte[0])); yield return new object[] { invalidCompileUnitStartDirective2 }; foreach (char newLineChar in new char[] { '\r', '\n', '\u2028', '\u2029', '\u0085' }) { var invalidCompileUnitStartDirective3 = new CodeCompileUnit(); invalidCompileUnitStartDirective3.StartDirectives.Add(new CodeRegionDirective(CodeRegionMode.None, $"te{newLineChar}xt")); yield return new object[] { invalidCompileUnitStartDirective3 }; } var invalidCompileUnitEndDirective1 = new CodeCompileUnit(); invalidCompileUnitEndDirective1.EndDirectives.Add(new CodeDirective()); yield return new object[] { invalidCompileUnitEndDirective1 }; var invalidCompileUnitEndDirective2 = new CodeCompileUnit(); invalidCompileUnitEndDirective2.EndDirectives.Add(new CodeChecksumPragma("\0", Guid.NewGuid(), new byte[0])); yield return new object[] { invalidCompileUnitEndDirective2 }; foreach (char newLineChar in new char[] { '\r', '\n', '\u2028', '\u2029', '\u0085' }) { var invalidCompileUnitEndDirective3 = new CodeCompileUnit(); invalidCompileUnitEndDirective3.EndDirectives.Add(new CodeRegionDirective(CodeRegionMode.None, $"te{newLineChar}xt")); yield return new object[] { invalidCompileUnitEndDirective3 }; } // CodeSnippetCompileUnit. 
var invalidSnippetCompileUnitStartDirective1 = new CodeSnippetCompileUnit(); invalidSnippetCompileUnitStartDirective1.StartDirectives.Add(new CodeDirective()); yield return new object[] { invalidSnippetCompileUnitStartDirective1 }; var invalidSnippetCompileUnitStartDirective2 = new CodeSnippetCompileUnit(); invalidSnippetCompileUnitStartDirective2.StartDirectives.Add(new CodeChecksumPragma("\0", Guid.NewGuid(), new byte[0])); yield return new object[] { invalidSnippetCompileUnitStartDirective2 }; foreach (char newLineChar in new char[] { '\r', '\n', '\u2028', '\u2029', '\u0085' }) { var invalidSnippetCompileUnitStartDirective3 = new CodeSnippetCompileUnit(); invalidSnippetCompileUnitStartDirective3.StartDirectives.Add(new CodeRegionDirective(CodeRegionMode.None, $"te{newLineChar}xt")); yield return new object[] { invalidSnippetCompileUnitStartDirective3 }; } var invalidSnippetCompileUnitEndDirective1 = new CodeSnippetCompileUnit(); invalidSnippetCompileUnitEndDirective1.EndDirectives.Add(new CodeDirective()); yield return new object[] { invalidSnippetCompileUnitEndDirective1 }; var invalidSnippetCompileUnitEndDirective2 = new CodeSnippetCompileUnit(); invalidSnippetCompileUnitEndDirective2.EndDirectives.Add(new CodeChecksumPragma("\0", Guid.NewGuid(), new byte[0])); yield return new object[] { invalidSnippetCompileUnitEndDirective2 }; foreach (char newLineChar in new char[] { '\r', '\n', '\u2028', '\u2029', '\u0085' }) { var invalidSnippetCompileUnitEndDirective3 = new CodeSnippetCompileUnit(); invalidSnippetCompileUnitEndDirective3.EndDirectives.Add(new CodeRegionDirective(CodeRegionMode.None, $"te{newLineChar}xt")); yield return new object[] { invalidSnippetCompileUnitEndDirective3 }; } // CodeArrayCreateExpression. 
yield return new object[] { new CodeArrayCreateExpression(new CodeTypeReference()) }; yield return new object[] { new CodeArrayCreateExpression(new CodeTypeReference("0")) }; yield return new object[] { new CodeArrayCreateExpression(invalidTypeReference1) }; yield return new object[] { new CodeArrayCreateExpression(invalidTypeReference2) }; yield return new object[] { new CodeArrayCreateExpression(invalidTypeReference3) { SizeExpression = new CodeExpression() } } ; yield return new object[] { new CodeArrayCreateExpression(new CodeTypeReference("name"), new CodeExpression[] { new CodeExpression() }) }; // CodeBinaryOperatorExpression. yield return new object[] { new CodeBinaryOperatorExpression(new CodeExpression(), CodeBinaryOperatorType.Add, new CodePrimitiveExpression(2)) }; yield return new object[] { new CodeBinaryOperatorExpression(new CodePrimitiveExpression(1), CodeBinaryOperatorType.Add, new CodeExpression()) }; // CodeCastExpression. yield return new object[] { new CodeCastExpression(new CodeTypeReference(), new CodePrimitiveExpression(2)) }; yield return new object[] { new CodeCastExpression(new CodeTypeReference("0"), new CodePrimitiveExpression(2)) }; yield return new object[] { new CodeCastExpression(invalidTypeReference1, new CodePrimitiveExpression(2)) }; yield return new object[] { new CodeCastExpression(invalidTypeReference2, new CodePrimitiveExpression(2)) }; yield return new object[] { new CodeCastExpression(invalidTypeReference3, new CodePrimitiveExpression(2)) }; yield return new object[] { new CodeCastExpression(new CodeTypeReference("name"), new CodeExpression()) }; // CodeDefaultValueExpression. 
yield return new object[] { new CodeDefaultValueExpression(new CodeTypeReference()) }; yield return new object[] { new CodeDefaultValueExpression(new CodeTypeReference("0")) }; yield return new object[] { new CodeDefaultValueExpression(invalidTypeReference1) }; yield return new object[] { new CodeDefaultValueExpression(invalidTypeReference2) }; yield return new object[] { new CodeDefaultValueExpression(invalidTypeReference3) }; // CodeDelegateCreateExpression. yield return new object[] { new CodeDelegateCreateExpression(new CodeTypeReference(), new CodePrimitiveExpression(1), "name") }; yield return new object[] { new CodeDelegateCreateExpression(new CodeTypeReference("0"), new CodePrimitiveExpression(1), "name") }; yield return new object[] { new CodeDelegateCreateExpression(invalidTypeReference1, new CodePrimitiveExpression(1), "name") }; yield return new object[] { new CodeDelegateCreateExpression(invalidTypeReference2, new CodePrimitiveExpression(1), "name") }; yield return new object[] { new CodeDelegateCreateExpression(invalidTypeReference3, new CodePrimitiveExpression(1), "name") }; yield return new object[] { new CodeDelegateCreateExpression(new CodeTypeReference("name"), new CodeExpression(), "name") }; yield return new object[] { new CodeDelegateCreateExpression(new CodeTypeReference("name"), new CodePrimitiveExpression(1), null) }; yield return new object[] { new CodeDelegateCreateExpression(new CodeTypeReference("name"), new CodePrimitiveExpression(1), string.Empty) }; yield return new object[] { new CodeDelegateCreateExpression(new CodeTypeReference("name"), new CodePrimitiveExpression(1), "0") }; // CodeFieldReferenceExpression. 
yield return new object[] { new CodeFieldReferenceExpression() }; yield return new object[] { new CodeFieldReferenceExpression(null, null) }; yield return new object[] { new CodeFieldReferenceExpression(null, string.Empty) }; yield return new object[] { new CodeFieldReferenceExpression(null, "0") }; yield return new object[] { new CodeFieldReferenceExpression(new CodeExpression(), "name") }; // CodeArgumentReferenceExpression. yield return new object[] { new CodeArgumentReferenceExpression() }; yield return new object[] { new CodeArgumentReferenceExpression(null) }; yield return new object[] { new CodeArgumentReferenceExpression(string.Empty) }; yield return new object[] { new CodeArgumentReferenceExpression("0") }; // CodeVariableReferenceExpression. yield return new object[] { new CodeVariableReferenceExpression() }; yield return new object[] { new CodeVariableReferenceExpression(null) }; yield return new object[] { new CodeVariableReferenceExpression(string.Empty) }; yield return new object[] { new CodeVariableReferenceExpression("0") }; // CodeIndexerExpression. yield return new object[] { new CodeIndexerExpression(new CodeExpression()) }; yield return new object[] { new CodeIndexerExpression(new CodePrimitiveExpression(1), new CodeExpression()) }; // CodeArrayIndexerExpression. yield return new object[] { new CodeArrayIndexerExpression(new CodeExpression()) }; yield return new object[] { new CodeArrayIndexerExpression(new CodePrimitiveExpression(1), new CodeExpression()) }; // CodeMethodInvokeExpression. 
yield return new object[] { new CodeMethodInvokeExpression(new CodeMethodReferenceExpression()) }; yield return new object[] { new CodeMethodInvokeExpression(new CodeMethodReferenceExpression(null, null)) }; yield return new object[] { new CodeMethodInvokeExpression(new CodeMethodReferenceExpression(null, string.Empty)) }; yield return new object[] { new CodeMethodInvokeExpression(new CodeMethodReferenceExpression(null, "0")) }; yield return new object[] { new CodeMethodInvokeExpression(new CodeMethodReferenceExpression(new CodeExpression(), "name")) }; yield return new object[] { new CodeMethodInvokeExpression(new CodeMethodReferenceExpression(new CodePrimitiveExpression(1), "name", new CodeTypeReference[] { new CodeTypeReference() })) }; yield return new object[] { new CodeMethodInvokeExpression(new CodeMethodReferenceExpression(new CodePrimitiveExpression(1), "name", new CodeTypeReference[] { new CodeTypeReference("0") })) }; yield return new object[] { new CodeMethodInvokeExpression(new CodeMethodReferenceExpression(new CodePrimitiveExpression(1), "name", new CodeTypeReference[] { invalidTypeReference1 })) }; yield return new object[] { new CodeMethodInvokeExpression(new CodeMethodReferenceExpression(new CodePrimitiveExpression(1), "name", new CodeTypeReference[] { invalidTypeReference2 })) }; yield return new object[] { new CodeMethodInvokeExpression(new CodeMethodReferenceExpression(new CodePrimitiveExpression(1), "name", new CodeTypeReference[] { invalidTypeReference3 })) }; yield return new object[] { new CodeMethodInvokeExpression(new CodeMethodReferenceExpression(null, "name"), new CodeExpression()) }; // CodeMethodReferenceExpression. 
yield return new object[] { new CodeMethodReferenceExpression() }; yield return new object[] { new CodeMethodReferenceExpression(null, null) }; yield return new object[] { new CodeMethodReferenceExpression(null, string.Empty) }; yield return new object[] { new CodeMethodReferenceExpression(null, "0") }; yield return new object[] { new CodeMethodReferenceExpression(new CodeExpression(), "name") }; yield return new object[] { new CodeMethodReferenceExpression(new CodePrimitiveExpression(1), "name", new CodeTypeReference[] { new CodeTypeReference() }) }; yield return new object[] { new CodeMethodReferenceExpression(new CodePrimitiveExpression(1), "name", new CodeTypeReference[] { new CodeTypeReference("0") }) }; yield return new object[] { new CodeMethodReferenceExpression(new CodePrimitiveExpression(1), "name", new CodeTypeReference[] { invalidTypeReference1 }) }; yield return new object[] { new CodeMethodReferenceExpression(new CodePrimitiveExpression(1), "name", new CodeTypeReference[] { invalidTypeReference2 }) }; yield return new object[] { new CodeMethodReferenceExpression(new CodePrimitiveExpression(1), "name", new CodeTypeReference[] { invalidTypeReference3 }) }; // CodeEventReferenceExpression. yield return new object[] { new CodeEventReferenceExpression() }; yield return new object[] { new CodeEventReferenceExpression(null, null) }; yield return new object[] { new CodeEventReferenceExpression(null, string.Empty) }; yield return new object[] { new CodeEventReferenceExpression(null, "0") }; yield return new object[] { new CodeEventReferenceExpression(new CodeExpression(), "name") }; // CodeDelegateInvokeExpression. yield return new object[] { new CodeDelegateInvokeExpression(new CodeExpression()) }; yield return new object[] { new CodeDelegateInvokeExpression(new CodePrimitiveExpression(1), new CodeExpression()) }; // CodeObjectCreateExpression. 
yield return new object[] { new CodeObjectCreateExpression(new CodeTypeReference()) }; yield return new object[] { new CodeObjectCreateExpression(new CodeTypeReference("0")) }; yield return new object[] { new CodeObjectCreateExpression(invalidTypeReference1) }; yield return new object[] { new CodeObjectCreateExpression(invalidTypeReference2) }; yield return new object[] { new CodeObjectCreateExpression(invalidTypeReference3) }; yield return new object[] { new CodeObjectCreateExpression(new CodeTypeReference("name"), new CodeExpression()) }; // CodeDirectionExpression. yield return new object[] { new CodeDirectionExpression(FieldDirection.In, new CodeExpression()) }; // CodePropertyReferenceExpression. yield return new object[] { new CodePropertyReferenceExpression() }; yield return new object[] { new CodePropertyReferenceExpression(null, null) }; yield return new object[] { new CodePropertyReferenceExpression(null, string.Empty) }; yield return new object[] { new CodePropertyReferenceExpression(null, "0") }; yield return new object[] { new CodePropertyReferenceExpression(new CodeExpression(), "name") }; // CodeTypeReferenceExpression. yield return new object[] { new CodeTypeReferenceExpression(new CodeTypeReference()) }; yield return new object[] { new CodeTypeReferenceExpression(new CodeTypeReference("0")) }; yield return new object[] { new CodeTypeReferenceExpression(invalidTypeReference1) }; yield return new object[] { new CodeTypeReferenceExpression(invalidTypeReference2) }; yield return new object[] { new CodeTypeReferenceExpression(invalidTypeReference3) }; // CodeTypeOfExpression. 
yield return new object[] { new CodeTypeOfExpression(new CodeTypeReference()) }; yield return new object[] { new CodeTypeOfExpression(new CodeTypeReference("0")) }; yield return new object[] { new CodeTypeOfExpression(invalidTypeReference1) }; yield return new object[] { new CodeTypeOfExpression(invalidTypeReference2) }; yield return new object[] { new CodeTypeOfExpression(invalidTypeReference3) }; // CodeMethodReturnStatement. yield return new object[] { new CodeMethodReturnStatement(new CodeExpression()) }; // CodeConditionStatement. yield return new object[] { new CodeConditionStatement(new CodePrimitiveExpression("1"), new CodeStatement[] { new CodeStatement() }, new CodeStatement[] { new CodeMethodReturnStatement(), new CodeMethodReturnStatement { LinePragma = new CodeLinePragma() } }) }; yield return new object[] { new CodeConditionStatement(new CodePrimitiveExpression("1"), new CodeStatement[] { new CodeMethodReturnStatement() }, new CodeStatement[] { new CodeStatement() }) }; // CodeTryCatchFinallyStatement. 
yield return new object[] { new CodeTryCatchFinallyStatement( new CodeStatement[] { new CodeStatement() }, new CodeCatchClause[] { new CodeCatchClause("localName") }, new CodeStatement[] { new CodeMethodReturnStatement() } ) }; yield return new object[] { new CodeTryCatchFinallyStatement( new CodeStatement[] { new CodeMethodReturnStatement() }, new CodeCatchClause[] { new CodeCatchClause() }, new CodeStatement[] { new CodeMethodReturnStatement() } ) }; yield return new object[] { new CodeTryCatchFinallyStatement( new CodeStatement[] { new CodeMethodReturnStatement() }, new CodeCatchClause[] { new CodeCatchClause(null) }, new CodeStatement[] { new CodeMethodReturnStatement() } ) }; yield return new object[] { new CodeTryCatchFinallyStatement( new CodeStatement[] { new CodeMethodReturnStatement() }, new CodeCatchClause[] { new CodeCatchClause(string.Empty) }, new CodeStatement[] { new CodeMethodReturnStatement() } ) }; yield return new object[] { new CodeTryCatchFinallyStatement( new CodeStatement[] { new CodeMethodReturnStatement() }, new CodeCatchClause[] { new CodeCatchClause("0") }, new CodeStatement[] { new CodeMethodReturnStatement() } ) }; yield return new object[] { new CodeTryCatchFinallyStatement( new CodeStatement[] { new CodeMethodReturnStatement() }, new CodeCatchClause[] { new CodeCatchClause("localName", new CodeTypeReference()) }, new CodeStatement[] { new CodeMethodReturnStatement() } ) }; yield return new object[] { new CodeTryCatchFinallyStatement( new CodeStatement[] { new CodeMethodReturnStatement() }, new CodeCatchClause[] { new CodeCatchClause("localName", new CodeTypeReference("0")) }, new CodeStatement[] { new CodeMethodReturnStatement() } ) }; yield return new object[] { new CodeTryCatchFinallyStatement( new CodeStatement[] { new CodeMethodReturnStatement() }, new CodeCatchClause[] { new CodeCatchClause("localName", new CodeTypeReference("exceptionType"), new CodeStatement()) }, new CodeStatement[] { new CodeMethodReturnStatement() } ) }; 
yield return new object[] { new CodeTryCatchFinallyStatement( new CodeStatement[] { new CodeMethodReturnStatement() }, new CodeCatchClause[] { new CodeCatchClause("localName") }, new CodeStatement[] { new CodeStatement() } ) }; // CodeAssignStatement. yield return new object[] { new CodeAssignStatement(new CodeExpression(), new CodePrimitiveExpression(1)) }; yield return new object[] { new CodeAssignStatement(new CodePrimitiveExpression(1), new CodeExpression()) }; // CodeExpressionStatement. yield return new object[] { new CodeExpressionStatement(new CodeExpression()) }; // CodeIterationStatement. yield return new object[] { new CodeIterationStatement(new CodeStatement(), new CodePrimitiveExpression(1), new CodeMethodReturnStatement()) }; yield return new object[] { new CodeIterationStatement(new CodeMethodReturnStatement(), new CodeExpression(), new CodeMethodReturnStatement()) }; yield return new object[] { new CodeIterationStatement(new CodeMethodReturnStatement(), new CodePrimitiveExpression(1), new CodeStatement()) }; yield return new object[] { new CodeIterationStatement(new CodeMethodReturnStatement(), new CodePrimitiveExpression(1), new CodeMethodReturnStatement(), new CodeStatement()) }; // CodeThrowExceptionStatement. yield return new object[] { new CodeThrowExceptionStatement(new CodeExpression()) }; // CodeVariableDeclarationStatement. 
yield return new object[] { new CodeVariableDeclarationStatement() }; yield return new object[] { new CodeVariableDeclarationStatement(new CodeTypeReference(), "name") }; yield return new object[] { new CodeVariableDeclarationStatement(new CodeTypeReference("0"), "name") }; yield return new object[] { new CodeVariableDeclarationStatement(invalidTypeReference1, "name") }; yield return new object[] { new CodeVariableDeclarationStatement(invalidTypeReference2, "name") }; yield return new object[] { new CodeVariableDeclarationStatement(invalidTypeReference3, "name") }; yield return new object[] { new CodeVariableDeclarationStatement(new CodeTypeReference("name"), null) }; yield return new object[] { new CodeVariableDeclarationStatement(new CodeTypeReference("name"), string.Empty) }; yield return new object[] { new CodeVariableDeclarationStatement(new CodeTypeReference("name"), "0") }; yield return new object[] { new CodeVariableDeclarationStatement(new CodeTypeReference("name"), "name", new CodeExpression()) }; // CodeAttachEventStatement. yield return new object[] { new CodeAttachEventStatement() }; yield return new object[] { new CodeAttachEventStatement(new CodeEventReferenceExpression(), new CodePrimitiveExpression(1)) }; yield return new object[] { new CodeAttachEventStatement(new CodeEventReferenceExpression(null, null), new CodePrimitiveExpression(1)) }; yield return new object[] { new CodeAttachEventStatement(new CodeEventReferenceExpression(null, string.Empty), new CodePrimitiveExpression(1)) }; yield return new object[] { new CodeAttachEventStatement(new CodeEventReferenceExpression(null, "0"), new CodePrimitiveExpression(1)) }; yield return new object[] { new CodeAttachEventStatement(new CodeEventReferenceExpression(new CodeExpression(), "name"), new CodePrimitiveExpression(1)) }; yield return new object[] { new CodeAttachEventStatement(null, new CodeExpression()) }; // CodeRemoveEventStatement. 
yield return new object[] { new CodeRemoveEventStatement() }; yield return new object[] { new CodeRemoveEventStatement(new CodeEventReferenceExpression(), new CodePrimitiveExpression(1)) }; yield return new object[] { new CodeRemoveEventStatement(new CodeEventReferenceExpression(null, null), new CodePrimitiveExpression(1)) }; yield return new object[] { new CodeRemoveEventStatement(new CodeEventReferenceExpression(null, string.Empty), new CodePrimitiveExpression(1)) }; yield return new object[] { new CodeRemoveEventStatement(new CodeEventReferenceExpression(null, "0"), new CodePrimitiveExpression(1)) }; yield return new object[] { new CodeRemoveEventStatement(new CodeEventReferenceExpression(new CodeExpression(), "name"), new CodePrimitiveExpression(1)) }; yield return new object[] { new CodeRemoveEventStatement(null, new CodeExpression()) }; // CodeGotoStatement. yield return new object[] { new CodeGotoStatement() }; yield return new object[] { new CodeGotoStatement("0") }; // CodeLabeledStatement. yield return new object[] { new CodeLabeledStatement() }; yield return new object[] { new CodeLabeledStatement(null) }; yield return new object[] { new CodeLabeledStatement(string.Empty) }; yield return new object[] { new CodeLabeledStatement("0") }; yield return new object[] { new CodeLabeledStatement("name", new CodeStatement()) }; // Misc. 
yield return new object[] { new CodeStatement() }; yield return new object[] { new CustomCodeStatement() }; yield return new object[] { new CodeExpression() }; yield return new object[] { new CustomCodeExpression() }; yield return new object[] { new CodeDirective() }; yield return new object[] { new CustomCodeDirective() }; yield return new object[] { new CodeTypeParameter() }; yield return new object[] { new CodeTypeParameter("name") }; yield return new object[] { new CodeObject() }; yield return new object[] { new CustomCodeObject() }; yield return new object[] { new CodeTypeMember() }; yield return new object[] { new CustomCodeTypeMember() }; yield return new object[] { new CodeTypeReference(";") }; yield return new object[] { new CodeTypeReference("/") }; yield return new object[] { new CodeTypeReference("#") }; yield return new object[] { new CodeTypeReference("%") }; yield return new object[] { new CodeTypeReference("=") }; yield return new object[] { new CodeTypeReference("?") }; yield return new object[] { new CodeTypeReference("\\") }; yield return new object[] { new CodeTypeReference("^") }; yield return new object[] { new CodeTypeReference("'") }; yield return new object[] { new CodeTypeReference(")") }; yield return new object[] { new CodeTypeReference("(") }; } public static IEnumerable<object[]> ValidIdentifier_InvalidMemberInType_TestData() { foreach (object[] testData in ValidateIdentifiers_Invalid_TestData()) { if (testData[0] is CodeTypeMember member) { var t = new CodeTypeDeclaration("name"); t.Members.Add(member); yield return new object[] { t }; var n = new CodeNamespace("namespace"); n.Types.Add(t); yield return new object[] { n }; } else if (testData[0] is CodeTypeDeclaration type) { var n = new CodeNamespace(); n.Types.Add(type); yield return new object[] { n }; } } } [Theory] [MemberData(nameof(ValidateIdentifiers_Invalid_TestData))] [MemberData(nameof(ValidIdentifier_InvalidMemberInType_TestData))] 
[SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Fixed incorrect param name in some situations")] public void ValidateIdentifiers_InvalidE_ThrowsArgumentException(CodeObject e) { AssertExtensions.Throws<ArgumentException>("e", () => CodeGenerator.ValidateIdentifiers(e)); } public static IEnumerable<object[]> ValidateIdentifiers_NullE_TestData() { yield return new object[] { null }; var invalidTypeAttribute = new CodeTypeDeclaration("name"); invalidTypeAttribute.CustomAttributes.Add(new CodeAttributeDeclaration("name", new CodeAttributeArgument())); yield return new object[] { invalidTypeAttribute }; var invalidTypeParameterAttribute = new CodeTypeParameter("parameter"); invalidTypeParameterAttribute.CustomAttributes.Add(new CodeAttributeDeclaration("attribute", new CodeAttributeArgument())); var invalidTypeTypeParameterAttribute = new CodeTypeDeclaration("name"); invalidTypeTypeParameterAttribute.TypeParameters.Add(invalidTypeParameterAttribute); yield return new object[] { invalidTypeTypeParameterAttribute }; yield return new object[] { new CodeBinaryOperatorExpression() }; yield return new object[] { new CodeBinaryOperatorExpression(null, CodeBinaryOperatorType.Add, new CodePrimitiveExpression(2)) }; yield return new object[] { new CodeBinaryOperatorExpression(new CodePrimitiveExpression(1), CodeBinaryOperatorType.Add, null) }; yield return new object[] { new CodeCastExpression() }; yield return new object[] { new CodeCastExpression(new CodeTypeReference("name"), null) }; yield return new object[] { new CodeDelegateCreateExpression() }; yield return new object[] { new CodeDelegateCreateExpression(new CodeTypeReference("name"), null, "methodName") }; yield return new object[] { new CodeIndexerExpression() }; yield return new object[] { new CodeIndexerExpression(null) }; yield return new object[] { new CodeArrayIndexerExpression() }; yield return new object[] { new CodeArrayIndexerExpression(null) }; yield return new object[] { new 
CodeDirectionExpression() }; yield return new object[] { new CodeDirectionExpression(FieldDirection.In, null) }; yield return new object[] { new CodeExpressionStatement() }; yield return new object[] { new CodeExpressionStatement(null) }; yield return new object[] { new CodeConditionStatement() }; yield return new object[] { new CodeConditionStatement(null) }; yield return new object[] { new CodeAssignStatement() }; yield return new object[] { new CodeAssignStatement(null, new CodePrimitiveExpression(1)) }; yield return new object[] { new CodeAssignStatement(new CodePrimitiveExpression(1), null) }; yield return new object[] { new CodeIterationStatement() }; yield return new object[] { new CodeIterationStatement(null, new CodePrimitiveExpression(1), new CodeMethodReturnStatement()) }; yield return new object[] { new CodeIterationStatement(new CodeMethodReturnStatement(), null, new CodeMethodReturnStatement()) }; yield return new object[] { new CodeIterationStatement(new CodeMethodReturnStatement(), new CodePrimitiveExpression(1), null) }; yield return new object[] { new CodeAttachEventStatement(new CodeEventReferenceExpression(new CodePrimitiveExpression(1), "name"), null) }; yield return new object[] { new CodeRemoveEventStatement(new CodeEventReferenceExpression(new CodePrimitiveExpression(1), "name"), null) }; } [Theory] [MemberData(nameof(ValidateIdentifiers_NullE_TestData))] [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Fixed NullReferenceException")] public void ValidateIdentifiers_NullE_ThrowsArgumentNullException(CodeObject e) { AssertExtensions.Throws<ArgumentNullException>("e", () => CodeGenerator.ValidateIdentifiers(e)); } private class CustomCodeExpression : CodeExpression { } private class CustomCodeStatement : CodeStatement { } private class CustomCodeTypeMember : CodeTypeMember { } private class CustomCodeDirective : CodeDirective { } private class CustomCodeObject : CodeObject { } } }
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./src/tests/JIT/Directed/shift/uint64Opt_r.csproj
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType /> <Optimize /> </PropertyGroup> <ItemGroup> <Compile Include="uint64Opt.cs" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType /> <Optimize /> </PropertyGroup> <ItemGroup> <Compile Include="uint64Opt.cs" /> </ItemGroup> </Project>
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./src/tests/JIT/Regression/JitBlue/Runtime_57064/Runtime_57064.csproj
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> </PropertyGroup> <PropertyGroup> <DebugType>None</DebugType> <Optimize>True</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="$(MSBuildProjectName).cs" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> </PropertyGroup> <PropertyGroup> <DebugType>None</DebugType> <Optimize>True</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="$(MSBuildProjectName).cs" /> </ItemGroup> </Project>
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./src/libraries/System.Reflection.Metadata/tests/Metadata/Ecma335/Encoding/ExceptionRegionEncoderTests.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Reflection.Metadata.Tests; using Xunit; namespace System.Reflection.Metadata.Ecma335.Tests { public class ExceptionRegionEncoderTests { [Fact] public void IsSmallRegionCount() { Assert.True(ExceptionRegionEncoder.IsSmallRegionCount(0)); Assert.True(ExceptionRegionEncoder.IsSmallRegionCount(20)); Assert.False(ExceptionRegionEncoder.IsSmallRegionCount(-1)); Assert.False(ExceptionRegionEncoder.IsSmallRegionCount(21)); Assert.False(ExceptionRegionEncoder.IsSmallRegionCount(int.MinValue)); Assert.False(ExceptionRegionEncoder.IsSmallRegionCount(int.MaxValue)); } [Fact] public void IsSmallExceptionRegion() { Assert.True(ExceptionRegionEncoder.IsSmallExceptionRegion(0, 0)); Assert.True(ExceptionRegionEncoder.IsSmallExceptionRegion(ushort.MaxValue, byte.MaxValue)); Assert.False(ExceptionRegionEncoder.IsSmallExceptionRegion(ushort.MaxValue + 1, byte.MaxValue)); Assert.False(ExceptionRegionEncoder.IsSmallExceptionRegion(ushort.MaxValue, byte.MaxValue + 1)); Assert.False(ExceptionRegionEncoder.IsSmallExceptionRegion(-1, 0)); Assert.False(ExceptionRegionEncoder.IsSmallExceptionRegion(0, -1)); Assert.False(ExceptionRegionEncoder.IsSmallExceptionRegion(int.MinValue, int.MinValue)); Assert.False(ExceptionRegionEncoder.IsSmallExceptionRegion(int.MaxValue, int.MaxValue)); Assert.False(ExceptionRegionEncoder.IsSmallExceptionRegion(int.MaxValue, int.MinValue)); Assert.False(ExceptionRegionEncoder.IsSmallExceptionRegion(int.MinValue, int.MaxValue)); } [Fact] public void SerializeTableHeader() { var builder = new BlobBuilder(); builder.WriteByte(0xff); ExceptionRegionEncoder.SerializeTableHeader(builder, ExceptionRegionEncoder.MaxSmallExceptionRegions, hasSmallRegions: true); AssertEx.Equal(new byte[] { 0xff, 0x00, 0x00, 0x00, // padding 0x01, // flags 0xf4, // size 0x00, 0x00 }, builder.ToArray()); builder.Clear(); 
builder.WriteByte(0xff); ExceptionRegionEncoder.SerializeTableHeader(builder, ExceptionRegionEncoder.MaxExceptionRegions, hasSmallRegions: false); AssertEx.Equal(new byte[] { 0xff, 0x00, 0x00, 0x00, // padding 0x41, // flags 0xf4, 0xff, 0xff, // size }, builder.ToArray()); } [Fact] public void Add_Small() { var builder = new BlobBuilder(); var encoder = new ExceptionRegionEncoder(builder, hasSmallFormat: true); encoder.Add(ExceptionRegionKind.Catch, 1, 2, 4, 5, catchType: MetadataTokens.TypeDefinitionHandle(1)); AssertEx.Equal(new byte[] { 0x00, 0x00, // kind 0x01, 0x00, // try offset 0x02, // try length 0x04, 0x00, // handler offset 0x05, // handler length 0x01, 0x00, 0x00, 0x02 // catch type }, builder.ToArray()); builder.Clear(); encoder.Add(ExceptionRegionKind.Filter, 0xffff, 0xff, 0xffff, 0xff, filterOffset: int.MaxValue); AssertEx.Equal(new byte[] { 0x01, 0x00, // kind 0xff, 0xff, // try offset 0xff, // try length 0xff, 0xff, // handler offset 0xff, // handler length 0xff, 0xff, 0xff, 0x7f // filter offset }, builder.ToArray()); builder.Clear(); encoder.Add(ExceptionRegionKind.Fault, 0xffff, 0xff, 0xffff, 0xff); AssertEx.Equal(new byte[] { 0x04, 0x00, // kind 0xff, 0xff, // try offset 0xff, // try length 0xff, 0xff, // handler offset 0xff, // handler length 0x00, 0x00, 0x00, 0x00 }, builder.ToArray()); builder.Clear(); encoder.Add(ExceptionRegionKind.Finally, 0, 0, 0, 0); AssertEx.Equal(new byte[] { 0x02, 0x00, // kind 0x00, 0x00, // try offset 0x00, // try length 0x00, 0x00, // handler offset 0x00, // handler length 0x00, 0x00, 0x00, 0x00 }, builder.ToArray()); builder.Clear(); } [Fact] public void Add_Large() { var builder = new BlobBuilder(); var encoder = new ExceptionRegionEncoder(builder, hasSmallFormat: false); encoder.Add(ExceptionRegionKind.Catch, 1, 2, 4, 5, catchType: MetadataTokens.TypeDefinitionHandle(1)); AssertEx.Equal(new byte[] { 0x00, 0x00, 0x00, 0x00, // kind 0x01, 0x00, 0x00, 0x00, // try offset 0x02, 0x00, 0x00, 0x00, // try length 0x04, 
0x00, 0x00, 0x00, // handler offset 0x05, 0x00, 0x00, 0x00, // handler length 0x01, 0x00, 0x00, 0x02 // catch type }, builder.ToArray()); builder.Clear(); encoder.Add(ExceptionRegionKind.Filter, int.MaxValue, int.MaxValue, int.MaxValue, int.MaxValue, filterOffset: int.MaxValue); AssertEx.Equal(new byte[] { 0x01, 0x00, 0x00, 0x00, // kind 0xff, 0xff, 0xff, 0x7f, // try offset 0xff, 0xff, 0xff, 0x7f, // try length 0xff, 0xff, 0xff, 0x7f, // handler offset 0xff, 0xff, 0xff, 0x7f, // handler length 0xff, 0xff, 0xff, 0x7f // filter offset }, builder.ToArray()); builder.Clear(); encoder.Add(ExceptionRegionKind.Fault, int.MaxValue, int.MaxValue, int.MaxValue, int.MaxValue); AssertEx.Equal(new byte[] { 0x04, 0x00, 0x00, 0x00, // kind 0xff, 0xff, 0xff, 0x7f, // try offset 0xff, 0xff, 0xff, 0x7f, // try length 0xff, 0xff, 0xff, 0x7f, // handler offset 0xff, 0xff, 0xff, 0x7f, // handler length 0x00, 0x00, 0x00, 0x00 }, builder.ToArray()); builder.Clear(); encoder.Add(ExceptionRegionKind.Finally, 0, 0, 0, 0); AssertEx.Equal(new byte[] { 0x02, 0x00, 0x00, 0x00, // kind 0x00, 0x00, 0x00, 0x00, // try offset 0x00, 0x00, 0x00, 0x00, // try length 0x00, 0x00, 0x00, 0x00, // handler offset 0x00, 0x00, 0x00, 0x00, // handler length 0x00, 0x00, 0x00, 0x00 }, builder.ToArray()); builder.Clear(); } [Fact] public void Add_Errors() { Assert.Throws<InvalidOperationException>(() => default(ExceptionRegionEncoder).Add(ExceptionRegionKind.Fault, 0, 0, 0, 0)); var builder = new BlobBuilder(); var smallEncoder = new ExceptionRegionEncoder(builder, hasSmallFormat: true); var fatEncoder = new ExceptionRegionEncoder(builder, hasSmallFormat: false); Assert.Throws<ArgumentOutOfRangeException>(() => smallEncoder.Add(ExceptionRegionKind.Finally, -1, 2, 4, 5)); Assert.Throws<ArgumentOutOfRangeException>(() => smallEncoder.Add(ExceptionRegionKind.Finally, 1, -1, 4, 5)); Assert.Throws<ArgumentOutOfRangeException>(() => smallEncoder.Add(ExceptionRegionKind.Finally, 1, 2, -1, 5)); 
Assert.Throws<ArgumentOutOfRangeException>(() => smallEncoder.Add(ExceptionRegionKind.Finally, 1, 2, 4, -1)); Assert.Throws<ArgumentOutOfRangeException>(() => smallEncoder.Add(ExceptionRegionKind.Finally, 0x10000, 2, 4, 5)); Assert.Throws<ArgumentOutOfRangeException>(() => smallEncoder.Add(ExceptionRegionKind.Finally, 1, 0x100, 4, 5)); Assert.Throws<ArgumentOutOfRangeException>(() => smallEncoder.Add(ExceptionRegionKind.Finally, 1, 2, 0x10000, 5)); Assert.Throws<ArgumentOutOfRangeException>(() => smallEncoder.Add(ExceptionRegionKind.Finally, 1, 2, 4, 0x100)); Assert.Throws<ArgumentOutOfRangeException>(() => fatEncoder.Add(ExceptionRegionKind.Finally, -1, 2, 4, 5)); Assert.Throws<ArgumentOutOfRangeException>(() => fatEncoder.Add(ExceptionRegionKind.Finally, 1, -1, 4, 5)); Assert.Throws<ArgumentOutOfRangeException>(() => fatEncoder.Add(ExceptionRegionKind.Finally, 1, 2, -1, 5)); Assert.Throws<ArgumentOutOfRangeException>(() => fatEncoder.Add(ExceptionRegionKind.Finally, 1, 2, 4, -1)); Assert.Throws<ArgumentOutOfRangeException>(() => fatEncoder.Add((ExceptionRegionKind)5, 1, 2, 4, 5)); Assert.Throws<ArgumentOutOfRangeException>(() => fatEncoder.Add(ExceptionRegionKind.Filter, 1, 2, 4, 5, filterOffset: -1)); AssertExtensions.Throws<ArgumentException>("catchType", () => fatEncoder.Add(ExceptionRegionKind.Catch, 1, 2, 4, 5, catchType: default(EntityHandle))); AssertExtensions.Throws<ArgumentException>("catchType", () => fatEncoder.Add(ExceptionRegionKind.Catch, 1, 2, 4, 5, catchType: MetadataTokens.ImportScopeHandle(1))); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Reflection.Metadata.Tests; using Xunit; namespace System.Reflection.Metadata.Ecma335.Tests { public class ExceptionRegionEncoderTests { [Fact] public void IsSmallRegionCount() { Assert.True(ExceptionRegionEncoder.IsSmallRegionCount(0)); Assert.True(ExceptionRegionEncoder.IsSmallRegionCount(20)); Assert.False(ExceptionRegionEncoder.IsSmallRegionCount(-1)); Assert.False(ExceptionRegionEncoder.IsSmallRegionCount(21)); Assert.False(ExceptionRegionEncoder.IsSmallRegionCount(int.MinValue)); Assert.False(ExceptionRegionEncoder.IsSmallRegionCount(int.MaxValue)); } [Fact] public void IsSmallExceptionRegion() { Assert.True(ExceptionRegionEncoder.IsSmallExceptionRegion(0, 0)); Assert.True(ExceptionRegionEncoder.IsSmallExceptionRegion(ushort.MaxValue, byte.MaxValue)); Assert.False(ExceptionRegionEncoder.IsSmallExceptionRegion(ushort.MaxValue + 1, byte.MaxValue)); Assert.False(ExceptionRegionEncoder.IsSmallExceptionRegion(ushort.MaxValue, byte.MaxValue + 1)); Assert.False(ExceptionRegionEncoder.IsSmallExceptionRegion(-1, 0)); Assert.False(ExceptionRegionEncoder.IsSmallExceptionRegion(0, -1)); Assert.False(ExceptionRegionEncoder.IsSmallExceptionRegion(int.MinValue, int.MinValue)); Assert.False(ExceptionRegionEncoder.IsSmallExceptionRegion(int.MaxValue, int.MaxValue)); Assert.False(ExceptionRegionEncoder.IsSmallExceptionRegion(int.MaxValue, int.MinValue)); Assert.False(ExceptionRegionEncoder.IsSmallExceptionRegion(int.MinValue, int.MaxValue)); } [Fact] public void SerializeTableHeader() { var builder = new BlobBuilder(); builder.WriteByte(0xff); ExceptionRegionEncoder.SerializeTableHeader(builder, ExceptionRegionEncoder.MaxSmallExceptionRegions, hasSmallRegions: true); AssertEx.Equal(new byte[] { 0xff, 0x00, 0x00, 0x00, // padding 0x01, // flags 0xf4, // size 0x00, 0x00 }, builder.ToArray()); builder.Clear(); 
builder.WriteByte(0xff); ExceptionRegionEncoder.SerializeTableHeader(builder, ExceptionRegionEncoder.MaxExceptionRegions, hasSmallRegions: false); AssertEx.Equal(new byte[] { 0xff, 0x00, 0x00, 0x00, // padding 0x41, // flags 0xf4, 0xff, 0xff, // size }, builder.ToArray()); } [Fact] public void Add_Small() { var builder = new BlobBuilder(); var encoder = new ExceptionRegionEncoder(builder, hasSmallFormat: true); encoder.Add(ExceptionRegionKind.Catch, 1, 2, 4, 5, catchType: MetadataTokens.TypeDefinitionHandle(1)); AssertEx.Equal(new byte[] { 0x00, 0x00, // kind 0x01, 0x00, // try offset 0x02, // try length 0x04, 0x00, // handler offset 0x05, // handler length 0x01, 0x00, 0x00, 0x02 // catch type }, builder.ToArray()); builder.Clear(); encoder.Add(ExceptionRegionKind.Filter, 0xffff, 0xff, 0xffff, 0xff, filterOffset: int.MaxValue); AssertEx.Equal(new byte[] { 0x01, 0x00, // kind 0xff, 0xff, // try offset 0xff, // try length 0xff, 0xff, // handler offset 0xff, // handler length 0xff, 0xff, 0xff, 0x7f // filter offset }, builder.ToArray()); builder.Clear(); encoder.Add(ExceptionRegionKind.Fault, 0xffff, 0xff, 0xffff, 0xff); AssertEx.Equal(new byte[] { 0x04, 0x00, // kind 0xff, 0xff, // try offset 0xff, // try length 0xff, 0xff, // handler offset 0xff, // handler length 0x00, 0x00, 0x00, 0x00 }, builder.ToArray()); builder.Clear(); encoder.Add(ExceptionRegionKind.Finally, 0, 0, 0, 0); AssertEx.Equal(new byte[] { 0x02, 0x00, // kind 0x00, 0x00, // try offset 0x00, // try length 0x00, 0x00, // handler offset 0x00, // handler length 0x00, 0x00, 0x00, 0x00 }, builder.ToArray()); builder.Clear(); } [Fact] public void Add_Large() { var builder = new BlobBuilder(); var encoder = new ExceptionRegionEncoder(builder, hasSmallFormat: false); encoder.Add(ExceptionRegionKind.Catch, 1, 2, 4, 5, catchType: MetadataTokens.TypeDefinitionHandle(1)); AssertEx.Equal(new byte[] { 0x00, 0x00, 0x00, 0x00, // kind 0x01, 0x00, 0x00, 0x00, // try offset 0x02, 0x00, 0x00, 0x00, // try length 0x04, 
0x00, 0x00, 0x00, // handler offset 0x05, 0x00, 0x00, 0x00, // handler length 0x01, 0x00, 0x00, 0x02 // catch type }, builder.ToArray()); builder.Clear(); encoder.Add(ExceptionRegionKind.Filter, int.MaxValue, int.MaxValue, int.MaxValue, int.MaxValue, filterOffset: int.MaxValue); AssertEx.Equal(new byte[] { 0x01, 0x00, 0x00, 0x00, // kind 0xff, 0xff, 0xff, 0x7f, // try offset 0xff, 0xff, 0xff, 0x7f, // try length 0xff, 0xff, 0xff, 0x7f, // handler offset 0xff, 0xff, 0xff, 0x7f, // handler length 0xff, 0xff, 0xff, 0x7f // filter offset }, builder.ToArray()); builder.Clear(); encoder.Add(ExceptionRegionKind.Fault, int.MaxValue, int.MaxValue, int.MaxValue, int.MaxValue); AssertEx.Equal(new byte[] { 0x04, 0x00, 0x00, 0x00, // kind 0xff, 0xff, 0xff, 0x7f, // try offset 0xff, 0xff, 0xff, 0x7f, // try length 0xff, 0xff, 0xff, 0x7f, // handler offset 0xff, 0xff, 0xff, 0x7f, // handler length 0x00, 0x00, 0x00, 0x00 }, builder.ToArray()); builder.Clear(); encoder.Add(ExceptionRegionKind.Finally, 0, 0, 0, 0); AssertEx.Equal(new byte[] { 0x02, 0x00, 0x00, 0x00, // kind 0x00, 0x00, 0x00, 0x00, // try offset 0x00, 0x00, 0x00, 0x00, // try length 0x00, 0x00, 0x00, 0x00, // handler offset 0x00, 0x00, 0x00, 0x00, // handler length 0x00, 0x00, 0x00, 0x00 }, builder.ToArray()); builder.Clear(); } [Fact] public void Add_Errors() { Assert.Throws<InvalidOperationException>(() => default(ExceptionRegionEncoder).Add(ExceptionRegionKind.Fault, 0, 0, 0, 0)); var builder = new BlobBuilder(); var smallEncoder = new ExceptionRegionEncoder(builder, hasSmallFormat: true); var fatEncoder = new ExceptionRegionEncoder(builder, hasSmallFormat: false); Assert.Throws<ArgumentOutOfRangeException>(() => smallEncoder.Add(ExceptionRegionKind.Finally, -1, 2, 4, 5)); Assert.Throws<ArgumentOutOfRangeException>(() => smallEncoder.Add(ExceptionRegionKind.Finally, 1, -1, 4, 5)); Assert.Throws<ArgumentOutOfRangeException>(() => smallEncoder.Add(ExceptionRegionKind.Finally, 1, 2, -1, 5)); 
Assert.Throws<ArgumentOutOfRangeException>(() => smallEncoder.Add(ExceptionRegionKind.Finally, 1, 2, 4, -1)); Assert.Throws<ArgumentOutOfRangeException>(() => smallEncoder.Add(ExceptionRegionKind.Finally, 0x10000, 2, 4, 5)); Assert.Throws<ArgumentOutOfRangeException>(() => smallEncoder.Add(ExceptionRegionKind.Finally, 1, 0x100, 4, 5)); Assert.Throws<ArgumentOutOfRangeException>(() => smallEncoder.Add(ExceptionRegionKind.Finally, 1, 2, 0x10000, 5)); Assert.Throws<ArgumentOutOfRangeException>(() => smallEncoder.Add(ExceptionRegionKind.Finally, 1, 2, 4, 0x100)); Assert.Throws<ArgumentOutOfRangeException>(() => fatEncoder.Add(ExceptionRegionKind.Finally, -1, 2, 4, 5)); Assert.Throws<ArgumentOutOfRangeException>(() => fatEncoder.Add(ExceptionRegionKind.Finally, 1, -1, 4, 5)); Assert.Throws<ArgumentOutOfRangeException>(() => fatEncoder.Add(ExceptionRegionKind.Finally, 1, 2, -1, 5)); Assert.Throws<ArgumentOutOfRangeException>(() => fatEncoder.Add(ExceptionRegionKind.Finally, 1, 2, 4, -1)); Assert.Throws<ArgumentOutOfRangeException>(() => fatEncoder.Add((ExceptionRegionKind)5, 1, 2, 4, 5)); Assert.Throws<ArgumentOutOfRangeException>(() => fatEncoder.Add(ExceptionRegionKind.Filter, 1, 2, 4, 5, filterOffset: -1)); AssertExtensions.Throws<ArgumentException>("catchType", () => fatEncoder.Add(ExceptionRegionKind.Catch, 1, 2, 4, 5, catchType: default(EntityHandle))); AssertExtensions.Throws<ArgumentException>("catchType", () => fatEncoder.Add(ExceptionRegionKind.Catch, 1, 2, 4, 5, catchType: MetadataTokens.ImportScopeHandle(1))); } } }
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRun/AttributePresenceFilterNode.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Collections.Generic; using System.Collections.Immutable; using System.Diagnostics; using System.Numerics; using System.Reflection.Metadata; using System.Reflection.Metadata.Ecma335; using Internal.Text; using Internal.TypeSystem.Ecma; namespace ILCompiler.DependencyAnalysis.ReadyToRun { public class AttributePresenceFilterNode : HeaderTableNode { private EcmaModule _module; public override int ClassCode => 56456113; public AttributePresenceFilterNode(EcmaModule module) : base(module.Context.Target) { _module = module; } public override void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) { sb.Append(nameMangler.CompilationUnitPrefix); sb.Append("__ReadyToRunAttributePresenceFilter__"); sb.Append(_module.Assembly.GetName().Name); } private struct CustomAttributeEntry { public string TypeNamespace; public string TypeName; public int Parent; } private List<CustomAttributeEntry> GetCustomAttributeEntries() { MetadataReader reader = _module.MetadataReader; List<CustomAttributeEntry> customAttributeEntries = new List<CustomAttributeEntry>(); foreach (var handle in reader.CustomAttributes) { CustomAttribute customAttribute = reader.GetCustomAttribute(handle); EntityHandle customAttributeConstructorHandle = customAttribute.Constructor; string customAttributeTypeNamespace, customAttributeTypeName; ReadCustomAttributeTypeNameWithoutResolving(customAttributeConstructorHandle, out customAttributeTypeNamespace, out customAttributeTypeName); // System.Runtime.CompilerServices.NullableAttribute is NEVER added to the table (There are *many* of these, and they provide no useful value to the runtime) if (customAttributeTypeNamespace == "System.Runtime.CompilerServices" && customAttributeTypeName == "NullableAttribute") { continue; } bool addToTable = false; if 
(customAttributeTypeNamespace.StartsWith("System.Runtime.")) { addToTable = true; } else if (customAttributeTypeNamespace == "System") { // Some historical well known attributes were placed in the System namespace. Special case them if (customAttributeTypeName == "ParamArrayAttribute") { addToTable = true; } else if (customAttributeTypeName == "ThreadStaticAttribute") { addToTable = true; } } else if (customAttributeTypeNamespace == "System.Reflection") { // Historical attribute in the System.Reflection namespace if (customAttributeTypeName == "DefaultMemberAttribute") { addToTable = true; } } if (!addToTable) continue; customAttributeEntries.Add(new CustomAttributeEntry { TypeNamespace = customAttributeTypeNamespace, TypeName = customAttributeTypeName, Parent = reader.GetToken(customAttribute.Parent) }); } return customAttributeEntries; } /** * This class is used to extract the first type handle in a signature. * * In the case that a custom attribute's constructor is a MemberReference, * and its parent is a TypeSpec, we have to parse the signature, but we do * not want to actually resolve the types. So we used this dummy signature * type provider to extract the first type handle. 
*/ private class FirstTypeHandleExtractor : ISignatureTypeProvider<DummyType, DummyGenericContext> { private EntityHandle _firstTypeHandle; public EntityHandle FirstTypeHandle => _firstTypeHandle; public DummyType GetTypeFromDefinition(MetadataReader reader, TypeDefinitionHandle handle, byte rawTypeKind) { if (_firstTypeHandle.IsNil) { _firstTypeHandle = handle; } return new DummyType(); } public DummyType GetTypeFromReference(MetadataReader reader, TypeReferenceHandle handle, byte rawTypeKind) { if (_firstTypeHandle.IsNil) { _firstTypeHandle = handle; } return DummyType.Instance; } #region Uninteresting dummy methods // These methods are required by the interface, but it is otherwise uninteresting for our purpose here public DummyType GetArrayType(DummyType elementType, ArrayShape shape) { return DummyType.Instance; } public DummyType GetByReferenceType(DummyType elementType) { return DummyType.Instance; } public DummyType GetFunctionPointerType(MethodSignature<DummyType> signature) { return DummyType.Instance; } public DummyType GetGenericInstantiation(DummyType genericType, ImmutableArray<DummyType> typeArguments) { return DummyType.Instance; } public DummyType GetGenericMethodParameter(DummyGenericContext genericContext, int index) { return DummyType.Instance; } public DummyType GetGenericTypeParameter(DummyGenericContext genericContext, int index) { return DummyType.Instance; } public DummyType GetModifiedType(DummyType modifier, DummyType unmodifiedType, bool isRequired) { return DummyType.Instance; } public DummyType GetPinnedType(DummyType elementType) { return DummyType.Instance; } public DummyType GetPointerType(DummyType elementType) { return DummyType.Instance; } public DummyType GetPrimitiveType(PrimitiveTypeCode typeCode) { return new DummyType(); } public DummyType GetSZArrayType(DummyType elementType) { return DummyType.Instance; } public DummyType GetTypeFromSpecification(MetadataReader reader, DummyGenericContext genericContext, 
TypeSpecificationHandle handle, byte rawTypeKind) { return DummyType.Instance; } #endregion } #region Uninteresting dummy types private class DummyType { public static DummyType Instance = new DummyType(); } private class DummyGenericContext { } #endregion private void ReadCustomAttributeTypeNameWithoutResolving(EntityHandle customAttributeConstructorHandle, out string customAttributeTypeNamespace, out string customAttributeTypeName) { /** * It is possible that the assembly that defines the attribute is not provided as a reference assembly. * * Most the time, as long as the custom attribute is not accessed or the reference assembly is available at runtime, the code will work just fine. * * If we used _module.GetMethod(customAttributeConstructorHandle), we should have caused an exception and failing the compilation. * * Therefore, we have this alternate path to obtain the type namespace and name. */ if (customAttributeConstructorHandle.Kind == HandleKind.MethodDefinition) { MethodDefinitionHandle customAttributeConstructorDefinitionHandle = (MethodDefinitionHandle)customAttributeConstructorHandle; MethodDefinition customAttributeConstructorDefinition = _module.MetadataReader.GetMethodDefinition(customAttributeConstructorDefinitionHandle); TypeDefinitionHandle customAttributeConstructorTypeDefinitionHandle = customAttributeConstructorDefinition.GetDeclaringType(); GetTypeNameFromTypeDefinitionHandle(customAttributeConstructorTypeDefinitionHandle, out customAttributeTypeNamespace, out customAttributeTypeName); } else if (customAttributeConstructorHandle.Kind == HandleKind.MemberReference) { MemberReferenceHandle customAttributeConstructorReferenceHandle = (MemberReferenceHandle)customAttributeConstructorHandle; MemberReference customAttributeConstructorReference = _module.MetadataReader.GetMemberReference(customAttributeConstructorReferenceHandle); EntityHandle customAttributeConstructorReferenceParentHandle = customAttributeConstructorReference.Parent; if 
(customAttributeConstructorReferenceParentHandle.Kind == HandleKind.TypeReference) { TypeReferenceHandle customAttributeConstructorTypeReferenceHandle = (TypeReferenceHandle)customAttributeConstructorReferenceParentHandle; GetTypeNameFromTypeReferenceHandle(customAttributeConstructorTypeReferenceHandle, out customAttributeTypeNamespace, out customAttributeTypeName); } else { Debug.Assert(customAttributeConstructorReferenceParentHandle.Kind == HandleKind.TypeSpecification); TypeSpecificationHandle customAttributeConstructorTypeSpecificationHandle = (TypeSpecificationHandle)customAttributeConstructorReferenceParentHandle; TypeSpecification customAttributeConstructorTypeSpecification = _module.MetadataReader.GetTypeSpecification(customAttributeConstructorTypeSpecificationHandle); FirstTypeHandleExtractor fakeSignatureTypeProvider = new FirstTypeHandleExtractor(); customAttributeConstructorTypeSpecification.DecodeSignature(fakeSignatureTypeProvider, new DummyGenericContext()); EntityHandle firstTypeHandle = fakeSignatureTypeProvider.FirstTypeHandle; if (firstTypeHandle.Kind == HandleKind.TypeDefinition) { TypeDefinitionHandle customAttributeConstructorTypeDefinitionHandle = (TypeDefinitionHandle)firstTypeHandle; GetTypeNameFromTypeDefinitionHandle(customAttributeConstructorTypeDefinitionHandle, out customAttributeTypeNamespace, out customAttributeTypeName); } else { Debug.Assert(firstTypeHandle.Kind == HandleKind.TypeReference); TypeReferenceHandle customAttributeConstructorTypeReferenceHandle = (TypeReferenceHandle)firstTypeHandle; GetTypeNameFromTypeReferenceHandle(customAttributeConstructorTypeReferenceHandle, out customAttributeTypeNamespace, out customAttributeTypeName); } } } else { Debug.Assert(false); customAttributeTypeNamespace = null; customAttributeTypeName = null; } } private void GetTypeNameFromTypeReferenceHandle(TypeReferenceHandle typeReferenceHandle, out string typeNamespace, out string typeName) { TypeReference typeReference = 
_module.MetadataReader.GetTypeReference(typeReferenceHandle); StringHandle typeNamespaceHandle = typeReference.Namespace; StringHandle typeNameHandle = typeReference.Name; typeNamespace = _module.MetadataReader.GetString(typeNamespaceHandle); typeName = _module.MetadataReader.GetString(typeNameHandle); } private void GetTypeNameFromTypeDefinitionHandle(TypeDefinitionHandle typeDefinitionHandle, out string typeNamespace, out string typeName) { TypeDefinition typeDefinition = _module.MetadataReader.GetTypeDefinition(typeDefinitionHandle); StringHandle typeNamespaceHandle = typeDefinition.Namespace; StringHandle typeNameHandle = typeDefinition.Name; typeNamespace = _module.MetadataReader.GetString(typeNamespaceHandle); typeName = _module.MetadataReader.GetString(typeNameHandle); } internal uint Xoshiro128StarStar(uint[] s) { uint result = BitOperations.RotateLeft(s[1] * 5, 7) * 9; uint t = s[1] << 9; s[2] ^= s[0]; s[3] ^= s[1]; s[1] ^= s[2]; s[0] ^= s[3]; s[2] ^= t; s[3] = BitOperations.RotateLeft(s[3], 11); return result; } public override ObjectData GetData(NodeFactory factory, bool relocsOnly = false) { // This node does not trigger generation of other nodes. if (relocsOnly) return new ObjectData(Array.Empty<byte>(), Array.Empty<Relocation>(), 1, new ISymbolDefinitionNode[] { this }); List<CustomAttributeEntry> customAttributeEntries = GetCustomAttributeEntries(); int countOfEntries = customAttributeEntries.Count; if (countOfEntries == 0) { return new ObjectData(Array.Empty<byte>(), Array.Empty<Relocation>(), 1, new ISymbolDefinitionNode[] { this }); } // Buckets have 8 entries uint minTableBucketCount = (uint)(countOfEntries / 8) + 1; uint bucketCount = 1; // Bucket count must be power of two while (bucketCount < minTableBucketCount) { bucketCount *= 2; } // Resize the array. 
bool tryAgainWithBiggerTable = false; int countOfRetries = 0; ushort[] pTable; do { tryAgainWithBiggerTable = false; uint actualSizeOfTable = bucketCount * 8; // Buckets have 8 entries in them pTable = new ushort[actualSizeOfTable]; uint[] state = new uint[] {729055690, 833774698, 218408041, 493449127}; // Attempt to fill table foreach (var customAttributeEntry in customAttributeEntries) { string name = customAttributeEntry.TypeNamespace + "." + customAttributeEntry.TypeName; // This hashing algorithm MUST match exactly the logic in NativeCuckooFilter int hashOfAttribute = ReadyToRunHashCode.NameHashCode(name); uint hash = unchecked((uint)ReadyToRunHashCode.CombineTwoValuesIntoHash((uint)hashOfAttribute, (uint)customAttributeEntry.Parent)); ushort fingerprint = (ushort)(hash >> 16); if (fingerprint == 0) { fingerprint = 1; } uint bucketAIndex = hash % bucketCount; uint fingerprintHash = (uint)fingerprint; uint bucketBIndex = (bucketAIndex ^ (fingerprintHash % bucketCount)); Debug.Assert(bucketAIndex == (bucketBIndex ^ (fingerprintHash % bucketCount))); if ((Xoshiro128StarStar(state) & 1) != 0) // Randomly choose which bucket to attempt to fill first { uint temp = bucketAIndex; bucketAIndex = bucketBIndex; bucketBIndex = temp; } Func<uint, ushort, bool> hasEntryInBucket = (uint bucketIndex, ushort fprint) => { for (int i = 0; i < 8; i++) { if (pTable[(bucketIndex * 8) + i] == fprint) { return true; } } return false; }; Func<uint, bool> isEmptyEntryInBucket = (uint bucketIndex) => { for (int i = 0; i < 8; i++) { if (pTable[(bucketIndex * 8) + i] == 0) { return true; } } return false; }; Action<uint, ushort> fillEmptyEntryInBucket = (uint bucketIndex, ushort fprint) => { for (int i = 0; i < 8; i++) { if (pTable[(bucketIndex * 8) + i] == 0) { pTable[(bucketIndex * 8) + i] = fprint; return; } } Debug.Assert(false, "Not possible to reach here"); }; // Scan for pre-existing fingerprint entry in buckets if (hasEntryInBucket(bucketAIndex, fingerprint) || 
hasEntryInBucket(bucketBIndex, fingerprint)) { continue; } // Determine if there is space in a bucket to add a new entry if (isEmptyEntryInBucket(bucketAIndex)) { fillEmptyEntryInBucket(bucketAIndex, fingerprint); continue; } if (isEmptyEntryInBucket(bucketBIndex)) { fillEmptyEntryInBucket(bucketBIndex, fingerprint); continue; } bool success = false; int MaxNumKicks = 256; // Note, that bucketAIndex itself was chosen randomly above. for (int n = 0; !success && n < MaxNumKicks; n++) { // Randomly swap an entry in bucket bucketAIndex with fingerprint uint entryIndexInBucket = Xoshiro128StarStar(state) & 0x7; ushort temp = fingerprint; fingerprint = pTable[(bucketAIndex * 8) + entryIndexInBucket]; pTable[(bucketAIndex * 8) + entryIndexInBucket] = temp; // Find other bucket fingerprintHash = (uint)fingerprint; bucketAIndex = bucketAIndex ^ (fingerprintHash % bucketCount); if (isEmptyEntryInBucket(bucketAIndex)) { fillEmptyEntryInBucket(bucketAIndex, fingerprint); success = true; } } if (success) { continue; } tryAgainWithBiggerTable = true; } if (tryAgainWithBiggerTable) { // bucket entry kicking path requires bucket counts to be power of two in size due to use of xor to retrieve second hash bucketCount *= 2; } } while(tryAgainWithBiggerTable && ((countOfRetries++) < 2)); byte[] result; if (tryAgainWithBiggerTable) { result = Array.Empty<byte>(); } else { result = new byte[pTable.Length * 2]; for (int i = 0; i < pTable.Length; i++) { result[i * 2] = (byte)(pTable[i] % 256); result[i * 2 + 1] = (byte)(pTable[i] >> 8); } } ObjectDataBuilder builder = new ObjectDataBuilder(factory, relocsOnly); builder.RequireInitialAlignment(16); builder.AddSymbol(this); builder.EmitBytes(result); return builder.ToObjectData(); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Collections.Generic; using System.Collections.Immutable; using System.Diagnostics; using System.Numerics; using System.Reflection.Metadata; using System.Reflection.Metadata.Ecma335; using Internal.Text; using Internal.TypeSystem.Ecma; namespace ILCompiler.DependencyAnalysis.ReadyToRun { public class AttributePresenceFilterNode : HeaderTableNode { private EcmaModule _module; public override int ClassCode => 56456113; public AttributePresenceFilterNode(EcmaModule module) : base(module.Context.Target) { _module = module; } public override void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) { sb.Append(nameMangler.CompilationUnitPrefix); sb.Append("__ReadyToRunAttributePresenceFilter__"); sb.Append(_module.Assembly.GetName().Name); } private struct CustomAttributeEntry { public string TypeNamespace; public string TypeName; public int Parent; } private List<CustomAttributeEntry> GetCustomAttributeEntries() { MetadataReader reader = _module.MetadataReader; List<CustomAttributeEntry> customAttributeEntries = new List<CustomAttributeEntry>(); foreach (var handle in reader.CustomAttributes) { CustomAttribute customAttribute = reader.GetCustomAttribute(handle); EntityHandle customAttributeConstructorHandle = customAttribute.Constructor; string customAttributeTypeNamespace, customAttributeTypeName; ReadCustomAttributeTypeNameWithoutResolving(customAttributeConstructorHandle, out customAttributeTypeNamespace, out customAttributeTypeName); // System.Runtime.CompilerServices.NullableAttribute is NEVER added to the table (There are *many* of these, and they provide no useful value to the runtime) if (customAttributeTypeNamespace == "System.Runtime.CompilerServices" && customAttributeTypeName == "NullableAttribute") { continue; } bool addToTable = false; if 
(customAttributeTypeNamespace.StartsWith("System.Runtime.")) { addToTable = true; } else if (customAttributeTypeNamespace == "System") { // Some historical well known attributes were placed in the System namespace. Special case them if (customAttributeTypeName == "ParamArrayAttribute") { addToTable = true; } else if (customAttributeTypeName == "ThreadStaticAttribute") { addToTable = true; } } else if (customAttributeTypeNamespace == "System.Reflection") { // Historical attribute in the System.Reflection namespace if (customAttributeTypeName == "DefaultMemberAttribute") { addToTable = true; } } if (!addToTable) continue; customAttributeEntries.Add(new CustomAttributeEntry { TypeNamespace = customAttributeTypeNamespace, TypeName = customAttributeTypeName, Parent = reader.GetToken(customAttribute.Parent) }); } return customAttributeEntries; } /** * This class is used to extract the first type handle in a signature. * * In the case that a custom attribute's constructor is a MemberReference, * and its parent is a TypeSpec, we have to parse the signature, but we do * not want to actually resolve the types. So we used this dummy signature * type provider to extract the first type handle. 
*/ private class FirstTypeHandleExtractor : ISignatureTypeProvider<DummyType, DummyGenericContext> { private EntityHandle _firstTypeHandle; public EntityHandle FirstTypeHandle => _firstTypeHandle; public DummyType GetTypeFromDefinition(MetadataReader reader, TypeDefinitionHandle handle, byte rawTypeKind) { if (_firstTypeHandle.IsNil) { _firstTypeHandle = handle; } return new DummyType(); } public DummyType GetTypeFromReference(MetadataReader reader, TypeReferenceHandle handle, byte rawTypeKind) { if (_firstTypeHandle.IsNil) { _firstTypeHandle = handle; } return DummyType.Instance; } #region Uninteresting dummy methods // These methods are required by the interface, but it is otherwise uninteresting for our purpose here public DummyType GetArrayType(DummyType elementType, ArrayShape shape) { return DummyType.Instance; } public DummyType GetByReferenceType(DummyType elementType) { return DummyType.Instance; } public DummyType GetFunctionPointerType(MethodSignature<DummyType> signature) { return DummyType.Instance; } public DummyType GetGenericInstantiation(DummyType genericType, ImmutableArray<DummyType> typeArguments) { return DummyType.Instance; } public DummyType GetGenericMethodParameter(DummyGenericContext genericContext, int index) { return DummyType.Instance; } public DummyType GetGenericTypeParameter(DummyGenericContext genericContext, int index) { return DummyType.Instance; } public DummyType GetModifiedType(DummyType modifier, DummyType unmodifiedType, bool isRequired) { return DummyType.Instance; } public DummyType GetPinnedType(DummyType elementType) { return DummyType.Instance; } public DummyType GetPointerType(DummyType elementType) { return DummyType.Instance; } public DummyType GetPrimitiveType(PrimitiveTypeCode typeCode) { return new DummyType(); } public DummyType GetSZArrayType(DummyType elementType) { return DummyType.Instance; } public DummyType GetTypeFromSpecification(MetadataReader reader, DummyGenericContext genericContext, 
TypeSpecificationHandle handle, byte rawTypeKind) { return DummyType.Instance; } #endregion } #region Uninteresting dummy types private class DummyType { public static DummyType Instance = new DummyType(); } private class DummyGenericContext { } #endregion private void ReadCustomAttributeTypeNameWithoutResolving(EntityHandle customAttributeConstructorHandle, out string customAttributeTypeNamespace, out string customAttributeTypeName) { /** * It is possible that the assembly that defines the attribute is not provided as a reference assembly. * * Most the time, as long as the custom attribute is not accessed or the reference assembly is available at runtime, the code will work just fine. * * If we used _module.GetMethod(customAttributeConstructorHandle), we should have caused an exception and failing the compilation. * * Therefore, we have this alternate path to obtain the type namespace and name. */ if (customAttributeConstructorHandle.Kind == HandleKind.MethodDefinition) { MethodDefinitionHandle customAttributeConstructorDefinitionHandle = (MethodDefinitionHandle)customAttributeConstructorHandle; MethodDefinition customAttributeConstructorDefinition = _module.MetadataReader.GetMethodDefinition(customAttributeConstructorDefinitionHandle); TypeDefinitionHandle customAttributeConstructorTypeDefinitionHandle = customAttributeConstructorDefinition.GetDeclaringType(); GetTypeNameFromTypeDefinitionHandle(customAttributeConstructorTypeDefinitionHandle, out customAttributeTypeNamespace, out customAttributeTypeName); } else if (customAttributeConstructorHandle.Kind == HandleKind.MemberReference) { MemberReferenceHandle customAttributeConstructorReferenceHandle = (MemberReferenceHandle)customAttributeConstructorHandle; MemberReference customAttributeConstructorReference = _module.MetadataReader.GetMemberReference(customAttributeConstructorReferenceHandle); EntityHandle customAttributeConstructorReferenceParentHandle = customAttributeConstructorReference.Parent; if 
(customAttributeConstructorReferenceParentHandle.Kind == HandleKind.TypeReference) { TypeReferenceHandle customAttributeConstructorTypeReferenceHandle = (TypeReferenceHandle)customAttributeConstructorReferenceParentHandle; GetTypeNameFromTypeReferenceHandle(customAttributeConstructorTypeReferenceHandle, out customAttributeTypeNamespace, out customAttributeTypeName); } else { Debug.Assert(customAttributeConstructorReferenceParentHandle.Kind == HandleKind.TypeSpecification); TypeSpecificationHandle customAttributeConstructorTypeSpecificationHandle = (TypeSpecificationHandle)customAttributeConstructorReferenceParentHandle; TypeSpecification customAttributeConstructorTypeSpecification = _module.MetadataReader.GetTypeSpecification(customAttributeConstructorTypeSpecificationHandle); FirstTypeHandleExtractor fakeSignatureTypeProvider = new FirstTypeHandleExtractor(); customAttributeConstructorTypeSpecification.DecodeSignature(fakeSignatureTypeProvider, new DummyGenericContext()); EntityHandle firstTypeHandle = fakeSignatureTypeProvider.FirstTypeHandle; if (firstTypeHandle.Kind == HandleKind.TypeDefinition) { TypeDefinitionHandle customAttributeConstructorTypeDefinitionHandle = (TypeDefinitionHandle)firstTypeHandle; GetTypeNameFromTypeDefinitionHandle(customAttributeConstructorTypeDefinitionHandle, out customAttributeTypeNamespace, out customAttributeTypeName); } else { Debug.Assert(firstTypeHandle.Kind == HandleKind.TypeReference); TypeReferenceHandle customAttributeConstructorTypeReferenceHandle = (TypeReferenceHandle)firstTypeHandle; GetTypeNameFromTypeReferenceHandle(customAttributeConstructorTypeReferenceHandle, out customAttributeTypeNamespace, out customAttributeTypeName); } } } else { Debug.Assert(false); customAttributeTypeNamespace = null; customAttributeTypeName = null; } } private void GetTypeNameFromTypeReferenceHandle(TypeReferenceHandle typeReferenceHandle, out string typeNamespace, out string typeName) { TypeReference typeReference = 
_module.MetadataReader.GetTypeReference(typeReferenceHandle); StringHandle typeNamespaceHandle = typeReference.Namespace; StringHandle typeNameHandle = typeReference.Name; typeNamespace = _module.MetadataReader.GetString(typeNamespaceHandle); typeName = _module.MetadataReader.GetString(typeNameHandle); } private void GetTypeNameFromTypeDefinitionHandle(TypeDefinitionHandle typeDefinitionHandle, out string typeNamespace, out string typeName) { TypeDefinition typeDefinition = _module.MetadataReader.GetTypeDefinition(typeDefinitionHandle); StringHandle typeNamespaceHandle = typeDefinition.Namespace; StringHandle typeNameHandle = typeDefinition.Name; typeNamespace = _module.MetadataReader.GetString(typeNamespaceHandle); typeName = _module.MetadataReader.GetString(typeNameHandle); } internal uint Xoshiro128StarStar(uint[] s) { uint result = BitOperations.RotateLeft(s[1] * 5, 7) * 9; uint t = s[1] << 9; s[2] ^= s[0]; s[3] ^= s[1]; s[1] ^= s[2]; s[0] ^= s[3]; s[2] ^= t; s[3] = BitOperations.RotateLeft(s[3], 11); return result; } public override ObjectData GetData(NodeFactory factory, bool relocsOnly = false) { // This node does not trigger generation of other nodes. if (relocsOnly) return new ObjectData(Array.Empty<byte>(), Array.Empty<Relocation>(), 1, new ISymbolDefinitionNode[] { this }); List<CustomAttributeEntry> customAttributeEntries = GetCustomAttributeEntries(); int countOfEntries = customAttributeEntries.Count; if (countOfEntries == 0) { return new ObjectData(Array.Empty<byte>(), Array.Empty<Relocation>(), 1, new ISymbolDefinitionNode[] { this }); } // Buckets have 8 entries uint minTableBucketCount = (uint)(countOfEntries / 8) + 1; uint bucketCount = 1; // Bucket count must be power of two while (bucketCount < minTableBucketCount) { bucketCount *= 2; } // Resize the array. 
bool tryAgainWithBiggerTable = false; int countOfRetries = 0; ushort[] pTable; do { tryAgainWithBiggerTable = false; uint actualSizeOfTable = bucketCount * 8; // Buckets have 8 entries in them pTable = new ushort[actualSizeOfTable]; uint[] state = new uint[] {729055690, 833774698, 218408041, 493449127}; // Attempt to fill table foreach (var customAttributeEntry in customAttributeEntries) { string name = customAttributeEntry.TypeNamespace + "." + customAttributeEntry.TypeName; // This hashing algorithm MUST match exactly the logic in NativeCuckooFilter int hashOfAttribute = ReadyToRunHashCode.NameHashCode(name); uint hash = unchecked((uint)ReadyToRunHashCode.CombineTwoValuesIntoHash((uint)hashOfAttribute, (uint)customAttributeEntry.Parent)); ushort fingerprint = (ushort)(hash >> 16); if (fingerprint == 0) { fingerprint = 1; } uint bucketAIndex = hash % bucketCount; uint fingerprintHash = (uint)fingerprint; uint bucketBIndex = (bucketAIndex ^ (fingerprintHash % bucketCount)); Debug.Assert(bucketAIndex == (bucketBIndex ^ (fingerprintHash % bucketCount))); if ((Xoshiro128StarStar(state) & 1) != 0) // Randomly choose which bucket to attempt to fill first { uint temp = bucketAIndex; bucketAIndex = bucketBIndex; bucketBIndex = temp; } Func<uint, ushort, bool> hasEntryInBucket = (uint bucketIndex, ushort fprint) => { for (int i = 0; i < 8; i++) { if (pTable[(bucketIndex * 8) + i] == fprint) { return true; } } return false; }; Func<uint, bool> isEmptyEntryInBucket = (uint bucketIndex) => { for (int i = 0; i < 8; i++) { if (pTable[(bucketIndex * 8) + i] == 0) { return true; } } return false; }; Action<uint, ushort> fillEmptyEntryInBucket = (uint bucketIndex, ushort fprint) => { for (int i = 0; i < 8; i++) { if (pTable[(bucketIndex * 8) + i] == 0) { pTable[(bucketIndex * 8) + i] = fprint; return; } } Debug.Assert(false, "Not possible to reach here"); }; // Scan for pre-existing fingerprint entry in buckets if (hasEntryInBucket(bucketAIndex, fingerprint) || 
hasEntryInBucket(bucketBIndex, fingerprint)) { continue; } // Determine if there is space in a bucket to add a new entry if (isEmptyEntryInBucket(bucketAIndex)) { fillEmptyEntryInBucket(bucketAIndex, fingerprint); continue; } if (isEmptyEntryInBucket(bucketBIndex)) { fillEmptyEntryInBucket(bucketBIndex, fingerprint); continue; } bool success = false; int MaxNumKicks = 256; // Note, that bucketAIndex itself was chosen randomly above. for (int n = 0; !success && n < MaxNumKicks; n++) { // Randomly swap an entry in bucket bucketAIndex with fingerprint uint entryIndexInBucket = Xoshiro128StarStar(state) & 0x7; ushort temp = fingerprint; fingerprint = pTable[(bucketAIndex * 8) + entryIndexInBucket]; pTable[(bucketAIndex * 8) + entryIndexInBucket] = temp; // Find other bucket fingerprintHash = (uint)fingerprint; bucketAIndex = bucketAIndex ^ (fingerprintHash % bucketCount); if (isEmptyEntryInBucket(bucketAIndex)) { fillEmptyEntryInBucket(bucketAIndex, fingerprint); success = true; } } if (success) { continue; } tryAgainWithBiggerTable = true; } if (tryAgainWithBiggerTable) { // bucket entry kicking path requires bucket counts to be power of two in size due to use of xor to retrieve second hash bucketCount *= 2; } } while(tryAgainWithBiggerTable && ((countOfRetries++) < 2)); byte[] result; if (tryAgainWithBiggerTable) { result = Array.Empty<byte>(); } else { result = new byte[pTable.Length * 2]; for (int i = 0; i < pTable.Length; i++) { result[i * 2] = (byte)(pTable[i] % 256); result[i * 2 + 1] = (byte)(pTable[i] >> 8); } } ObjectDataBuilder builder = new ObjectDataBuilder(factory, relocsOnly); builder.RequireInitialAlignment(16); builder.AddSymbol(this); builder.EmitBytes(result); return builder.ToObjectData(); } } }
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./src/libraries/Microsoft.XmlSerializer.Generator/src/GenerateNupkgProps.targets
<Project> <PropertyGroup> <BeforePack>GenerateNupkgProps;$(BeforePack)</BeforePack> <PropsFilePath>$(BaseIntermediateOutputPath)$(PackageId).props</PropsFilePath> </PropertyGroup> <ItemGroup> <None Include="$(PropsFilePath)" PackagePath="build" Pack="true" /> </ItemGroup> <Target Name="GenerateNupkgProps" Inputs="$(MSBuildThisFileFullPath)" Outputs="$(PropsFilePath)"> <PropertyGroup> <PropsFileContents> &lt;Project&gt; &lt;ItemGroup&gt; &lt;DotNetCliToolReference Include="$(PackageId)" Version="$(PackageVersion)" /&gt; &lt;/ItemGroup&gt; &lt;/Project&gt; </PropsFileContents> </PropertyGroup> <WriteLinesToFile File="$(PropsFilePath)" Lines="$(PropsFileContents)" Overwrite="true" /> </Target> </Project>
<Project> <PropertyGroup> <BeforePack>GenerateNupkgProps;$(BeforePack)</BeforePack> <PropsFilePath>$(BaseIntermediateOutputPath)$(PackageId).props</PropsFilePath> </PropertyGroup> <ItemGroup> <None Include="$(PropsFilePath)" PackagePath="build" Pack="true" /> </ItemGroup> <Target Name="GenerateNupkgProps" Inputs="$(MSBuildThisFileFullPath)" Outputs="$(PropsFilePath)"> <PropertyGroup> <PropsFileContents> &lt;Project&gt; &lt;ItemGroup&gt; &lt;DotNetCliToolReference Include="$(PackageId)" Version="$(PackageVersion)" /&gt; &lt;/ItemGroup&gt; &lt;/Project&gt; </PropsFileContents> </PropertyGroup> <WriteLinesToFile File="$(PropsFilePath)" Lines="$(PropsFileContents)" Overwrite="true" /> </Target> </Project>
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./src/libraries/System.Net.ServicePoint/ref/System.Net.ServicePoint.csproj
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <TargetFramework>$(NetCoreAppCurrent)</TargetFramework> <Nullable>enable</Nullable> </PropertyGroup> <ItemGroup> <Compile Include="System.Net.ServicePoint.cs" /> </ItemGroup> <ItemGroup> <ProjectReference Include="..\..\System.Net.Primitives\ref\System.Net.Primitives.csproj" /> <ProjectReference Include="..\..\System.Net.Security\ref\System.Net.Security.csproj" /> <ProjectReference Include="..\..\System.Runtime\ref\System.Runtime.csproj" /> <ProjectReference Include="..\..\System.Security.Cryptography.X509Certificates\ref\System.Security.Cryptography.X509Certificates.csproj" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <TargetFramework>$(NetCoreAppCurrent)</TargetFramework> <Nullable>enable</Nullable> </PropertyGroup> <ItemGroup> <Compile Include="System.Net.ServicePoint.cs" /> </ItemGroup> <ItemGroup> <ProjectReference Include="..\..\System.Net.Primitives\ref\System.Net.Primitives.csproj" /> <ProjectReference Include="..\..\System.Net.Security\ref\System.Net.Security.csproj" /> <ProjectReference Include="..\..\System.Runtime\ref\System.Runtime.csproj" /> <ProjectReference Include="..\..\System.Security.Cryptography.X509Certificates\ref\System.Security.Cryptography.X509Certificates.csproj" /> </ItemGroup> </Project>
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./src/tests/JIT/jit64/valuetypes/nullable/box-unbox/null/box-unbox-null030.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Runtime.InteropServices; using System; internal class NullableTest { private static bool BoxUnboxToNQGen<T>(T o) { return ((object)o) == null; } private static bool BoxUnboxToQGen<T>(T? o) where T : struct { return ((T?)o) == null; } private static bool BoxUnboxToNQ(object o) { return o == null; } private static bool BoxUnboxToQ(object o) { return ((NotEmptyStructConstrainedGenQ<int>?)o) == null; } private static int Main() { NotEmptyStructConstrainedGenQ<int>? s = null; if (BoxUnboxToNQ(s) && BoxUnboxToQ(s) && BoxUnboxToNQGen(s) && BoxUnboxToQGen(s)) return ExitCode.Passed; else return ExitCode.Failed; } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Runtime.InteropServices; using System; internal class NullableTest { private static bool BoxUnboxToNQGen<T>(T o) { return ((object)o) == null; } private static bool BoxUnboxToQGen<T>(T? o) where T : struct { return ((T?)o) == null; } private static bool BoxUnboxToNQ(object o) { return o == null; } private static bool BoxUnboxToQ(object o) { return ((NotEmptyStructConstrainedGenQ<int>?)o) == null; } private static int Main() { NotEmptyStructConstrainedGenQ<int>? s = null; if (BoxUnboxToNQ(s) && BoxUnboxToQ(s) && BoxUnboxToNQGen(s) && BoxUnboxToQGen(s)) return ExitCode.Passed; else return ExitCode.Failed; } }
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./src/tests/JIT/jit64/valuetypes/nullable/castclass/generics/castclass-generics039.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Runtime.InteropServices; using System; internal class NullableTest { private static bool BoxUnboxToNQ<T>(T o) { return Helper.Compare((ImplementTwoInterface)(ValueType)(object)o, Helper.Create(default(ImplementTwoInterface))); } private static bool BoxUnboxToQ<T>(T o) { return Helper.Compare((ImplementTwoInterface?)(ValueType)(object)o, Helper.Create(default(ImplementTwoInterface))); } private static int Main() { ImplementTwoInterface? s = Helper.Create(default(ImplementTwoInterface)); if (BoxUnboxToNQ(s) && BoxUnboxToQ(s)) return ExitCode.Passed; else return ExitCode.Failed; } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Runtime.InteropServices; using System; internal class NullableTest { private static bool BoxUnboxToNQ<T>(T o) { return Helper.Compare((ImplementTwoInterface)(ValueType)(object)o, Helper.Create(default(ImplementTwoInterface))); } private static bool BoxUnboxToQ<T>(T o) { return Helper.Compare((ImplementTwoInterface?)(ValueType)(object)o, Helper.Create(default(ImplementTwoInterface))); } private static int Main() { ImplementTwoInterface? s = Helper.Create(default(ImplementTwoInterface)); if (BoxUnboxToNQ(s) && BoxUnboxToQ(s)) return ExitCode.Passed; else return ExitCode.Failed; } }
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./src/tests/JIT/IL_Conformance/Old/Conformance_Base/ldc_conv_ovf_i4_u4.il
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. .assembly extern legacy library mscorlib {} .assembly extern System.Console { .publickeytoken = (B0 3F 5F 7F 11 D5 0A 3A ) .ver 4:0:0:0 } // //====================================== //---- CLASS ---------------- .class public conv_ovf_u4 { //---- GLOBAL DATA ---------- //---- METHODS -------------- .method public static int32 conv_0(int32,int32) { .locals (class [mscorlib]System.OverflowException,int32) .maxstack 2 try_start: ldarg 0 conv.ovf.u4 //- No exception ldarg 1 ceq stloc.1 leave.s try_end try_end: ldloc.1 brfalse FAIL ldc.i4 0x11111111 br END aHandler: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd HEnd: ldloc 0 brfalse FAIL ldc.i4 0xEEEEEEEE br END FAIL: ldc.i4 0x00000000 br END END: ret .try try_start to try_end catch [mscorlib]System.OverflowException handler aHandler to HEnd } .method public static int32 conv_un_0(int32,int32) { .locals (class [mscorlib]System.OverflowException,unsigned int32) .maxstack 2 try_start2: ldarg 0 conv.ovf.u4.un stloc.1 leave.s try_end2 try_end2: //- No exception ldloc.1 ldarg 1 ceq brfalse FAIL2 ldc.i4 0x11111111 br END2 aHandler2: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd2 HEnd2: ldloc 0 brfalse FAIL2 ldc.i4 0xEEEEEEEE br END2 FAIL2: ldc.i4 0x00000000 br END2 END2: ret .try try_start2 to try_end2 catch [mscorlib]System.OverflowException handler aHandler2 to HEnd2 } .method public static int32 conv_1(int32,int32) { .locals (class [mscorlib]System.OverflowException,int32) .maxstack 2 try_start: ldarg 0 conv.ovf.u4 //- No exception ldarg 1 ceq stloc.1 leave.s try_end try_end: ldloc.1 brfalse FAIL ldc.i4 0x11111111 br END aHandler: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd HEnd: ldloc 0 brfalse FAIL ldc.i4 0xEEEEEEEE br END FAIL: ldc.i4 0x00000000 br END 
END: ret .try try_start to try_end catch [mscorlib]System.OverflowException handler aHandler to HEnd } .method public static int32 conv_un_1(int32,int32) { .locals (class [mscorlib]System.OverflowException,unsigned int32) .maxstack 2 try_start2: ldarg 0 conv.ovf.u4.un stloc.1 leave.s try_end2 try_end2: //- No exception ldloc.1 ldarg 1 ceq brfalse FAIL2 ldc.i4 0x11111111 br END2 aHandler2: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd2 HEnd2: ldloc 0 brfalse FAIL2 ldc.i4 0xEEEEEEEE br END2 FAIL2: ldc.i4 0x00000000 br END2 END2: ret .try try_start2 to try_end2 catch [mscorlib]System.OverflowException handler aHandler2 to HEnd2 } .method public static int32 conv_2(int32,int32) { .locals (class [mscorlib]System.OverflowException,int32) .maxstack 2 try_start: ldarg 0 conv.ovf.u4 //- No exception ldarg 1 ceq stloc.1 leave.s try_end try_end: ldloc.1 brfalse FAIL ldc.i4 0x11111111 br END aHandler: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd HEnd: ldloc 0 brfalse FAIL ldc.i4 0xEEEEEEEE br END FAIL: ldc.i4 0x00000000 br END END: ret .try try_start to try_end catch [mscorlib]System.OverflowException handler aHandler to HEnd } .method public static int32 conv_un_2(int32,int32) { .locals (class [mscorlib]System.OverflowException,unsigned int32) .maxstack 2 try_start2: ldarg 0 conv.ovf.u4.un stloc.1 leave.s try_end2 try_end2: //- No exception ldloc.1 ldarg 1 ceq brfalse FAIL2 ldc.i4 0x11111111 br END2 aHandler2: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd2 HEnd2: ldloc 0 brfalse FAIL2 ldc.i4 0xEEEEEEEE br END2 FAIL2: ldc.i4 0x00000000 br END2 END2: ret .try try_start2 to try_end2 catch [mscorlib]System.OverflowException handler aHandler2 to HEnd2 } .method public static int32 conv_3(int32,int32) { .locals (class [mscorlib]System.OverflowException,int32) .maxstack 2 try_start: ldarg 0 conv.ovf.u4 //- No exception ldarg 1 ceq stloc.1 leave.s 
try_end try_end: ldloc.1 brfalse FAIL ldc.i4 0x11111111 br END aHandler: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd HEnd: ldloc 0 brfalse FAIL ldc.i4 0xEEEEEEEE br END FAIL: ldc.i4 0x00000000 br END END: ret .try try_start to try_end catch [mscorlib]System.OverflowException handler aHandler to HEnd } .method public static int32 conv_un_3(int32,int32) { .locals (class [mscorlib]System.OverflowException,unsigned int32) .maxstack 2 try_start2: ldarg 0 conv.ovf.u4.un stloc.1 leave.s try_end2 try_end2: //- No exception ldloc.1 ldarg 1 ceq brfalse FAIL2 ldc.i4 0x11111111 br END2 aHandler2: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd2 HEnd2: ldloc 0 brfalse FAIL2 ldc.i4 0xEEEEEEEE br END2 FAIL2: ldc.i4 0x00000000 br END2 END2: ret .try try_start2 to try_end2 catch [mscorlib]System.OverflowException handler aHandler2 to HEnd2 } .method public static int32 conv_4(int32,int32) { .locals (class [mscorlib]System.OverflowException,int32) .maxstack 2 try_start: ldarg 0 conv.ovf.u4 //- No exception ldarg 1 ceq stloc.1 leave.s try_end try_end: ldloc.1 brfalse FAIL ldc.i4 0x11111111 br END aHandler: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd HEnd: ldloc 0 brfalse FAIL ldc.i4 0xEEEEEEEE br END FAIL: ldc.i4 0x00000000 br END END: ret .try try_start to try_end catch [mscorlib]System.OverflowException handler aHandler to HEnd } .method public static int32 conv_un_4(int32,int32) { .locals (class [mscorlib]System.OverflowException,unsigned int32) .maxstack 2 try_start2: ldarg 0 conv.ovf.u4.un stloc.1 leave.s try_end2 try_end2: //- No exception ldloc.1 ldarg 1 ceq brfalse FAIL2 ldc.i4 0x11111111 br END2 aHandler2: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd2 HEnd2: ldloc 0 brfalse FAIL2 ldc.i4 0xEEEEEEEE br END2 FAIL2: ldc.i4 0x00000000 br END2 END2: ret .try try_start2 to try_end2 catch 
[mscorlib]System.OverflowException handler aHandler2 to HEnd2 } .method public static int32 conv_5(int32,int32) { .locals (class [mscorlib]System.OverflowException,int32) .maxstack 2 try_start: ldarg 0 conv.ovf.u4 //- No exception ldarg 1 ceq stloc.1 leave.s try_end try_end: ldloc.1 brfalse FAIL ldc.i4 0x11111111 br END aHandler: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd HEnd: ldloc 0 brfalse FAIL ldc.i4 0xEEEEEEEE br END FAIL: ldc.i4 0x00000000 br END END: ret .try try_start to try_end catch [mscorlib]System.OverflowException handler aHandler to HEnd } .method public static int32 conv_un_5(int32,int32) { .locals (class [mscorlib]System.OverflowException,unsigned int32) .maxstack 2 try_start2: ldarg 0 conv.ovf.u4.un stloc.1 leave.s try_end2 try_end2: //- No exception ldloc.1 ldarg 1 ceq brfalse FAIL2 ldc.i4 0x11111111 br END2 aHandler2: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd2 HEnd2: ldloc 0 brfalse FAIL2 ldc.i4 0xEEEEEEEE br END2 FAIL2: ldc.i4 0x00000000 br END2 END2: ret .try try_start2 to try_end2 catch [mscorlib]System.OverflowException handler aHandler2 to HEnd2 } .method public static int32 conv_6(int32,int32) { .locals (class [mscorlib]System.OverflowException,int32) .maxstack 2 try_start: ldarg 0 conv.ovf.u4 //- No exception ldarg 1 ceq stloc.1 leave.s try_end try_end: ldloc.1 brfalse FAIL ldc.i4 0x11111111 br END aHandler: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd HEnd: ldloc 0 brfalse FAIL ldc.i4 0xEEEEEEEE br END FAIL: ldc.i4 0x00000000 br END END: ret .try try_start to try_end catch [mscorlib]System.OverflowException handler aHandler to HEnd } .method public static int32 conv_un_6(int32,int32) { .locals (class [mscorlib]System.OverflowException,unsigned int32) .maxstack 2 try_start2: ldarg 0 conv.ovf.u4.un stloc.1 leave.s try_end2 try_end2: //- No exception ldloc.1 ldarg 1 ceq brfalse FAIL2 ldc.i4 0x11111111 
br END2 aHandler2: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd2 HEnd2: ldloc 0 brfalse FAIL2 ldc.i4 0xEEEEEEEE br END2 FAIL2: ldc.i4 0x00000000 br END2 END2: ret .try try_start2 to try_end2 catch [mscorlib]System.OverflowException handler aHandler2 to HEnd2 } .method public static int32 conv_7(int32,int32) { .locals (class [mscorlib]System.OverflowException,int32) .maxstack 2 try_start: ldarg 0 conv.ovf.u4 //- No exception ldarg 1 ceq stloc.1 leave.s try_end try_end: ldloc.1 brfalse FAIL ldc.i4 0x11111111 br END aHandler: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd HEnd: ldloc 0 brfalse FAIL ldc.i4 0xEEEEEEEE br END FAIL: ldc.i4 0x00000000 br END END: ret .try try_start to try_end catch [mscorlib]System.OverflowException handler aHandler to HEnd } .method public static int32 conv_un_7(int32,int32) { .locals (class [mscorlib]System.OverflowException,unsigned int32) .maxstack 2 try_start2: ldarg 0 conv.ovf.u4.un stloc.1 leave.s try_end2 try_end2: //- No exception ldloc.1 ldarg 1 ceq brfalse FAIL2 ldc.i4 0x11111111 br END2 aHandler2: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd2 HEnd2: ldloc 0 brfalse FAIL2 ldc.i4 0xEEEEEEEE br END2 FAIL2: ldc.i4 0x00000000 br END2 END2: ret .try try_start2 to try_end2 catch [mscorlib]System.OverflowException handler aHandler2 to HEnd2 } .method public static int32 conv_8(int32,int32) { .locals (class [mscorlib]System.OverflowException,int32) .maxstack 2 try_start: ldarg 0 conv.ovf.u4 //- No exception ldarg 1 ceq stloc.1 leave.s try_end try_end: ldloc.1 brfalse FAIL ldc.i4 0x11111111 br END aHandler: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd HEnd: ldloc 0 brfalse FAIL ldc.i4 0xEEEEEEEE br END FAIL: ldc.i4 0x00000000 br END END: ret .try try_start to try_end catch [mscorlib]System.OverflowException handler aHandler to HEnd } .method public static int32 
conv_un_8(int32,int32) { .locals (class [mscorlib]System.OverflowException,unsigned int32) .maxstack 2 try_start2: ldarg 0 conv.ovf.u4.un stloc.1 leave.s try_end2 try_end2: //- No exception ldloc.1 ldarg 1 ceq brfalse FAIL2 ldc.i4 0x11111111 br END2 aHandler2: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd2 HEnd2: ldloc 0 brfalse FAIL2 ldc.i4 0xEEEEEEEE br END2 FAIL2: ldc.i4 0x00000000 br END2 END2: ret .try try_start2 to try_end2 catch [mscorlib]System.OverflowException handler aHandler2 to HEnd2 } .method public static int32 conv_9(int32,int32) { .locals (class [mscorlib]System.OverflowException,int32) .maxstack 2 try_start: ldarg 0 conv.ovf.u4 //- No exception ldarg 1 ceq stloc.1 leave.s try_end try_end: ldloc.1 brfalse FAIL ldc.i4 0x11111111 br END aHandler: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd HEnd: ldloc 0 brfalse FAIL ldc.i4 0xEEEEEEEE br END FAIL: ldc.i4 0x00000000 br END END: ret .try try_start to try_end catch [mscorlib]System.OverflowException handler aHandler to HEnd } .method public static int32 conv_un_9(int32,int32) { .locals (class [mscorlib]System.OverflowException,unsigned int32) .maxstack 2 try_start2: ldarg 0 conv.ovf.u4.un stloc.1 leave.s try_end2 try_end2: //- No exception ldloc.1 ldarg 1 ceq brfalse FAIL2 ldc.i4 0x11111111 br END2 aHandler2: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd2 HEnd2: ldloc 0 brfalse FAIL2 ldc.i4 0xEEEEEEEE br END2 FAIL2: ldc.i4 0x00000000 br END2 END2: ret .try try_start2 to try_end2 catch [mscorlib]System.OverflowException handler aHandler2 to HEnd2 } .method public static int32 conv_10(int32,int32) { .locals (class [mscorlib]System.OverflowException,int32) .maxstack 2 try_start: ldarg 0 conv.ovf.u4 //- No exception ldarg 1 ceq stloc.1 leave.s try_end try_end: ldloc.1 brfalse FAIL ldc.i4 0x11111111 br END aHandler: //- Got an OverflowException isinst 
[mscorlib]System.OverflowException stloc 0 leave HEnd HEnd: ldloc 0 brfalse FAIL ldc.i4 0xEEEEEEEE br END FAIL: ldc.i4 0x00000000 br END END: ret .try try_start to try_end catch [mscorlib]System.OverflowException handler aHandler to HEnd } .method public static int32 conv_un_10(int32,int32) { .locals (class [mscorlib]System.OverflowException,unsigned int32) .maxstack 2 try_start2: ldarg 0 conv.ovf.u4.un stloc.1 leave.s try_end2 try_end2: //- No exception ldloc.1 ldarg 1 ceq brfalse FAIL2 ldc.i4 0x11111111 br END2 aHandler2: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd2 HEnd2: ldloc 0 brfalse FAIL2 ldc.i4 0xEEEEEEEE br END2 FAIL2: ldc.i4 0x00000000 br END2 END2: ret .try try_start2 to try_end2 catch [mscorlib]System.OverflowException handler aHandler2 to HEnd2 } .method public static int32 conv_11(int32,int32) { .locals (class [mscorlib]System.OverflowException,int32) .maxstack 2 try_start: ldarg 0 conv.ovf.u4 //- No exception ldarg 1 ceq stloc.1 leave.s try_end try_end: ldloc.1 brfalse FAIL ldc.i4 0x11111111 br END aHandler: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd HEnd: ldloc 0 brfalse FAIL ldc.i4 0xEEEEEEEE br END FAIL: ldc.i4 0x00000000 br END END: ret .try try_start to try_end catch [mscorlib]System.OverflowException handler aHandler to HEnd } .method public static int32 conv_un_11(int32,int32) { .locals (class [mscorlib]System.OverflowException,unsigned int32) .maxstack 2 try_start2: ldarg 0 conv.ovf.u4.un stloc.1 leave.s try_end2 try_end2: //- No exception ldloc.1 ldarg 1 ceq brfalse FAIL2 ldc.i4 0x11111111 br END2 aHandler2: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd2 HEnd2: ldloc 0 brfalse FAIL2 ldc.i4 0xEEEEEEEE br END2 FAIL2: ldc.i4 0x00000000 br END2 END2: ret .try try_start2 to try_end2 catch [mscorlib]System.OverflowException handler aHandler2 to HEnd2 } .method public static int32 conv_12(int32,int32) { .locals 
(class [mscorlib]System.OverflowException,int32) .maxstack 2 try_start: ldarg 0 conv.ovf.u4 //- No exception ldarg 1 ceq stloc.1 leave.s try_end try_end: ldloc.1 brfalse FAIL ldc.i4 0x11111111 br END aHandler: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd HEnd: ldloc 0 brfalse FAIL ldc.i4 0xEEEEEEEE br END FAIL: ldc.i4 0x00000000 br END END: ret .try try_start to try_end catch [mscorlib]System.OverflowException handler aHandler to HEnd } .method public static int32 conv_un_12(int32,int32) { .locals (class [mscorlib]System.OverflowException,unsigned int32) .maxstack 2 try_start2: ldarg 0 conv.ovf.u4.un stloc.1 leave.s try_end2 try_end2: //- No exception ldloc.1 ldarg 1 ceq brfalse FAIL2 ldc.i4 0x11111111 br END2 aHandler2: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd2 HEnd2: ldloc 0 brfalse FAIL2 ldc.i4 0xEEEEEEEE br END2 FAIL2: ldc.i4 0x00000000 br END2 END2: ret .try try_start2 to try_end2 catch [mscorlib]System.OverflowException handler aHandler2 to HEnd2 } .method public static int32 conv_13(int32,int32) { .locals (class [mscorlib]System.OverflowException,int32) .maxstack 2 try_start: ldarg 0 conv.ovf.u4 //- No exception ldarg 1 ceq stloc.1 leave.s try_end try_end: ldloc.1 brfalse FAIL ldc.i4 0x11111111 br END aHandler: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd HEnd: ldloc 0 brfalse FAIL ldc.i4 0xEEEEEEEE br END FAIL: ldc.i4 0x00000000 br END END: ret .try try_start to try_end catch [mscorlib]System.OverflowException handler aHandler to HEnd } .method public static int32 conv_un_13(int32,int32) { .locals (class [mscorlib]System.OverflowException,unsigned int32) .maxstack 2 try_start2: ldarg 0 conv.ovf.u4.un stloc.1 leave.s try_end2 try_end2: //- No exception ldloc.1 ldarg 1 ceq brfalse FAIL2 ldc.i4 0x11111111 br END2 aHandler2: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd2 HEnd2: 
ldloc 0 brfalse FAIL2 ldc.i4 0xEEEEEEEE br END2 FAIL2: ldc.i4 0x00000000 br END2 END2: ret .try try_start2 to try_end2 catch [mscorlib]System.OverflowException handler aHandler2 to HEnd2 } //---- CONSTRUCTOR ---------- .method public void conv_ovf_u4() { .maxstack 1 ret } //---- MAIN ----------------- .method public static int32 main(class [mscorlib]System.String[]) { .entrypoint .maxstack 5 //====== begin testing ====== //-- min i4 -- ldc.i4 0x80000000 ldc.i4 0x00000000 call int32 conv_ovf_u4::conv_0(int32,int32) ldc.i4 0xEEEEEEEE ceq brfalse FAIL //-- -1 i4 -- ldc.i4 0xFFFFFFFF ldc.i4 0x00000000 call int32 conv_ovf_u4::conv_1(int32,int32) ldc.i4 0xEEEEEEEE ceq brfalse FAIL //-- 0 i4 -- ldc.i4 0x00000000 ldc.i4 0x00000000 call int32 conv_ovf_u4::conv_2(int32,int32) ldc.i4 0x11111111 ceq brfalse FAIL //-- +1 i4 -- ldc.i4 0x00000001 ldc.i4 0x00000001 call int32 conv_ovf_u4::conv_3(int32,int32) ldc.i4 0x11111111 ceq brfalse FAIL //-- max i4 -- ldc.i4 0x7FFFFFFF ldc.i4 0x7FFFFFFF call int32 conv_ovf_u4::conv_4(int32,int32) ldc.i4 0x11111111 ceq brfalse FAIL //-- Odd u4 -- ldc.i4 0x55555555 ldc.i4 0x55555555 call int32 conv_ovf_u4::conv_5(int32,int32) ldc.i4 0x11111111 ceq brfalse FAIL //-- Even u4 -- ldc.i4 0xAAAAAAAA ldc.i4 0x00000000 call int32 conv_ovf_u4::conv_6(int32,int32) ldc.i4 0xEEEEEEEE ceq brfalse FAIL //====== conv_un testing ====== //-- min i4 -- /* ldc.i4 0x80000000 ldc.i4 0x00000000 call int32 conv_ovf_u4::conv_un_7(int32,int32) ldc.i4 0xEEEEEEEE ceq brfalse FAIL //-- -1 i4 -- ldc.i4 0xFFFFFFFF ldc.i4 0x00000000 call int32 conv_ovf_u4::conv_un_8(int32,int32) ldc.i4 0xEEEEEEEE ceq brfalse FAIL *///-- 0 i4 -- ldc.i4 0x00000000 ldc.i4 0x00000000 call int32 conv_ovf_u4::conv_un_9(int32,int32) ldc.i4 0x11111111 ceq brfalse FAIL //-- +1 i4 -- ldc.i4 0x00000001 ldc.i4 0x00000001 call int32 conv_ovf_u4::conv_un_10(int32,int32) ldc.i4 0x11111111 ceq brfalse FAIL //-- max i4 -- ldc.i4 0x7FFFFFFF ldc.i4 0x7FFFFFFF call int32 conv_ovf_u4::conv_un_11(int32,int32) 
ldc.i4 0x11111111 ceq brfalse FAIL //-- Odd u4 -- ldc.i4 0x55555555 ldc.i4 0x55555555 call int32 conv_ovf_u4::conv_un_12(int32,int32) ldc.i4 0x11111111 ceq brfalse FAIL //-- Even u4 -- /* ldc.i4 0xAAAAAAAA ldc.i4 0x00000000 call int32 conv_ovf_u4::conv_un_13(int32,int32) ldc.i4 0xEEEEEEEE ceq brfalse FAIL *///====== end testing ======== //---- branch here on pass -- PASS: ldc.i4 100 br END //---- branch here on fail -- FAIL: ldc.i4 101 //---- return the result ---- END: ret //---- END OF METHOD -------- } //---- EOF ------------------ } .assembly ldc_conv_ovf_i4_u4{}
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. .assembly extern legacy library mscorlib {} .assembly extern System.Console { .publickeytoken = (B0 3F 5F 7F 11 D5 0A 3A ) .ver 4:0:0:0 } // //====================================== //---- CLASS ---------------- .class public conv_ovf_u4 { //---- GLOBAL DATA ---------- //---- METHODS -------------- .method public static int32 conv_0(int32,int32) { .locals (class [mscorlib]System.OverflowException,int32) .maxstack 2 try_start: ldarg 0 conv.ovf.u4 //- No exception ldarg 1 ceq stloc.1 leave.s try_end try_end: ldloc.1 brfalse FAIL ldc.i4 0x11111111 br END aHandler: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd HEnd: ldloc 0 brfalse FAIL ldc.i4 0xEEEEEEEE br END FAIL: ldc.i4 0x00000000 br END END: ret .try try_start to try_end catch [mscorlib]System.OverflowException handler aHandler to HEnd } .method public static int32 conv_un_0(int32,int32) { .locals (class [mscorlib]System.OverflowException,unsigned int32) .maxstack 2 try_start2: ldarg 0 conv.ovf.u4.un stloc.1 leave.s try_end2 try_end2: //- No exception ldloc.1 ldarg 1 ceq brfalse FAIL2 ldc.i4 0x11111111 br END2 aHandler2: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd2 HEnd2: ldloc 0 brfalse FAIL2 ldc.i4 0xEEEEEEEE br END2 FAIL2: ldc.i4 0x00000000 br END2 END2: ret .try try_start2 to try_end2 catch [mscorlib]System.OverflowException handler aHandler2 to HEnd2 } .method public static int32 conv_1(int32,int32) { .locals (class [mscorlib]System.OverflowException,int32) .maxstack 2 try_start: ldarg 0 conv.ovf.u4 //- No exception ldarg 1 ceq stloc.1 leave.s try_end try_end: ldloc.1 brfalse FAIL ldc.i4 0x11111111 br END aHandler: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd HEnd: ldloc 0 brfalse FAIL ldc.i4 0xEEEEEEEE br END FAIL: ldc.i4 0x00000000 br END 
END: ret .try try_start to try_end catch [mscorlib]System.OverflowException handler aHandler to HEnd } .method public static int32 conv_un_1(int32,int32) { .locals (class [mscorlib]System.OverflowException,unsigned int32) .maxstack 2 try_start2: ldarg 0 conv.ovf.u4.un stloc.1 leave.s try_end2 try_end2: //- No exception ldloc.1 ldarg 1 ceq brfalse FAIL2 ldc.i4 0x11111111 br END2 aHandler2: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd2 HEnd2: ldloc 0 brfalse FAIL2 ldc.i4 0xEEEEEEEE br END2 FAIL2: ldc.i4 0x00000000 br END2 END2: ret .try try_start2 to try_end2 catch [mscorlib]System.OverflowException handler aHandler2 to HEnd2 } .method public static int32 conv_2(int32,int32) { .locals (class [mscorlib]System.OverflowException,int32) .maxstack 2 try_start: ldarg 0 conv.ovf.u4 //- No exception ldarg 1 ceq stloc.1 leave.s try_end try_end: ldloc.1 brfalse FAIL ldc.i4 0x11111111 br END aHandler: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd HEnd: ldloc 0 brfalse FAIL ldc.i4 0xEEEEEEEE br END FAIL: ldc.i4 0x00000000 br END END: ret .try try_start to try_end catch [mscorlib]System.OverflowException handler aHandler to HEnd } .method public static int32 conv_un_2(int32,int32) { .locals (class [mscorlib]System.OverflowException,unsigned int32) .maxstack 2 try_start2: ldarg 0 conv.ovf.u4.un stloc.1 leave.s try_end2 try_end2: //- No exception ldloc.1 ldarg 1 ceq brfalse FAIL2 ldc.i4 0x11111111 br END2 aHandler2: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd2 HEnd2: ldloc 0 brfalse FAIL2 ldc.i4 0xEEEEEEEE br END2 FAIL2: ldc.i4 0x00000000 br END2 END2: ret .try try_start2 to try_end2 catch [mscorlib]System.OverflowException handler aHandler2 to HEnd2 } .method public static int32 conv_3(int32,int32) { .locals (class [mscorlib]System.OverflowException,int32) .maxstack 2 try_start: ldarg 0 conv.ovf.u4 //- No exception ldarg 1 ceq stloc.1 leave.s 
try_end try_end: ldloc.1 brfalse FAIL ldc.i4 0x11111111 br END aHandler: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd HEnd: ldloc 0 brfalse FAIL ldc.i4 0xEEEEEEEE br END FAIL: ldc.i4 0x00000000 br END END: ret .try try_start to try_end catch [mscorlib]System.OverflowException handler aHandler to HEnd } .method public static int32 conv_un_3(int32,int32) { .locals (class [mscorlib]System.OverflowException,unsigned int32) .maxstack 2 try_start2: ldarg 0 conv.ovf.u4.un stloc.1 leave.s try_end2 try_end2: //- No exception ldloc.1 ldarg 1 ceq brfalse FAIL2 ldc.i4 0x11111111 br END2 aHandler2: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd2 HEnd2: ldloc 0 brfalse FAIL2 ldc.i4 0xEEEEEEEE br END2 FAIL2: ldc.i4 0x00000000 br END2 END2: ret .try try_start2 to try_end2 catch [mscorlib]System.OverflowException handler aHandler2 to HEnd2 } .method public static int32 conv_4(int32,int32) { .locals (class [mscorlib]System.OverflowException,int32) .maxstack 2 try_start: ldarg 0 conv.ovf.u4 //- No exception ldarg 1 ceq stloc.1 leave.s try_end try_end: ldloc.1 brfalse FAIL ldc.i4 0x11111111 br END aHandler: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd HEnd: ldloc 0 brfalse FAIL ldc.i4 0xEEEEEEEE br END FAIL: ldc.i4 0x00000000 br END END: ret .try try_start to try_end catch [mscorlib]System.OverflowException handler aHandler to HEnd } .method public static int32 conv_un_4(int32,int32) { .locals (class [mscorlib]System.OverflowException,unsigned int32) .maxstack 2 try_start2: ldarg 0 conv.ovf.u4.un stloc.1 leave.s try_end2 try_end2: //- No exception ldloc.1 ldarg 1 ceq brfalse FAIL2 ldc.i4 0x11111111 br END2 aHandler2: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd2 HEnd2: ldloc 0 brfalse FAIL2 ldc.i4 0xEEEEEEEE br END2 FAIL2: ldc.i4 0x00000000 br END2 END2: ret .try try_start2 to try_end2 catch 
[mscorlib]System.OverflowException handler aHandler2 to HEnd2 } .method public static int32 conv_5(int32,int32) { .locals (class [mscorlib]System.OverflowException,int32) .maxstack 2 try_start: ldarg 0 conv.ovf.u4 //- No exception ldarg 1 ceq stloc.1 leave.s try_end try_end: ldloc.1 brfalse FAIL ldc.i4 0x11111111 br END aHandler: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd HEnd: ldloc 0 brfalse FAIL ldc.i4 0xEEEEEEEE br END FAIL: ldc.i4 0x00000000 br END END: ret .try try_start to try_end catch [mscorlib]System.OverflowException handler aHandler to HEnd } .method public static int32 conv_un_5(int32,int32) { .locals (class [mscorlib]System.OverflowException,unsigned int32) .maxstack 2 try_start2: ldarg 0 conv.ovf.u4.un stloc.1 leave.s try_end2 try_end2: //- No exception ldloc.1 ldarg 1 ceq brfalse FAIL2 ldc.i4 0x11111111 br END2 aHandler2: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd2 HEnd2: ldloc 0 brfalse FAIL2 ldc.i4 0xEEEEEEEE br END2 FAIL2: ldc.i4 0x00000000 br END2 END2: ret .try try_start2 to try_end2 catch [mscorlib]System.OverflowException handler aHandler2 to HEnd2 } .method public static int32 conv_6(int32,int32) { .locals (class [mscorlib]System.OverflowException,int32) .maxstack 2 try_start: ldarg 0 conv.ovf.u4 //- No exception ldarg 1 ceq stloc.1 leave.s try_end try_end: ldloc.1 brfalse FAIL ldc.i4 0x11111111 br END aHandler: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd HEnd: ldloc 0 brfalse FAIL ldc.i4 0xEEEEEEEE br END FAIL: ldc.i4 0x00000000 br END END: ret .try try_start to try_end catch [mscorlib]System.OverflowException handler aHandler to HEnd } .method public static int32 conv_un_6(int32,int32) { .locals (class [mscorlib]System.OverflowException,unsigned int32) .maxstack 2 try_start2: ldarg 0 conv.ovf.u4.un stloc.1 leave.s try_end2 try_end2: //- No exception ldloc.1 ldarg 1 ceq brfalse FAIL2 ldc.i4 0x11111111 
br END2 aHandler2: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd2 HEnd2: ldloc 0 brfalse FAIL2 ldc.i4 0xEEEEEEEE br END2 FAIL2: ldc.i4 0x00000000 br END2 END2: ret .try try_start2 to try_end2 catch [mscorlib]System.OverflowException handler aHandler2 to HEnd2 } .method public static int32 conv_7(int32,int32) { .locals (class [mscorlib]System.OverflowException,int32) .maxstack 2 try_start: ldarg 0 conv.ovf.u4 //- No exception ldarg 1 ceq stloc.1 leave.s try_end try_end: ldloc.1 brfalse FAIL ldc.i4 0x11111111 br END aHandler: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd HEnd: ldloc 0 brfalse FAIL ldc.i4 0xEEEEEEEE br END FAIL: ldc.i4 0x00000000 br END END: ret .try try_start to try_end catch [mscorlib]System.OverflowException handler aHandler to HEnd } .method public static int32 conv_un_7(int32,int32) { .locals (class [mscorlib]System.OverflowException,unsigned int32) .maxstack 2 try_start2: ldarg 0 conv.ovf.u4.un stloc.1 leave.s try_end2 try_end2: //- No exception ldloc.1 ldarg 1 ceq brfalse FAIL2 ldc.i4 0x11111111 br END2 aHandler2: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd2 HEnd2: ldloc 0 brfalse FAIL2 ldc.i4 0xEEEEEEEE br END2 FAIL2: ldc.i4 0x00000000 br END2 END2: ret .try try_start2 to try_end2 catch [mscorlib]System.OverflowException handler aHandler2 to HEnd2 } .method public static int32 conv_8(int32,int32) { .locals (class [mscorlib]System.OverflowException,int32) .maxstack 2 try_start: ldarg 0 conv.ovf.u4 //- No exception ldarg 1 ceq stloc.1 leave.s try_end try_end: ldloc.1 brfalse FAIL ldc.i4 0x11111111 br END aHandler: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd HEnd: ldloc 0 brfalse FAIL ldc.i4 0xEEEEEEEE br END FAIL: ldc.i4 0x00000000 br END END: ret .try try_start to try_end catch [mscorlib]System.OverflowException handler aHandler to HEnd } .method public static int32 
conv_un_8(int32,int32) { .locals (class [mscorlib]System.OverflowException,unsigned int32) .maxstack 2 try_start2: ldarg 0 conv.ovf.u4.un stloc.1 leave.s try_end2 try_end2: //- No exception ldloc.1 ldarg 1 ceq brfalse FAIL2 ldc.i4 0x11111111 br END2 aHandler2: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd2 HEnd2: ldloc 0 brfalse FAIL2 ldc.i4 0xEEEEEEEE br END2 FAIL2: ldc.i4 0x00000000 br END2 END2: ret .try try_start2 to try_end2 catch [mscorlib]System.OverflowException handler aHandler2 to HEnd2 } .method public static int32 conv_9(int32,int32) { .locals (class [mscorlib]System.OverflowException,int32) .maxstack 2 try_start: ldarg 0 conv.ovf.u4 //- No exception ldarg 1 ceq stloc.1 leave.s try_end try_end: ldloc.1 brfalse FAIL ldc.i4 0x11111111 br END aHandler: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd HEnd: ldloc 0 brfalse FAIL ldc.i4 0xEEEEEEEE br END FAIL: ldc.i4 0x00000000 br END END: ret .try try_start to try_end catch [mscorlib]System.OverflowException handler aHandler to HEnd } .method public static int32 conv_un_9(int32,int32) { .locals (class [mscorlib]System.OverflowException,unsigned int32) .maxstack 2 try_start2: ldarg 0 conv.ovf.u4.un stloc.1 leave.s try_end2 try_end2: //- No exception ldloc.1 ldarg 1 ceq brfalse FAIL2 ldc.i4 0x11111111 br END2 aHandler2: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd2 HEnd2: ldloc 0 brfalse FAIL2 ldc.i4 0xEEEEEEEE br END2 FAIL2: ldc.i4 0x00000000 br END2 END2: ret .try try_start2 to try_end2 catch [mscorlib]System.OverflowException handler aHandler2 to HEnd2 } .method public static int32 conv_10(int32,int32) { .locals (class [mscorlib]System.OverflowException,int32) .maxstack 2 try_start: ldarg 0 conv.ovf.u4 //- No exception ldarg 1 ceq stloc.1 leave.s try_end try_end: ldloc.1 brfalse FAIL ldc.i4 0x11111111 br END aHandler: //- Got an OverflowException isinst 
[mscorlib]System.OverflowException stloc 0 leave HEnd HEnd: ldloc 0 brfalse FAIL ldc.i4 0xEEEEEEEE br END FAIL: ldc.i4 0x00000000 br END END: ret .try try_start to try_end catch [mscorlib]System.OverflowException handler aHandler to HEnd } .method public static int32 conv_un_10(int32,int32) { .locals (class [mscorlib]System.OverflowException,unsigned int32) .maxstack 2 try_start2: ldarg 0 conv.ovf.u4.un stloc.1 leave.s try_end2 try_end2: //- No exception ldloc.1 ldarg 1 ceq brfalse FAIL2 ldc.i4 0x11111111 br END2 aHandler2: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd2 HEnd2: ldloc 0 brfalse FAIL2 ldc.i4 0xEEEEEEEE br END2 FAIL2: ldc.i4 0x00000000 br END2 END2: ret .try try_start2 to try_end2 catch [mscorlib]System.OverflowException handler aHandler2 to HEnd2 } .method public static int32 conv_11(int32,int32) { .locals (class [mscorlib]System.OverflowException,int32) .maxstack 2 try_start: ldarg 0 conv.ovf.u4 //- No exception ldarg 1 ceq stloc.1 leave.s try_end try_end: ldloc.1 brfalse FAIL ldc.i4 0x11111111 br END aHandler: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd HEnd: ldloc 0 brfalse FAIL ldc.i4 0xEEEEEEEE br END FAIL: ldc.i4 0x00000000 br END END: ret .try try_start to try_end catch [mscorlib]System.OverflowException handler aHandler to HEnd } .method public static int32 conv_un_11(int32,int32) { .locals (class [mscorlib]System.OverflowException,unsigned int32) .maxstack 2 try_start2: ldarg 0 conv.ovf.u4.un stloc.1 leave.s try_end2 try_end2: //- No exception ldloc.1 ldarg 1 ceq brfalse FAIL2 ldc.i4 0x11111111 br END2 aHandler2: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd2 HEnd2: ldloc 0 brfalse FAIL2 ldc.i4 0xEEEEEEEE br END2 FAIL2: ldc.i4 0x00000000 br END2 END2: ret .try try_start2 to try_end2 catch [mscorlib]System.OverflowException handler aHandler2 to HEnd2 } .method public static int32 conv_12(int32,int32) { .locals 
(class [mscorlib]System.OverflowException,int32) .maxstack 2 try_start: ldarg 0 conv.ovf.u4 //- No exception ldarg 1 ceq stloc.1 leave.s try_end try_end: ldloc.1 brfalse FAIL ldc.i4 0x11111111 br END aHandler: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd HEnd: ldloc 0 brfalse FAIL ldc.i4 0xEEEEEEEE br END FAIL: ldc.i4 0x00000000 br END END: ret .try try_start to try_end catch [mscorlib]System.OverflowException handler aHandler to HEnd } .method public static int32 conv_un_12(int32,int32) { .locals (class [mscorlib]System.OverflowException,unsigned int32) .maxstack 2 try_start2: ldarg 0 conv.ovf.u4.un stloc.1 leave.s try_end2 try_end2: //- No exception ldloc.1 ldarg 1 ceq brfalse FAIL2 ldc.i4 0x11111111 br END2 aHandler2: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd2 HEnd2: ldloc 0 brfalse FAIL2 ldc.i4 0xEEEEEEEE br END2 FAIL2: ldc.i4 0x00000000 br END2 END2: ret .try try_start2 to try_end2 catch [mscorlib]System.OverflowException handler aHandler2 to HEnd2 } .method public static int32 conv_13(int32,int32) { .locals (class [mscorlib]System.OverflowException,int32) .maxstack 2 try_start: ldarg 0 conv.ovf.u4 //- No exception ldarg 1 ceq stloc.1 leave.s try_end try_end: ldloc.1 brfalse FAIL ldc.i4 0x11111111 br END aHandler: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd HEnd: ldloc 0 brfalse FAIL ldc.i4 0xEEEEEEEE br END FAIL: ldc.i4 0x00000000 br END END: ret .try try_start to try_end catch [mscorlib]System.OverflowException handler aHandler to HEnd } .method public static int32 conv_un_13(int32,int32) { .locals (class [mscorlib]System.OverflowException,unsigned int32) .maxstack 2 try_start2: ldarg 0 conv.ovf.u4.un stloc.1 leave.s try_end2 try_end2: //- No exception ldloc.1 ldarg 1 ceq brfalse FAIL2 ldc.i4 0x11111111 br END2 aHandler2: //- Got an OverflowException isinst [mscorlib]System.OverflowException stloc 0 leave HEnd2 HEnd2: 
ldloc 0 brfalse FAIL2 ldc.i4 0xEEEEEEEE br END2 FAIL2: ldc.i4 0x00000000 br END2 END2: ret .try try_start2 to try_end2 catch [mscorlib]System.OverflowException handler aHandler2 to HEnd2 } //---- CONSTRUCTOR ---------- .method public void conv_ovf_u4() { .maxstack 1 ret } //---- MAIN ----------------- .method public static int32 main(class [mscorlib]System.String[]) { .entrypoint .maxstack 5 //====== begin testing ====== //-- min i4 -- ldc.i4 0x80000000 ldc.i4 0x00000000 call int32 conv_ovf_u4::conv_0(int32,int32) ldc.i4 0xEEEEEEEE ceq brfalse FAIL //-- -1 i4 -- ldc.i4 0xFFFFFFFF ldc.i4 0x00000000 call int32 conv_ovf_u4::conv_1(int32,int32) ldc.i4 0xEEEEEEEE ceq brfalse FAIL //-- 0 i4 -- ldc.i4 0x00000000 ldc.i4 0x00000000 call int32 conv_ovf_u4::conv_2(int32,int32) ldc.i4 0x11111111 ceq brfalse FAIL //-- +1 i4 -- ldc.i4 0x00000001 ldc.i4 0x00000001 call int32 conv_ovf_u4::conv_3(int32,int32) ldc.i4 0x11111111 ceq brfalse FAIL //-- max i4 -- ldc.i4 0x7FFFFFFF ldc.i4 0x7FFFFFFF call int32 conv_ovf_u4::conv_4(int32,int32) ldc.i4 0x11111111 ceq brfalse FAIL //-- Odd u4 -- ldc.i4 0x55555555 ldc.i4 0x55555555 call int32 conv_ovf_u4::conv_5(int32,int32) ldc.i4 0x11111111 ceq brfalse FAIL //-- Even u4 -- ldc.i4 0xAAAAAAAA ldc.i4 0x00000000 call int32 conv_ovf_u4::conv_6(int32,int32) ldc.i4 0xEEEEEEEE ceq brfalse FAIL //====== conv_un testing ====== //-- min i4 -- /* ldc.i4 0x80000000 ldc.i4 0x00000000 call int32 conv_ovf_u4::conv_un_7(int32,int32) ldc.i4 0xEEEEEEEE ceq brfalse FAIL //-- -1 i4 -- ldc.i4 0xFFFFFFFF ldc.i4 0x00000000 call int32 conv_ovf_u4::conv_un_8(int32,int32) ldc.i4 0xEEEEEEEE ceq brfalse FAIL *///-- 0 i4 -- ldc.i4 0x00000000 ldc.i4 0x00000000 call int32 conv_ovf_u4::conv_un_9(int32,int32) ldc.i4 0x11111111 ceq brfalse FAIL //-- +1 i4 -- ldc.i4 0x00000001 ldc.i4 0x00000001 call int32 conv_ovf_u4::conv_un_10(int32,int32) ldc.i4 0x11111111 ceq brfalse FAIL //-- max i4 -- ldc.i4 0x7FFFFFFF ldc.i4 0x7FFFFFFF call int32 conv_ovf_u4::conv_un_11(int32,int32) 
ldc.i4 0x11111111 ceq brfalse FAIL //-- Odd u4 -- ldc.i4 0x55555555 ldc.i4 0x55555555 call int32 conv_ovf_u4::conv_un_12(int32,int32) ldc.i4 0x11111111 ceq brfalse FAIL //-- Even u4 -- /* ldc.i4 0xAAAAAAAA ldc.i4 0x00000000 call int32 conv_ovf_u4::conv_un_13(int32,int32) ldc.i4 0xEEEEEEEE ceq brfalse FAIL *///====== end testing ======== //---- branch here on pass -- PASS: ldc.i4 100 br END //---- branch here on fail -- FAIL: ldc.i4 101 //---- return the result ---- END: ret //---- END OF METHOD -------- } //---- EOF ------------------ } .assembly ldc_conv_ovf_i4_u4{}
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./src/libraries/System.Data.Common/src/System/Data/Common/DbProviderFactory.CreatePermission.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Security; using System.Security.Permissions; // Additional implementation to keep public API in Mono (it has a reference to this file) // https://github.com/dotnet/runtime/issues/20232 namespace System.Data.Common { partial class DbProviderFactory { public virtual CodeAccessPermission CreatePermission(PermissionState state) => null; } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Security; using System.Security.Permissions; // Additional implementation to keep public API in Mono (it has a reference to this file) // https://github.com/dotnet/runtime/issues/20232 namespace System.Data.Common { partial class DbProviderFactory { public virtual CodeAccessPermission CreatePermission(PermissionState state) => null; } }
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./src/libraries/System.Text.Json/tests/System.Text.Json.Tests/JsonElementCloneTests.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Reflection; using Xunit; namespace System.Text.Json.Tests { public static class JsonElementCloneTests { [Fact] public static void CloneTwiceFromSameDocument() { string json = "[[]]"; JsonElement root; JsonElement clone; JsonElement clone2; using (JsonDocument doc = JsonDocument.Parse(json)) { root = doc.RootElement; clone = root.Clone(); clone2 = root.Clone(); Assert.Equal(json, clone.GetRawText()); Assert.NotSame(doc, clone.SniffDocument()); Assert.NotSame(doc, clone2.SniffDocument()); } // After document Dispose Assert.Equal(json, clone.GetRawText()); Assert.Equal(json, clone2.GetRawText()); Assert.NotSame(clone.SniffDocument(), clone2.SniffDocument()); Assert.Throws<ObjectDisposedException>(() => root.GetRawText()); } [Fact] public static void CloneInnerElementFromClonedElement() { JsonElement clone; using (JsonDocument doc = JsonDocument.Parse("[[[]]]")) { JsonElement middle = doc.RootElement[0].Clone(); JsonElement inner = middle[0]; clone = inner.Clone(); Assert.Equal(inner.GetRawText(), clone.GetRawText()); Assert.NotSame(doc, clone.SniffDocument()); Assert.Same(middle.SniffDocument(), clone.SniffDocument()); Assert.Same(inner.SniffDocument(), clone.SniffDocument()); } // After document Dispose Assert.Equal("[]", clone.GetRawText()); } [Fact] public static void CloneAtInnerNumber() { CloneAtInner("1.21e9", JsonValueKind.Number); } [Fact] public static void CloneAtInnerString() { CloneAtInner("\" this string has \\u0039 spaces\"", JsonValueKind.String); } [Fact] public static void CloneAtInnerTrue() { CloneAtInner("true", JsonValueKind.True); } [Fact] public static void CloneAtInnerFalse() { CloneAtInner("false", JsonValueKind.False); } [Fact] public static void CloneAtInnerNull() { CloneAtInner("null", JsonValueKind.Null); } [Fact] public static void CloneAtInnerObject() { // Very weird whitespace is used here 
just to ensure that the // clone API isn't making any whitespace assumptions. CloneAtInner( @"{ ""this"": [ { ""object"": 0, ""has"": [ ""whitespace"" ] } ] }", JsonValueKind.Object); } [Fact] public static void CloneAtInnerArray() { // Very weird whitespace is used here just to ensure that the // clone API isn't making any whitespace assumptions. CloneAtInner( @"[ { ""this"": [ { ""object"": 0, ""has"": [ ""whitespace"" ] } ] }, 5 , false, null ]", JsonValueKind.Array); } private static void CloneAtInner(string innerJson, JsonValueKind valueType) { string json = $"{{ \"obj\": [ {{ \"not target\": true, \"target\": {innerJson} }}, 5 ] }}"; JsonElement clone; using (JsonDocument doc = JsonDocument.Parse(json)) { JsonElement target = doc.RootElement.GetProperty("obj")[0].GetProperty("target"); Assert.Equal(valueType, target.ValueKind); clone = target.Clone(); } Assert.Equal(innerJson, clone.GetRawText()); } private static JsonDocument SniffDocument(this JsonElement element) { return (JsonDocument)typeof(JsonElement). GetField("_parent", BindingFlags.Instance|BindingFlags.NonPublic). GetValue(element); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Reflection; using Xunit; namespace System.Text.Json.Tests { public static class JsonElementCloneTests { [Fact] public static void CloneTwiceFromSameDocument() { string json = "[[]]"; JsonElement root; JsonElement clone; JsonElement clone2; using (JsonDocument doc = JsonDocument.Parse(json)) { root = doc.RootElement; clone = root.Clone(); clone2 = root.Clone(); Assert.Equal(json, clone.GetRawText()); Assert.NotSame(doc, clone.SniffDocument()); Assert.NotSame(doc, clone2.SniffDocument()); } // After document Dispose Assert.Equal(json, clone.GetRawText()); Assert.Equal(json, clone2.GetRawText()); Assert.NotSame(clone.SniffDocument(), clone2.SniffDocument()); Assert.Throws<ObjectDisposedException>(() => root.GetRawText()); } [Fact] public static void CloneInnerElementFromClonedElement() { JsonElement clone; using (JsonDocument doc = JsonDocument.Parse("[[[]]]")) { JsonElement middle = doc.RootElement[0].Clone(); JsonElement inner = middle[0]; clone = inner.Clone(); Assert.Equal(inner.GetRawText(), clone.GetRawText()); Assert.NotSame(doc, clone.SniffDocument()); Assert.Same(middle.SniffDocument(), clone.SniffDocument()); Assert.Same(inner.SniffDocument(), clone.SniffDocument()); } // After document Dispose Assert.Equal("[]", clone.GetRawText()); } [Fact] public static void CloneAtInnerNumber() { CloneAtInner("1.21e9", JsonValueKind.Number); } [Fact] public static void CloneAtInnerString() { CloneAtInner("\" this string has \\u0039 spaces\"", JsonValueKind.String); } [Fact] public static void CloneAtInnerTrue() { CloneAtInner("true", JsonValueKind.True); } [Fact] public static void CloneAtInnerFalse() { CloneAtInner("false", JsonValueKind.False); } [Fact] public static void CloneAtInnerNull() { CloneAtInner("null", JsonValueKind.Null); } [Fact] public static void CloneAtInnerObject() { // Very weird whitespace is used here 
just to ensure that the // clone API isn't making any whitespace assumptions. CloneAtInner( @"{ ""this"": [ { ""object"": 0, ""has"": [ ""whitespace"" ] } ] }", JsonValueKind.Object); } [Fact] public static void CloneAtInnerArray() { // Very weird whitespace is used here just to ensure that the // clone API isn't making any whitespace assumptions. CloneAtInner( @"[ { ""this"": [ { ""object"": 0, ""has"": [ ""whitespace"" ] } ] }, 5 , false, null ]", JsonValueKind.Array); } private static void CloneAtInner(string innerJson, JsonValueKind valueType) { string json = $"{{ \"obj\": [ {{ \"not target\": true, \"target\": {innerJson} }}, 5 ] }}"; JsonElement clone; using (JsonDocument doc = JsonDocument.Parse(json)) { JsonElement target = doc.RootElement.GetProperty("obj")[0].GetProperty("target"); Assert.Equal(valueType, target.ValueKind); clone = target.Clone(); } Assert.Equal(innerJson, clone.GetRawText()); } private static JsonDocument SniffDocument(this JsonElement element) { return (JsonDocument)typeof(JsonElement). GetField("_parent", BindingFlags.Instance|BindingFlags.NonPublic). GetValue(element); } } }
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./src/libraries/System.Private.Xml/src/System/Xml/XPath/Internal/ReversePositionQuery.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Xml.XPath; namespace MS.Internal.Xml.XPath { internal sealed class ReversePositionQuery : ForwardPositionQuery { public ReversePositionQuery(Query input) : base(input) { } private ReversePositionQuery(ReversePositionQuery other) : base(other) { } public override XPathNodeIterator Clone() { return new ReversePositionQuery(this); } public override int CurrentPosition { get { return outputBuffer.Count - count + 1; } } public override QueryProps Properties { get { return base.Properties | QueryProps.Reverse; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Xml.XPath; namespace MS.Internal.Xml.XPath { internal sealed class ReversePositionQuery : ForwardPositionQuery { public ReversePositionQuery(Query input) : base(input) { } private ReversePositionQuery(ReversePositionQuery other) : base(other) { } public override XPathNodeIterator Clone() { return new ReversePositionQuery(this); } public override int CurrentPosition { get { return outputBuffer.Count - count + 1; } } public override QueryProps Properties { get { return base.Properties | QueryProps.Reverse; } } } }
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./src/libraries/System.Drawing.Common/src/System/Drawing/Printing/DefaultPrintController.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.ComponentModel; using System.Diagnostics; using System.Drawing.Internal; using System.Runtime.InteropServices; namespace System.Drawing.Printing { /// <summary> /// Specifies a print controller that sends information to a printer. /// </summary> public class StandardPrintController : PrintController { private DeviceContext? _dc; private Graphics? _graphics; /// <summary> /// Implements StartPrint for printing to a physical printer. /// </summary> public override void OnStartPrint(PrintDocument document, PrintEventArgs e) { Debug.Assert(_dc == null && _graphics == null, "PrintController methods called in the wrong order?"); Debug.Assert(_modeHandle != null); base.OnStartPrint(document, e); // the win32 methods below SuppressUnmanagedCodeAttributes so assertin on UnmanagedCodePermission is redundant if (!document.PrinterSettings.IsValid) throw new InvalidPrinterException(document.PrinterSettings); _dc = document.PrinterSettings.CreateDeviceContext(_modeHandle); Interop.Gdi32.DOCINFO info = new Interop.Gdi32.DOCINFO(); info.lpszDocName = document.DocumentName; if (document.PrinterSettings.PrintToFile) info.lpszOutput = document.PrinterSettings.OutputPort; //This will be "FILE:" else info.lpszOutput = null; info.lpszDatatype = null; info.fwType = 0; int result = Interop.Gdi32.StartDoc(new HandleRef(_dc, _dc.Hdc), info); if (result <= 0) { int error = Marshal.GetLastWin32Error(); if (error == SafeNativeMethods.ERROR_CANCELLED) { e.Cancel = true; } else { throw new Win32Exception(error); } } } /// <summary> /// Implements StartPage for printing to a physical printer. 
/// </summary> public override Graphics OnStartPage(PrintDocument document, PrintPageEventArgs e) { Debug.Assert(_dc != null && _graphics == null, "PrintController methods called in the wrong order?"); Debug.Assert(_modeHandle != null); base.OnStartPage(document, e); e.PageSettings.CopyToHdevmode(_modeHandle); IntPtr modePointer = Interop.Kernel32.GlobalLock(new HandleRef(this, _modeHandle)); try { IntPtr result = Interop.Gdi32.ResetDC(new HandleRef(_dc, _dc.Hdc), new HandleRef(null, modePointer)); Debug.Assert(result == _dc.Hdc, "ResetDC didn't return the same handle I gave it"); } finally { Interop.Kernel32.GlobalUnlock(new HandleRef(this, _modeHandle)); } _graphics = Graphics.FromHdcInternal(_dc.Hdc); if (document.OriginAtMargins) { // Adjust the origin of the graphics object to be at the // user-specified margin location // int dpiX = Interop.Gdi32.GetDeviceCaps(new HandleRef(_dc, _dc.Hdc), Interop.Gdi32.DeviceCapability.LOGPIXELSX); int dpiY = Interop.Gdi32.GetDeviceCaps(new HandleRef(_dc, _dc.Hdc), Interop.Gdi32.DeviceCapability.LOGPIXELSY); int hardMarginX_DU = Interop.Gdi32.GetDeviceCaps(new HandleRef(_dc, _dc.Hdc), Interop.Gdi32.DeviceCapability.PHYSICALOFFSETX); int hardMarginY_DU = Interop.Gdi32.GetDeviceCaps(new HandleRef(_dc, _dc.Hdc), Interop.Gdi32.DeviceCapability.PHYSICALOFFSETY); float hardMarginX = hardMarginX_DU * 100 / dpiX; float hardMarginY = hardMarginY_DU * 100 / dpiY; _graphics.TranslateTransform(-hardMarginX, -hardMarginY); _graphics.TranslateTransform(document.DefaultPageSettings.Margins.Left, document.DefaultPageSettings.Margins.Top); } int result2 = Interop.Gdi32.StartPage(new HandleRef(_dc, _dc.Hdc)); if (result2 <= 0) throw new Win32Exception(); return _graphics; } /// <summary> /// Implements EndPage for printing to a physical printer. 
/// </summary> public override void OnEndPage(PrintDocument document, PrintPageEventArgs e) { Debug.Assert(_dc != null && _graphics != null, "PrintController methods called in the wrong order?"); try { int result = Interop.Gdi32.EndPage(new HandleRef(_dc, _dc.Hdc)); if (result <= 0) throw new Win32Exception(); } finally { _graphics.Dispose(); // Dispose of GDI+ Graphics; keep the DC _graphics = null; } base.OnEndPage(document, e); } /// <summary> /// Implements EndPrint for printing to a physical printer. /// </summary> public override void OnEndPrint(PrintDocument document, PrintEventArgs e) { Debug.Assert(_dc != null && _graphics == null, "PrintController methods called in the wrong order?"); if (_dc != null) { try { int result = (e.Cancel) ? Interop.Gdi32.AbortDoc(new HandleRef(_dc, _dc.Hdc)) : Interop.Gdi32.EndDoc(new HandleRef(_dc, _dc.Hdc)); if (result <= 0) throw new Win32Exception(); } finally { _dc.Dispose(); _dc = null; } } base.OnEndPrint(document, e); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.ComponentModel; using System.Diagnostics; using System.Drawing.Internal; using System.Runtime.InteropServices; namespace System.Drawing.Printing { /// <summary> /// Specifies a print controller that sends information to a printer. /// </summary> public class StandardPrintController : PrintController { private DeviceContext? _dc; private Graphics? _graphics; /// <summary> /// Implements StartPrint for printing to a physical printer. /// </summary> public override void OnStartPrint(PrintDocument document, PrintEventArgs e) { Debug.Assert(_dc == null && _graphics == null, "PrintController methods called in the wrong order?"); Debug.Assert(_modeHandle != null); base.OnStartPrint(document, e); // the win32 methods below SuppressUnmanagedCodeAttributes so assertin on UnmanagedCodePermission is redundant if (!document.PrinterSettings.IsValid) throw new InvalidPrinterException(document.PrinterSettings); _dc = document.PrinterSettings.CreateDeviceContext(_modeHandle); Interop.Gdi32.DOCINFO info = new Interop.Gdi32.DOCINFO(); info.lpszDocName = document.DocumentName; if (document.PrinterSettings.PrintToFile) info.lpszOutput = document.PrinterSettings.OutputPort; //This will be "FILE:" else info.lpszOutput = null; info.lpszDatatype = null; info.fwType = 0; int result = Interop.Gdi32.StartDoc(new HandleRef(_dc, _dc.Hdc), info); if (result <= 0) { int error = Marshal.GetLastWin32Error(); if (error == SafeNativeMethods.ERROR_CANCELLED) { e.Cancel = true; } else { throw new Win32Exception(error); } } } /// <summary> /// Implements StartPage for printing to a physical printer. 
/// </summary> public override Graphics OnStartPage(PrintDocument document, PrintPageEventArgs e) { Debug.Assert(_dc != null && _graphics == null, "PrintController methods called in the wrong order?"); Debug.Assert(_modeHandle != null); base.OnStartPage(document, e); e.PageSettings.CopyToHdevmode(_modeHandle); IntPtr modePointer = Interop.Kernel32.GlobalLock(new HandleRef(this, _modeHandle)); try { IntPtr result = Interop.Gdi32.ResetDC(new HandleRef(_dc, _dc.Hdc), new HandleRef(null, modePointer)); Debug.Assert(result == _dc.Hdc, "ResetDC didn't return the same handle I gave it"); } finally { Interop.Kernel32.GlobalUnlock(new HandleRef(this, _modeHandle)); } _graphics = Graphics.FromHdcInternal(_dc.Hdc); if (document.OriginAtMargins) { // Adjust the origin of the graphics object to be at the // user-specified margin location // int dpiX = Interop.Gdi32.GetDeviceCaps(new HandleRef(_dc, _dc.Hdc), Interop.Gdi32.DeviceCapability.LOGPIXELSX); int dpiY = Interop.Gdi32.GetDeviceCaps(new HandleRef(_dc, _dc.Hdc), Interop.Gdi32.DeviceCapability.LOGPIXELSY); int hardMarginX_DU = Interop.Gdi32.GetDeviceCaps(new HandleRef(_dc, _dc.Hdc), Interop.Gdi32.DeviceCapability.PHYSICALOFFSETX); int hardMarginY_DU = Interop.Gdi32.GetDeviceCaps(new HandleRef(_dc, _dc.Hdc), Interop.Gdi32.DeviceCapability.PHYSICALOFFSETY); float hardMarginX = hardMarginX_DU * 100 / dpiX; float hardMarginY = hardMarginY_DU * 100 / dpiY; _graphics.TranslateTransform(-hardMarginX, -hardMarginY); _graphics.TranslateTransform(document.DefaultPageSettings.Margins.Left, document.DefaultPageSettings.Margins.Top); } int result2 = Interop.Gdi32.StartPage(new HandleRef(_dc, _dc.Hdc)); if (result2 <= 0) throw new Win32Exception(); return _graphics; } /// <summary> /// Implements EndPage for printing to a physical printer. 
/// </summary> public override void OnEndPage(PrintDocument document, PrintPageEventArgs e) { Debug.Assert(_dc != null && _graphics != null, "PrintController methods called in the wrong order?"); try { int result = Interop.Gdi32.EndPage(new HandleRef(_dc, _dc.Hdc)); if (result <= 0) throw new Win32Exception(); } finally { _graphics.Dispose(); // Dispose of GDI+ Graphics; keep the DC _graphics = null; } base.OnEndPage(document, e); } /// <summary> /// Implements EndPrint for printing to a physical printer. /// </summary> public override void OnEndPrint(PrintDocument document, PrintEventArgs e) { Debug.Assert(_dc != null && _graphics == null, "PrintController methods called in the wrong order?"); if (_dc != null) { try { int result = (e.Cancel) ? Interop.Gdi32.AbortDoc(new HandleRef(_dc, _dc.Hdc)) : Interop.Gdi32.EndDoc(new HandleRef(_dc, _dc.Hdc)); if (result <= 0) throw new Win32Exception(); } finally { _dc.Dispose(); _dc = null; } } base.OnEndPrint(document, e); } } }
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./src/coreclr/vm/typehash.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // File: typehash.h // // #ifndef _TYPE_HASH_H #define _TYPE_HASH_H #include "dacenumerablehash.h" //======================================================================================== // This hash table is used by class loaders to look up constructed types: // arrays, pointers and instantiations of user-defined generic types. // // Each persisted module structure has an EETypeHashTable used for constructed types that // were ngen'ed into that module. See ceeload.hpp for more information about ngen modules. // // Types created at runtime are placed in an EETypeHashTable in BaseDomain. // // Keys are derivable from the data stored in the table (TypeHandle) // - for an instantiated type, the typedef module, typedef token, and instantiation // - for an array/pointer type, the CorElementType, rank, and type parameter // //======================================================================================== DWORD HashTypeKey(TypeKey* pKey); // One of these is present for each element in the table // It simply chains together (hash,data) pairs typedef DPTR(struct EETypeHashEntry) PTR_EETypeHashEntry; typedef struct EETypeHashEntry { TypeHandle GetTypeHandle(); void SetTypeHandle(TypeHandle handle); #ifndef DACCESS_COMPILE EETypeHashEntry& operator=(const EETypeHashEntry& src) { m_data = src.m_data; return *this; } #endif // !DACCESS_COMPILE PTR_VOID GetData() { return m_data; } private: friend class EETypeHashTable; PTR_VOID m_data; } EETypeHashEntry_t; // The type hash table itself typedef DPTR(class EETypeHashTable) PTR_EETypeHashTable; class EETypeHashTable : public DacEnumerableHashTable<EETypeHashTable, EETypeHashEntry, 2> { public: // This is the domain in which the hash table is allocated PTR_LoaderAllocator m_pAllocator; #ifdef _DEBUG private: Volatile<LONG> m_dwSealCount; // Can more types be added to the table? 
public: void InitUnseal() { LIMITED_METHOD_CONTRACT; m_dwSealCount = 0; } bool IsUnsealed() { LIMITED_METHOD_CONTRACT; return (m_dwSealCount == 0); } void Seal() { LIMITED_METHOD_CONTRACT; FastInterlockIncrement(&m_dwSealCount); } void Unseal() { LIMITED_METHOD_CONTRACT; FastInterlockDecrement(&m_dwSealCount); } #endif // _DEBUG private: #ifndef DACCESS_COMPILE EETypeHashTable(); ~EETypeHashTable(); #endif public: static EETypeHashTable *Create(LoaderAllocator *pAllocator, Module *pModule, DWORD dwNumBuckets, AllocMemTracker *pamTracker); private: #ifndef DACCESS_COMPILE EETypeHashTable(Module *pModule, LoaderHeap *pHeap, DWORD cInitialBuckets) : DacEnumerableHashTable<EETypeHashTable, EETypeHashEntry, 2>(pModule, pHeap, cInitialBuckets) {} #endif void operator delete(void *p); public: // Insert a value in the hash table, key implicit in data // Value must not be present in the table already VOID InsertValue(TypeHandle data); // Look up a value in the hash table, key explicit in pKey // Return a null type handle if not found TypeHandle GetValue(TypeKey* pKey); BOOL ContainsValue(TypeHandle th); // An iterator for the table class Iterator { public: // This iterator can be reused for walking different tables void Reset(); Iterator(); Iterator(EETypeHashTable * pTable); ~Iterator(); private: friend class EETypeHashTable; void Init(); EETypeHashTable *m_pTable; BaseIterator m_sIterator; bool m_fIterating; }; BOOL FindNext(Iterator *it, EETypeHashEntry **ppEntry); DWORD GetCount(); #ifdef DACCESS_COMPILE void EnumMemoryRegionsForEntry(EETypeHashEntry_t *pEntry, CLRDataEnumMemoryFlags flags); #endif private: EETypeHashEntry_t * FindItem(TypeKey* pKey); BOOL CompareInstantiatedType(TypeHandle t, Module *pModule, mdTypeDef token, Instantiation inst); BOOL CompareFnPtrType(TypeHandle t, BYTE callConv, DWORD numArgs, TypeHandle *retAndArgTypes); BOOL GrowHashTable(); LoaderAllocator* GetLoaderAllocator(); }; #endif /* _TYPE_HASH_H */
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // File: typehash.h // // #ifndef _TYPE_HASH_H #define _TYPE_HASH_H #include "dacenumerablehash.h" //======================================================================================== // This hash table is used by class loaders to look up constructed types: // arrays, pointers and instantiations of user-defined generic types. // // Each persisted module structure has an EETypeHashTable used for constructed types that // were ngen'ed into that module. See ceeload.hpp for more information about ngen modules. // // Types created at runtime are placed in an EETypeHashTable in BaseDomain. // // Keys are derivable from the data stored in the table (TypeHandle) // - for an instantiated type, the typedef module, typedef token, and instantiation // - for an array/pointer type, the CorElementType, rank, and type parameter // //======================================================================================== DWORD HashTypeKey(TypeKey* pKey); // One of these is present for each element in the table // It simply chains together (hash,data) pairs typedef DPTR(struct EETypeHashEntry) PTR_EETypeHashEntry; typedef struct EETypeHashEntry { TypeHandle GetTypeHandle(); void SetTypeHandle(TypeHandle handle); #ifndef DACCESS_COMPILE EETypeHashEntry& operator=(const EETypeHashEntry& src) { m_data = src.m_data; return *this; } #endif // !DACCESS_COMPILE PTR_VOID GetData() { return m_data; } private: friend class EETypeHashTable; PTR_VOID m_data; } EETypeHashEntry_t; // The type hash table itself typedef DPTR(class EETypeHashTable) PTR_EETypeHashTable; class EETypeHashTable : public DacEnumerableHashTable<EETypeHashTable, EETypeHashEntry, 2> { public: // This is the domain in which the hash table is allocated PTR_LoaderAllocator m_pAllocator; #ifdef _DEBUG private: Volatile<LONG> m_dwSealCount; // Can more types be added to the table? 
public: void InitUnseal() { LIMITED_METHOD_CONTRACT; m_dwSealCount = 0; } bool IsUnsealed() { LIMITED_METHOD_CONTRACT; return (m_dwSealCount == 0); } void Seal() { LIMITED_METHOD_CONTRACT; FastInterlockIncrement(&m_dwSealCount); } void Unseal() { LIMITED_METHOD_CONTRACT; FastInterlockDecrement(&m_dwSealCount); } #endif // _DEBUG private: #ifndef DACCESS_COMPILE EETypeHashTable(); ~EETypeHashTable(); #endif public: static EETypeHashTable *Create(LoaderAllocator *pAllocator, Module *pModule, DWORD dwNumBuckets, AllocMemTracker *pamTracker); private: #ifndef DACCESS_COMPILE EETypeHashTable(Module *pModule, LoaderHeap *pHeap, DWORD cInitialBuckets) : DacEnumerableHashTable<EETypeHashTable, EETypeHashEntry, 2>(pModule, pHeap, cInitialBuckets) {} #endif void operator delete(void *p); public: // Insert a value in the hash table, key implicit in data // Value must not be present in the table already VOID InsertValue(TypeHandle data); // Look up a value in the hash table, key explicit in pKey // Return a null type handle if not found TypeHandle GetValue(TypeKey* pKey); BOOL ContainsValue(TypeHandle th); // An iterator for the table class Iterator { public: // This iterator can be reused for walking different tables void Reset(); Iterator(); Iterator(EETypeHashTable * pTable); ~Iterator(); private: friend class EETypeHashTable; void Init(); EETypeHashTable *m_pTable; BaseIterator m_sIterator; bool m_fIterating; }; BOOL FindNext(Iterator *it, EETypeHashEntry **ppEntry); DWORD GetCount(); #ifdef DACCESS_COMPILE void EnumMemoryRegionsForEntry(EETypeHashEntry_t *pEntry, CLRDataEnumMemoryFlags flags); #endif private: EETypeHashEntry_t * FindItem(TypeKey* pKey); BOOL CompareInstantiatedType(TypeHandle t, Module *pModule, mdTypeDef token, Instantiation inst); BOOL CompareFnPtrType(TypeHandle t, BYTE callConv, DWORD numArgs, TypeHandle *retAndArgTypes); BOOL GrowHashTable(); LoaderAllocator* GetLoaderAllocator(); }; #endif /* _TYPE_HASH_H */
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./src/coreclr/tools/aot/ILCompiler.TypeSystem.Tests/HashcodeTests.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using Internal.TypeSystem; using Internal.NativeFormat; using Xunit; namespace TypeSystemTests { public class HashcodeTests { TestTypeSystemContext _context; ModuleDesc _testModule; public HashcodeTests() { _context = new TestTypeSystemContext(TargetArchitecture.X64); var systemModule = _context.CreateModuleForSimpleName("CoreTestAssembly"); _context.SetSystemModule(systemModule); _testModule = systemModule; } [Fact] public void TestMultidimensionalArrays() { DefType systemArrayType = _context.GetWellKnownType(WellKnownType.Array); TypeDesc objectType = _context.GetWellKnownType(WellKnownType.Object); ArrayType objectMDArrayRank1 = _context.GetArrayType(objectType, 1); ArrayType objectMDArrayRank2 = _context.GetArrayType(objectType, 2); ArrayType objectMDArrayRank3 = _context.GetArrayType(objectType, 3); Assert.Equal(TypeHashingAlgorithms.ComputeArrayTypeHashCode(objectType.GetHashCode(), 1), objectMDArrayRank1.GetHashCode()); Assert.Equal(TypeHashingAlgorithms.ComputeArrayTypeHashCode(objectType.GetHashCode(), 2), objectMDArrayRank2.GetHashCode()); Assert.Equal(TypeHashingAlgorithms.ComputeArrayTypeHashCode(objectType.GetHashCode(), 3), objectMDArrayRank3.GetHashCode()); } [Fact] public void TestSingleDimensionalArrays() { DefType systemArrayType = _context.GetWellKnownType(WellKnownType.Array); TypeDesc objectType = _context.GetWellKnownType(WellKnownType.Object); ArrayType objectArray = _context.GetArrayType(objectType); Assert.Equal(TypeHashingAlgorithms.ComputeArrayTypeHashCode(objectType.GetHashCode(), -1), objectArray.GetHashCode()); } [Fact] public void TestNonGenericTypes() { DefType systemArrayType = _context.GetWellKnownType(WellKnownType.Array); MetadataType nonNestedType = (MetadataType)_testModule.GetType("Hashcode", "NonNestedType"); TypeDesc nestedType = nonNestedType.GetNestedType("NestedType"); int 
expectedNonNestedTypeHashcode = TypeHashingAlgorithms.ComputeNameHashCode("Hashcode.NonNestedType"); int expectedNestedTypeNameHashcode = TypeHashingAlgorithms.ComputeNameHashCode("NestedType"); int expectedNestedTypeHashcode = TypeHashingAlgorithms.ComputeNestedTypeHashCode(expectedNonNestedTypeHashcode, expectedNestedTypeNameHashcode); Assert.Equal(expectedNonNestedTypeHashcode, nonNestedType.GetHashCode()); Assert.Equal(expectedNestedTypeHashcode, nestedType.GetHashCode()); } [Fact] void TestGenericTypes() { MetadataType ilistType = (MetadataType)_testModule.GetType("System.Collections.Generic", "IList`1"); DefType systemArrayType = _context.GetWellKnownType(WellKnownType.Array); DefType ilistOfSystemArray = ilistType.MakeInstantiatedType(systemArrayType); int expectedIListOfTHashcode = TypeHashingAlgorithms.ComputeNameHashCode("System.Collections.Generic.IList`1"); int expectedSystemArrayHashcode = TypeHashingAlgorithms.ComputeNameHashCode("System.Array"); Assert.Equal(expectedIListOfTHashcode, ilistType.GetHashCode()); Assert.Equal(TypeHashingAlgorithms.ComputeGenericInstanceHashCode(expectedIListOfTHashcode, new int[] { expectedSystemArrayHashcode }), ilistOfSystemArray.GetHashCode()); } [Fact] public void TestInstantiatedMethods() { MetadataType nonNestedType = (MetadataType)_testModule.GetType("Hashcode", "NonNestedType"); MetadataType genericType = (MetadataType)_testModule.GetType("Hashcode", "GenericType`2"); DefType intType = _context.GetWellKnownType(WellKnownType.Int32); DefType stringType = _context.GetWellKnownType(WellKnownType.String); MetadataType genericTypeOfIntString = genericType.MakeInstantiatedType(intType, stringType); MetadataType genericTypeOfStringInt = genericType.MakeInstantiatedType(stringType, intType); // build up expected hash codes for the above int expHashNonNestedType = TypeHashingAlgorithms.ComputeNameHashCode("Hashcode.NonNestedType"); Assert.Equal(expHashNonNestedType, nonNestedType.GetHashCode()); int expHashGenType = 
TypeHashingAlgorithms.ComputeNameHashCode("Hashcode.GenericType`2"); Assert.Equal(expHashGenType, genericType.GetHashCode()); int expHashInt = TypeHashingAlgorithms.ComputeNameHashCode("System.Int32"); Assert.Equal(expHashInt, intType.GetHashCode()); int expHashString = TypeHashingAlgorithms.ComputeNameHashCode("System.String"); Assert.Equal(expHashString, stringType.GetHashCode()); int expHashGenTypeOfIS = TypeHashingAlgorithms.ComputeGenericInstanceHashCode(expHashGenType, new int[] { expHashInt, expHashString }); Assert.Equal(expHashGenTypeOfIS, genericTypeOfIntString.GetHashCode()); int expHashGenTypeOfSI = TypeHashingAlgorithms.ComputeGenericInstanceHashCode(expHashGenType, new int[] { expHashString, expHashInt }); Assert.Equal(expHashGenTypeOfSI, genericTypeOfStringInt.GetHashCode()); // Test that instantiated method's have the right hashes int genMethodNameHash = TypeHashingAlgorithms.ComputeNameHashCode("GenericMethod"); int genMethodNameAndIHash = TypeHashingAlgorithms.ComputeGenericInstanceHashCode(genMethodNameHash, new int[] { expHashInt }); int genMethodNameAndSHash = TypeHashingAlgorithms.ComputeGenericInstanceHashCode(genMethodNameHash, new int[] { expHashString }); Action<MetadataType, int> testSequence = (MetadataType typeWithGenericMethod, int expectedTypeHash) => { // Uninstantiated Generic method MethodDesc genMethod = typeWithGenericMethod.GetMethod("GenericMethod", null); Assert.Equal(TypeHashingAlgorithms.ComputeMethodHashCode(expectedTypeHash, genMethodNameHash), genMethod.GetHashCode()); // Instantiated over int MethodDesc genMethodI = genMethod.MakeInstantiatedMethod(intType); Assert.Equal(TypeHashingAlgorithms.ComputeMethodHashCode(expectedTypeHash, genMethodNameAndIHash), genMethodI.GetHashCode()); // Instantiated over string MethodDesc genMethodS = genMethod.MakeInstantiatedMethod(stringType); Assert.Equal(TypeHashingAlgorithms.ComputeMethodHashCode(expectedTypeHash, genMethodNameAndSHash), genMethodS.GetHashCode()); // Assert they 
aren't the same as the other hashes Assert.NotEqual(genMethodI.GetHashCode(), genMethodS.GetHashCode()); Assert.NotEqual(genMethodI.GetHashCode(), genMethod.GetHashCode()); Assert.NotEqual(genMethodS.GetHashCode(), genMethod.GetHashCode()); }; // Test cases on non-generic type testSequence(nonNestedType, expHashNonNestedType); // Test cases on generic type testSequence(genericType, expHashGenType); // Test cases on instantiated generic type testSequence(genericTypeOfIntString, expHashGenTypeOfIS); testSequence(genericTypeOfStringInt, expHashGenTypeOfSI); } [Fact] public void TestPointerTypes() { DefType intType = _context.GetWellKnownType(WellKnownType.Int32); int expHashInt = TypeHashingAlgorithms.ComputeNameHashCode("System.Int32"); Assert.Equal(expHashInt, intType.GetHashCode()); int expHashIntPointer = TypeHashingAlgorithms.ComputePointerTypeHashCode(expHashInt); TypeDesc intPointerType = _context.GetPointerType(intType); Assert.Equal(expHashIntPointer, intPointerType.GetHashCode()); } [Fact] public void TestFunctionPointerTypes() { DefType intType = _context.GetWellKnownType(WellKnownType.Int32); DefType objectType = _context.GetWellKnownType(WellKnownType.Object); int expHashInt = TypeHashingAlgorithms.ComputeNameHashCode("System.Int32"); int expHashObject = TypeHashingAlgorithms.ComputeNameHashCode("System.Object"); int expHashFnPtr = TypeHashingAlgorithms.ComputeMethodSignatureHashCode(expHashInt, new[] { expHashObject }); MethodSignature fnPtrSig = new MethodSignature(MethodSignatureFlags.None, 0, intType, new TypeDesc[] { objectType }); var fnPtrType = _context.GetFunctionPointerType(fnPtrSig); Assert.Equal(expHashFnPtr, fnPtrType.GetHashCode()); } [Fact] public void TestByRefTypes() { DefType intType = _context.GetWellKnownType(WellKnownType.Int32); int expHashInt = TypeHashingAlgorithms.ComputeNameHashCode("System.Int32"); Assert.Equal(expHashInt, intType.GetHashCode()); int expHashIntByRef = TypeHashingAlgorithms.ComputeByrefTypeHashCode(expHashInt); 
TypeDesc intByRefType = _context.GetByRefType(intType); Assert.Equal(expHashIntByRef, intByRefType.GetHashCode()); } [Fact] public void TestHashCodeBuilder() { { var builder = new TypeHashingAlgorithms.HashCodeBuilder("Xy"); Assert.Equal(TypeHashingAlgorithms.ComputeNameHashCode("Xy"), builder.ToHashCode()); } { var builder = new TypeHashingAlgorithms.HashCodeBuilder("Xyz"); Assert.Equal(TypeHashingAlgorithms.ComputeNameHashCode("Xyz"), builder.ToHashCode()); } { var builder = new TypeHashingAlgorithms.HashCodeBuilder("Xy"); builder.Append(""); Assert.Equal(TypeHashingAlgorithms.ComputeNameHashCode("Xy"), builder.ToHashCode()); } { var builder = new TypeHashingAlgorithms.HashCodeBuilder("Xy"); builder.Append("."); Assert.Equal(TypeHashingAlgorithms.ComputeNameHashCode("Xy."), builder.ToHashCode()); } { var builder = new TypeHashingAlgorithms.HashCodeBuilder("Xyz"); builder.Append("."); Assert.Equal(TypeHashingAlgorithms.ComputeNameHashCode("Xyz."), builder.ToHashCode()); } { var builder = new TypeHashingAlgorithms.HashCodeBuilder("Xy"); builder.Append(".."); Assert.Equal(TypeHashingAlgorithms.ComputeNameHashCode("Xy.."), builder.ToHashCode()); } { var builder = new TypeHashingAlgorithms.HashCodeBuilder("Xyz"); builder.Append(".."); Assert.Equal(TypeHashingAlgorithms.ComputeNameHashCode("Xyz.."), builder.ToHashCode()); } { var builder = new TypeHashingAlgorithms.HashCodeBuilder("Xy"); builder.Append("."); builder.Append("Ab"); Assert.Equal(TypeHashingAlgorithms.ComputeNameHashCode("Xy.Ab"), builder.ToHashCode()); } { var builder = new TypeHashingAlgorithms.HashCodeBuilder("Xy"); builder.Append("."); builder.Append("Abc"); Assert.Equal(TypeHashingAlgorithms.ComputeNameHashCode("Xy.Abc"), builder.ToHashCode()); } { var builder = new TypeHashingAlgorithms.HashCodeBuilder("Xyz"); builder.Append("."); builder.Append("Abc"); Assert.Equal(TypeHashingAlgorithms.ComputeNameHashCode("Xyz.Abc"), builder.ToHashCode()); } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using Internal.TypeSystem; using Internal.NativeFormat; using Xunit; namespace TypeSystemTests { public class HashcodeTests { TestTypeSystemContext _context; ModuleDesc _testModule; public HashcodeTests() { _context = new TestTypeSystemContext(TargetArchitecture.X64); var systemModule = _context.CreateModuleForSimpleName("CoreTestAssembly"); _context.SetSystemModule(systemModule); _testModule = systemModule; } [Fact] public void TestMultidimensionalArrays() { DefType systemArrayType = _context.GetWellKnownType(WellKnownType.Array); TypeDesc objectType = _context.GetWellKnownType(WellKnownType.Object); ArrayType objectMDArrayRank1 = _context.GetArrayType(objectType, 1); ArrayType objectMDArrayRank2 = _context.GetArrayType(objectType, 2); ArrayType objectMDArrayRank3 = _context.GetArrayType(objectType, 3); Assert.Equal(TypeHashingAlgorithms.ComputeArrayTypeHashCode(objectType.GetHashCode(), 1), objectMDArrayRank1.GetHashCode()); Assert.Equal(TypeHashingAlgorithms.ComputeArrayTypeHashCode(objectType.GetHashCode(), 2), objectMDArrayRank2.GetHashCode()); Assert.Equal(TypeHashingAlgorithms.ComputeArrayTypeHashCode(objectType.GetHashCode(), 3), objectMDArrayRank3.GetHashCode()); } [Fact] public void TestSingleDimensionalArrays() { DefType systemArrayType = _context.GetWellKnownType(WellKnownType.Array); TypeDesc objectType = _context.GetWellKnownType(WellKnownType.Object); ArrayType objectArray = _context.GetArrayType(objectType); Assert.Equal(TypeHashingAlgorithms.ComputeArrayTypeHashCode(objectType.GetHashCode(), -1), objectArray.GetHashCode()); } [Fact] public void TestNonGenericTypes() { DefType systemArrayType = _context.GetWellKnownType(WellKnownType.Array); MetadataType nonNestedType = (MetadataType)_testModule.GetType("Hashcode", "NonNestedType"); TypeDesc nestedType = nonNestedType.GetNestedType("NestedType"); int 
expectedNonNestedTypeHashcode = TypeHashingAlgorithms.ComputeNameHashCode("Hashcode.NonNestedType"); int expectedNestedTypeNameHashcode = TypeHashingAlgorithms.ComputeNameHashCode("NestedType"); int expectedNestedTypeHashcode = TypeHashingAlgorithms.ComputeNestedTypeHashCode(expectedNonNestedTypeHashcode, expectedNestedTypeNameHashcode); Assert.Equal(expectedNonNestedTypeHashcode, nonNestedType.GetHashCode()); Assert.Equal(expectedNestedTypeHashcode, nestedType.GetHashCode()); } [Fact] void TestGenericTypes() { MetadataType ilistType = (MetadataType)_testModule.GetType("System.Collections.Generic", "IList`1"); DefType systemArrayType = _context.GetWellKnownType(WellKnownType.Array); DefType ilistOfSystemArray = ilistType.MakeInstantiatedType(systemArrayType); int expectedIListOfTHashcode = TypeHashingAlgorithms.ComputeNameHashCode("System.Collections.Generic.IList`1"); int expectedSystemArrayHashcode = TypeHashingAlgorithms.ComputeNameHashCode("System.Array"); Assert.Equal(expectedIListOfTHashcode, ilistType.GetHashCode()); Assert.Equal(TypeHashingAlgorithms.ComputeGenericInstanceHashCode(expectedIListOfTHashcode, new int[] { expectedSystemArrayHashcode }), ilistOfSystemArray.GetHashCode()); } [Fact] public void TestInstantiatedMethods() { MetadataType nonNestedType = (MetadataType)_testModule.GetType("Hashcode", "NonNestedType"); MetadataType genericType = (MetadataType)_testModule.GetType("Hashcode", "GenericType`2"); DefType intType = _context.GetWellKnownType(WellKnownType.Int32); DefType stringType = _context.GetWellKnownType(WellKnownType.String); MetadataType genericTypeOfIntString = genericType.MakeInstantiatedType(intType, stringType); MetadataType genericTypeOfStringInt = genericType.MakeInstantiatedType(stringType, intType); // build up expected hash codes for the above int expHashNonNestedType = TypeHashingAlgorithms.ComputeNameHashCode("Hashcode.NonNestedType"); Assert.Equal(expHashNonNestedType, nonNestedType.GetHashCode()); int expHashGenType = 
TypeHashingAlgorithms.ComputeNameHashCode("Hashcode.GenericType`2"); Assert.Equal(expHashGenType, genericType.GetHashCode()); int expHashInt = TypeHashingAlgorithms.ComputeNameHashCode("System.Int32"); Assert.Equal(expHashInt, intType.GetHashCode()); int expHashString = TypeHashingAlgorithms.ComputeNameHashCode("System.String"); Assert.Equal(expHashString, stringType.GetHashCode()); int expHashGenTypeOfIS = TypeHashingAlgorithms.ComputeGenericInstanceHashCode(expHashGenType, new int[] { expHashInt, expHashString }); Assert.Equal(expHashGenTypeOfIS, genericTypeOfIntString.GetHashCode()); int expHashGenTypeOfSI = TypeHashingAlgorithms.ComputeGenericInstanceHashCode(expHashGenType, new int[] { expHashString, expHashInt }); Assert.Equal(expHashGenTypeOfSI, genericTypeOfStringInt.GetHashCode()); // Test that instantiated method's have the right hashes int genMethodNameHash = TypeHashingAlgorithms.ComputeNameHashCode("GenericMethod"); int genMethodNameAndIHash = TypeHashingAlgorithms.ComputeGenericInstanceHashCode(genMethodNameHash, new int[] { expHashInt }); int genMethodNameAndSHash = TypeHashingAlgorithms.ComputeGenericInstanceHashCode(genMethodNameHash, new int[] { expHashString }); Action<MetadataType, int> testSequence = (MetadataType typeWithGenericMethod, int expectedTypeHash) => { // Uninstantiated Generic method MethodDesc genMethod = typeWithGenericMethod.GetMethod("GenericMethod", null); Assert.Equal(TypeHashingAlgorithms.ComputeMethodHashCode(expectedTypeHash, genMethodNameHash), genMethod.GetHashCode()); // Instantiated over int MethodDesc genMethodI = genMethod.MakeInstantiatedMethod(intType); Assert.Equal(TypeHashingAlgorithms.ComputeMethodHashCode(expectedTypeHash, genMethodNameAndIHash), genMethodI.GetHashCode()); // Instantiated over string MethodDesc genMethodS = genMethod.MakeInstantiatedMethod(stringType); Assert.Equal(TypeHashingAlgorithms.ComputeMethodHashCode(expectedTypeHash, genMethodNameAndSHash), genMethodS.GetHashCode()); // Assert they 
aren't the same as the other hashes Assert.NotEqual(genMethodI.GetHashCode(), genMethodS.GetHashCode()); Assert.NotEqual(genMethodI.GetHashCode(), genMethod.GetHashCode()); Assert.NotEqual(genMethodS.GetHashCode(), genMethod.GetHashCode()); }; // Test cases on non-generic type testSequence(nonNestedType, expHashNonNestedType); // Test cases on generic type testSequence(genericType, expHashGenType); // Test cases on instantiated generic type testSequence(genericTypeOfIntString, expHashGenTypeOfIS); testSequence(genericTypeOfStringInt, expHashGenTypeOfSI); } [Fact] public void TestPointerTypes() { DefType intType = _context.GetWellKnownType(WellKnownType.Int32); int expHashInt = TypeHashingAlgorithms.ComputeNameHashCode("System.Int32"); Assert.Equal(expHashInt, intType.GetHashCode()); int expHashIntPointer = TypeHashingAlgorithms.ComputePointerTypeHashCode(expHashInt); TypeDesc intPointerType = _context.GetPointerType(intType); Assert.Equal(expHashIntPointer, intPointerType.GetHashCode()); } [Fact] public void TestFunctionPointerTypes() { DefType intType = _context.GetWellKnownType(WellKnownType.Int32); DefType objectType = _context.GetWellKnownType(WellKnownType.Object); int expHashInt = TypeHashingAlgorithms.ComputeNameHashCode("System.Int32"); int expHashObject = TypeHashingAlgorithms.ComputeNameHashCode("System.Object"); int expHashFnPtr = TypeHashingAlgorithms.ComputeMethodSignatureHashCode(expHashInt, new[] { expHashObject }); MethodSignature fnPtrSig = new MethodSignature(MethodSignatureFlags.None, 0, intType, new TypeDesc[] { objectType }); var fnPtrType = _context.GetFunctionPointerType(fnPtrSig); Assert.Equal(expHashFnPtr, fnPtrType.GetHashCode()); } [Fact] public void TestByRefTypes() { DefType intType = _context.GetWellKnownType(WellKnownType.Int32); int expHashInt = TypeHashingAlgorithms.ComputeNameHashCode("System.Int32"); Assert.Equal(expHashInt, intType.GetHashCode()); int expHashIntByRef = TypeHashingAlgorithms.ComputeByrefTypeHashCode(expHashInt); 
TypeDesc intByRefType = _context.GetByRefType(intType); Assert.Equal(expHashIntByRef, intByRefType.GetHashCode()); } [Fact] public void TestHashCodeBuilder() { { var builder = new TypeHashingAlgorithms.HashCodeBuilder("Xy"); Assert.Equal(TypeHashingAlgorithms.ComputeNameHashCode("Xy"), builder.ToHashCode()); } { var builder = new TypeHashingAlgorithms.HashCodeBuilder("Xyz"); Assert.Equal(TypeHashingAlgorithms.ComputeNameHashCode("Xyz"), builder.ToHashCode()); } { var builder = new TypeHashingAlgorithms.HashCodeBuilder("Xy"); builder.Append(""); Assert.Equal(TypeHashingAlgorithms.ComputeNameHashCode("Xy"), builder.ToHashCode()); } { var builder = new TypeHashingAlgorithms.HashCodeBuilder("Xy"); builder.Append("."); Assert.Equal(TypeHashingAlgorithms.ComputeNameHashCode("Xy."), builder.ToHashCode()); } { var builder = new TypeHashingAlgorithms.HashCodeBuilder("Xyz"); builder.Append("."); Assert.Equal(TypeHashingAlgorithms.ComputeNameHashCode("Xyz."), builder.ToHashCode()); } { var builder = new TypeHashingAlgorithms.HashCodeBuilder("Xy"); builder.Append(".."); Assert.Equal(TypeHashingAlgorithms.ComputeNameHashCode("Xy.."), builder.ToHashCode()); } { var builder = new TypeHashingAlgorithms.HashCodeBuilder("Xyz"); builder.Append(".."); Assert.Equal(TypeHashingAlgorithms.ComputeNameHashCode("Xyz.."), builder.ToHashCode()); } { var builder = new TypeHashingAlgorithms.HashCodeBuilder("Xy"); builder.Append("."); builder.Append("Ab"); Assert.Equal(TypeHashingAlgorithms.ComputeNameHashCode("Xy.Ab"), builder.ToHashCode()); } { var builder = new TypeHashingAlgorithms.HashCodeBuilder("Xy"); builder.Append("."); builder.Append("Abc"); Assert.Equal(TypeHashingAlgorithms.ComputeNameHashCode("Xy.Abc"), builder.ToHashCode()); } { var builder = new TypeHashingAlgorithms.HashCodeBuilder("Xyz"); builder.Append("."); builder.Append("Abc"); Assert.Equal(TypeHashingAlgorithms.ComputeNameHashCode("Xyz.Abc"), builder.ToHashCode()); } } } }
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./src/tests/JIT/HardwareIntrinsics/Arm/AdvSimd/AbsoluteDifferenceAdd.Vector128.UInt32.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void AbsoluteDifferenceAdd_Vector128_UInt32() { var test = new SimpleTernaryOpTest__AbsoluteDifferenceAdd_Vector128_UInt32(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); } // Validates passing a static member works test.RunClsVarScenario(); if (AdvSimd.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local class works, using pinning and Load 
test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class SimpleTernaryOpTest__AbsoluteDifferenceAdd_Vector128_UInt32 { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private byte[] inArray3; private byte[] outArray; private GCHandle inHandle1; private GCHandle inHandle2; private GCHandle inHandle3; private GCHandle outHandle; private ulong alignment; public DataTable(UInt32[] inArray1, UInt32[] inArray2, UInt32[] inArray3, UInt32[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<UInt32>(); int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<UInt32>(); int sizeOfinArray3 = inArray3.Length * Unsafe.SizeOf<UInt32>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<UInt32>(); if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfinArray3 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.inArray3 = new 
byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.inHandle3 = GCHandle.Alloc(this.inArray3, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<UInt32, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<UInt32, byte>(ref inArray2[0]), (uint)sizeOfinArray2); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray3Ptr), ref Unsafe.As<UInt32, byte>(ref inArray3[0]), (uint)sizeOfinArray3); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray3Ptr => Align((byte*)(inHandle3.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); inHandle3.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector128<UInt32> _fld1; public Vector128<UInt32> _fld2; public Vector128<UInt32> _fld3; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt32>, byte>(ref testStruct._fld1), ref Unsafe.As<UInt32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt32(); } 
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt32>, byte>(ref testStruct._fld2), ref Unsafe.As<UInt32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetUInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt32>, byte>(ref testStruct._fld3), ref Unsafe.As<UInt32, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); return testStruct; } public void RunStructFldScenario(SimpleTernaryOpTest__AbsoluteDifferenceAdd_Vector128_UInt32 testClass) { var result = AdvSimd.AbsoluteDifferenceAdd(_fld1, _fld2, _fld3); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, _fld3, testClass._dataTable.outArrayPtr); } public void RunStructFldScenario_Load(SimpleTernaryOpTest__AbsoluteDifferenceAdd_Vector128_UInt32 testClass) { fixed (Vector128<UInt32>* pFld1 = &_fld1) fixed (Vector128<UInt32>* pFld2 = &_fld2) fixed (Vector128<UInt32>* pFld3 = &_fld3) { var result = AdvSimd.AbsoluteDifferenceAdd( AdvSimd.LoadVector128((UInt32*)(pFld1)), AdvSimd.LoadVector128((UInt32*)(pFld2)), AdvSimd.LoadVector128((UInt32*)(pFld3)) ); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, _fld3, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 16; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<UInt32>>() / sizeof(UInt32); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector128<UInt32>>() / sizeof(UInt32); private static readonly int Op3ElementCount = Unsafe.SizeOf<Vector128<UInt32>>() / sizeof(UInt32); private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<UInt32>>() / sizeof(UInt32); private static UInt32[] _data1 = new UInt32[Op1ElementCount]; private static UInt32[] _data2 = new UInt32[Op2ElementCount]; private static UInt32[] _data3 = new UInt32[Op3ElementCount]; private static Vector128<UInt32> 
_clsVar1; private static Vector128<UInt32> _clsVar2; private static Vector128<UInt32> _clsVar3; private Vector128<UInt32> _fld1; private Vector128<UInt32> _fld2; private Vector128<UInt32> _fld3; private DataTable _dataTable; static SimpleTernaryOpTest__AbsoluteDifferenceAdd_Vector128_UInt32() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt32>, byte>(ref _clsVar1), ref Unsafe.As<UInt32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt32>, byte>(ref _clsVar2), ref Unsafe.As<UInt32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetUInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt32>, byte>(ref _clsVar3), ref Unsafe.As<UInt32, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); } public SimpleTernaryOpTest__AbsoluteDifferenceAdd_Vector128_UInt32() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt32>, byte>(ref _fld1), ref Unsafe.As<UInt32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt32>, byte>(ref _fld2), ref Unsafe.As<UInt32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetUInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt32>, byte>(ref _fld3), ref Unsafe.As<UInt32, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = 
TestLibrary.Generator.GetUInt32(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt32(); } for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetUInt32(); } _dataTable = new DataTable(_data1, _data2, _data3, new UInt32[RetElementCount], LargestVectorSize); } public bool IsSupported => AdvSimd.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = AdvSimd.AbsoluteDifferenceAdd( Unsafe.Read<Vector128<UInt32>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<UInt32>>(_dataTable.inArray2Ptr), Unsafe.Read<Vector128<UInt32>>(_dataTable.inArray3Ptr) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = AdvSimd.AbsoluteDifferenceAdd( AdvSimd.LoadVector128((UInt32*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector128((UInt32*)(_dataTable.inArray2Ptr)), AdvSimd.LoadVector128((UInt32*)(_dataTable.inArray3Ptr)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.AbsoluteDifferenceAdd), new Type[] { typeof(Vector128<UInt32>), typeof(Vector128<UInt32>), typeof(Vector128<UInt32>) }) .Invoke(null, new object[] { Unsafe.Read<Vector128<UInt32>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<UInt32>>(_dataTable.inArray2Ptr), Unsafe.Read<Vector128<UInt32>>(_dataTable.inArray3Ptr) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<UInt32>)(result)); 
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.AbsoluteDifferenceAdd), new Type[] { typeof(Vector128<UInt32>), typeof(Vector128<UInt32>), typeof(Vector128<UInt32>) }) .Invoke(null, new object[] { AdvSimd.LoadVector128((UInt32*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector128((UInt32*)(_dataTable.inArray2Ptr)), AdvSimd.LoadVector128((UInt32*)(_dataTable.inArray3Ptr)) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<UInt32>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = AdvSimd.AbsoluteDifferenceAdd( _clsVar1, _clsVar2, _clsVar3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _clsVar3, _dataTable.outArrayPtr); } public void RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector128<UInt32>* pClsVar1 = &_clsVar1) fixed (Vector128<UInt32>* pClsVar2 = &_clsVar2) fixed (Vector128<UInt32>* pClsVar3 = &_clsVar3) { var result = AdvSimd.AbsoluteDifferenceAdd( AdvSimd.LoadVector128((UInt32*)(pClsVar1)), AdvSimd.LoadVector128((UInt32*)(pClsVar2)), AdvSimd.LoadVector128((UInt32*)(pClsVar3)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _clsVar3, _dataTable.outArrayPtr); } } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector128<UInt32>>(_dataTable.inArray1Ptr); var op2 = Unsafe.Read<Vector128<UInt32>>(_dataTable.inArray2Ptr); var op3 = Unsafe.Read<Vector128<UInt32>>(_dataTable.inArray3Ptr); var result = 
AdvSimd.AbsoluteDifferenceAdd(op1, op2, op3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, op3, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var op1 = AdvSimd.LoadVector128((UInt32*)(_dataTable.inArray1Ptr)); var op2 = AdvSimd.LoadVector128((UInt32*)(_dataTable.inArray2Ptr)); var op3 = AdvSimd.LoadVector128((UInt32*)(_dataTable.inArray3Ptr)); var result = AdvSimd.AbsoluteDifferenceAdd(op1, op2, op3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, op3, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new SimpleTernaryOpTest__AbsoluteDifferenceAdd_Vector128_UInt32(); var result = AdvSimd.AbsoluteDifferenceAdd(test._fld1, test._fld2, test._fld3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new SimpleTernaryOpTest__AbsoluteDifferenceAdd_Vector128_UInt32(); fixed (Vector128<UInt32>* pFld1 = &test._fld1) fixed (Vector128<UInt32>* pFld2 = &test._fld2) fixed (Vector128<UInt32>* pFld3 = &test._fld3) { var result = AdvSimd.AbsoluteDifferenceAdd( AdvSimd.LoadVector128((UInt32*)(pFld1)), AdvSimd.LoadVector128((UInt32*)(pFld2)), AdvSimd.LoadVector128((UInt32*)(pFld3)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = AdvSimd.AbsoluteDifferenceAdd(_fld1, _fld2, _fld3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _fld3, _dataTable.outArrayPtr); } public void RunClassFldScenario_Load() { 
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector128<UInt32>* pFld1 = &_fld1) fixed (Vector128<UInt32>* pFld2 = &_fld2) fixed (Vector128<UInt32>* pFld3 = &_fld3) { var result = AdvSimd.AbsoluteDifferenceAdd( AdvSimd.LoadVector128((UInt32*)(pFld1)), AdvSimd.LoadVector128((UInt32*)(pFld2)), AdvSimd.LoadVector128((UInt32*)(pFld3)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _fld3, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = AdvSimd.AbsoluteDifferenceAdd(test._fld1, test._fld2, test._fld3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = AdvSimd.AbsoluteDifferenceAdd( AdvSimd.LoadVector128((UInt32*)(&test._fld1)), AdvSimd.LoadVector128((UInt32*)(&test._fld2)), AdvSimd.LoadVector128((UInt32*)(&test._fld3)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector128<UInt32> op1, 
Vector128<UInt32> op2, Vector128<UInt32> op3, void* result, [CallerMemberName] string method = "") { UInt32[] inArray1 = new UInt32[Op1ElementCount]; UInt32[] inArray2 = new UInt32[Op2ElementCount]; UInt32[] inArray3 = new UInt32[Op3ElementCount]; UInt32[] outArray = new UInt32[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray1[0]), op1); Unsafe.WriteUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray2[0]), op2); Unsafe.WriteUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray3[0]), op3); Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); ValidateResult(inArray1, inArray2, inArray3, outArray, method); } private void ValidateResult(void* op1, void* op2, void* op3, void* result, [CallerMemberName] string method = "") { UInt32[] inArray1 = new UInt32[Op1ElementCount]; UInt32[] inArray2 = new UInt32[Op2ElementCount]; UInt32[] inArray3 = new UInt32[Op3ElementCount]; UInt32[] outArray = new UInt32[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray3[0]), ref Unsafe.AsRef<byte>(op3), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); ValidateResult(inArray1, inArray2, inArray3, outArray, method); } private void ValidateResult(UInt32[] firstOp, UInt32[] secondOp, UInt32[] thirdOp, UInt32[] result, [CallerMemberName] string method = "") { bool succeeded = true; for (var i = 0; i < RetElementCount; i++) { if (Helpers.AbsoluteDifferenceAdd(firstOp[i], secondOp[i], thirdOp[i]) != result[i]) { succeeded = 
false; break; } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.AbsoluteDifferenceAdd)}<UInt32>(Vector128<UInt32>, Vector128<UInt32>, Vector128<UInt32>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})"); TestLibrary.TestFramework.LogInformation($"secondOp: ({string.Join(", ", secondOp)})"); TestLibrary.TestFramework.LogInformation($" thirdOp: ({string.Join(", ", thirdOp)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void AbsoluteDifferenceAdd_Vector128_UInt32() { var test = new SimpleTernaryOpTest__AbsoluteDifferenceAdd_Vector128_UInt32(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); } // Validates passing a static member works test.RunClsVarScenario(); if (AdvSimd.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local class works, using pinning and Load 
test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class SimpleTernaryOpTest__AbsoluteDifferenceAdd_Vector128_UInt32 { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private byte[] inArray3; private byte[] outArray; private GCHandle inHandle1; private GCHandle inHandle2; private GCHandle inHandle3; private GCHandle outHandle; private ulong alignment; public DataTable(UInt32[] inArray1, UInt32[] inArray2, UInt32[] inArray3, UInt32[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<UInt32>(); int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<UInt32>(); int sizeOfinArray3 = inArray3.Length * Unsafe.SizeOf<UInt32>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<UInt32>(); if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfinArray3 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.inArray3 = new 
byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.inHandle3 = GCHandle.Alloc(this.inArray3, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<UInt32, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<UInt32, byte>(ref inArray2[0]), (uint)sizeOfinArray2); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray3Ptr), ref Unsafe.As<UInt32, byte>(ref inArray3[0]), (uint)sizeOfinArray3); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray3Ptr => Align((byte*)(inHandle3.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); inHandle3.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector128<UInt32> _fld1; public Vector128<UInt32> _fld2; public Vector128<UInt32> _fld3; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt32>, byte>(ref testStruct._fld1), ref Unsafe.As<UInt32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt32(); } 
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt32>, byte>(ref testStruct._fld2), ref Unsafe.As<UInt32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetUInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt32>, byte>(ref testStruct._fld3), ref Unsafe.As<UInt32, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); return testStruct; } public void RunStructFldScenario(SimpleTernaryOpTest__AbsoluteDifferenceAdd_Vector128_UInt32 testClass) { var result = AdvSimd.AbsoluteDifferenceAdd(_fld1, _fld2, _fld3); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, _fld3, testClass._dataTable.outArrayPtr); } public void RunStructFldScenario_Load(SimpleTernaryOpTest__AbsoluteDifferenceAdd_Vector128_UInt32 testClass) { fixed (Vector128<UInt32>* pFld1 = &_fld1) fixed (Vector128<UInt32>* pFld2 = &_fld2) fixed (Vector128<UInt32>* pFld3 = &_fld3) { var result = AdvSimd.AbsoluteDifferenceAdd( AdvSimd.LoadVector128((UInt32*)(pFld1)), AdvSimd.LoadVector128((UInt32*)(pFld2)), AdvSimd.LoadVector128((UInt32*)(pFld3)) ); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, _fld3, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 16; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<UInt32>>() / sizeof(UInt32); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector128<UInt32>>() / sizeof(UInt32); private static readonly int Op3ElementCount = Unsafe.SizeOf<Vector128<UInt32>>() / sizeof(UInt32); private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<UInt32>>() / sizeof(UInt32); private static UInt32[] _data1 = new UInt32[Op1ElementCount]; private static UInt32[] _data2 = new UInt32[Op2ElementCount]; private static UInt32[] _data3 = new UInt32[Op3ElementCount]; private static Vector128<UInt32> 
_clsVar1; private static Vector128<UInt32> _clsVar2; private static Vector128<UInt32> _clsVar3; private Vector128<UInt32> _fld1; private Vector128<UInt32> _fld2; private Vector128<UInt32> _fld3; private DataTable _dataTable; static SimpleTernaryOpTest__AbsoluteDifferenceAdd_Vector128_UInt32() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt32>, byte>(ref _clsVar1), ref Unsafe.As<UInt32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt32>, byte>(ref _clsVar2), ref Unsafe.As<UInt32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetUInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt32>, byte>(ref _clsVar3), ref Unsafe.As<UInt32, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); } public SimpleTernaryOpTest__AbsoluteDifferenceAdd_Vector128_UInt32() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt32>, byte>(ref _fld1), ref Unsafe.As<UInt32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt32>, byte>(ref _fld2), ref Unsafe.As<UInt32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetUInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt32>, byte>(ref _fld3), ref Unsafe.As<UInt32, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = 
TestLibrary.Generator.GetUInt32(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt32(); } for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetUInt32(); } _dataTable = new DataTable(_data1, _data2, _data3, new UInt32[RetElementCount], LargestVectorSize); } public bool IsSupported => AdvSimd.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = AdvSimd.AbsoluteDifferenceAdd( Unsafe.Read<Vector128<UInt32>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<UInt32>>(_dataTable.inArray2Ptr), Unsafe.Read<Vector128<UInt32>>(_dataTable.inArray3Ptr) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = AdvSimd.AbsoluteDifferenceAdd( AdvSimd.LoadVector128((UInt32*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector128((UInt32*)(_dataTable.inArray2Ptr)), AdvSimd.LoadVector128((UInt32*)(_dataTable.inArray3Ptr)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.AbsoluteDifferenceAdd), new Type[] { typeof(Vector128<UInt32>), typeof(Vector128<UInt32>), typeof(Vector128<UInt32>) }) .Invoke(null, new object[] { Unsafe.Read<Vector128<UInt32>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<UInt32>>(_dataTable.inArray2Ptr), Unsafe.Read<Vector128<UInt32>>(_dataTable.inArray3Ptr) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<UInt32>)(result)); 
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.AbsoluteDifferenceAdd), new Type[] { typeof(Vector128<UInt32>), typeof(Vector128<UInt32>), typeof(Vector128<UInt32>) }) .Invoke(null, new object[] { AdvSimd.LoadVector128((UInt32*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector128((UInt32*)(_dataTable.inArray2Ptr)), AdvSimd.LoadVector128((UInt32*)(_dataTable.inArray3Ptr)) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<UInt32>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = AdvSimd.AbsoluteDifferenceAdd( _clsVar1, _clsVar2, _clsVar3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _clsVar3, _dataTable.outArrayPtr); } public void RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector128<UInt32>* pClsVar1 = &_clsVar1) fixed (Vector128<UInt32>* pClsVar2 = &_clsVar2) fixed (Vector128<UInt32>* pClsVar3 = &_clsVar3) { var result = AdvSimd.AbsoluteDifferenceAdd( AdvSimd.LoadVector128((UInt32*)(pClsVar1)), AdvSimd.LoadVector128((UInt32*)(pClsVar2)), AdvSimd.LoadVector128((UInt32*)(pClsVar3)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _clsVar3, _dataTable.outArrayPtr); } } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector128<UInt32>>(_dataTable.inArray1Ptr); var op2 = Unsafe.Read<Vector128<UInt32>>(_dataTable.inArray2Ptr); var op3 = Unsafe.Read<Vector128<UInt32>>(_dataTable.inArray3Ptr); var result = 
AdvSimd.AbsoluteDifferenceAdd(op1, op2, op3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, op3, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var op1 = AdvSimd.LoadVector128((UInt32*)(_dataTable.inArray1Ptr)); var op2 = AdvSimd.LoadVector128((UInt32*)(_dataTable.inArray2Ptr)); var op3 = AdvSimd.LoadVector128((UInt32*)(_dataTable.inArray3Ptr)); var result = AdvSimd.AbsoluteDifferenceAdd(op1, op2, op3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, op3, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new SimpleTernaryOpTest__AbsoluteDifferenceAdd_Vector128_UInt32(); var result = AdvSimd.AbsoluteDifferenceAdd(test._fld1, test._fld2, test._fld3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new SimpleTernaryOpTest__AbsoluteDifferenceAdd_Vector128_UInt32(); fixed (Vector128<UInt32>* pFld1 = &test._fld1) fixed (Vector128<UInt32>* pFld2 = &test._fld2) fixed (Vector128<UInt32>* pFld3 = &test._fld3) { var result = AdvSimd.AbsoluteDifferenceAdd( AdvSimd.LoadVector128((UInt32*)(pFld1)), AdvSimd.LoadVector128((UInt32*)(pFld2)), AdvSimd.LoadVector128((UInt32*)(pFld3)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = AdvSimd.AbsoluteDifferenceAdd(_fld1, _fld2, _fld3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _fld3, _dataTable.outArrayPtr); } public void RunClassFldScenario_Load() { 
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector128<UInt32>* pFld1 = &_fld1) fixed (Vector128<UInt32>* pFld2 = &_fld2) fixed (Vector128<UInt32>* pFld3 = &_fld3) { var result = AdvSimd.AbsoluteDifferenceAdd( AdvSimd.LoadVector128((UInt32*)(pFld1)), AdvSimd.LoadVector128((UInt32*)(pFld2)), AdvSimd.LoadVector128((UInt32*)(pFld3)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _fld3, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = AdvSimd.AbsoluteDifferenceAdd(test._fld1, test._fld2, test._fld3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = AdvSimd.AbsoluteDifferenceAdd( AdvSimd.LoadVector128((UInt32*)(&test._fld1)), AdvSimd.LoadVector128((UInt32*)(&test._fld2)), AdvSimd.LoadVector128((UInt32*)(&test._fld3)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector128<UInt32> op1, 
Vector128<UInt32> op2, Vector128<UInt32> op3, void* result, [CallerMemberName] string method = "") { UInt32[] inArray1 = new UInt32[Op1ElementCount]; UInt32[] inArray2 = new UInt32[Op2ElementCount]; UInt32[] inArray3 = new UInt32[Op3ElementCount]; UInt32[] outArray = new UInt32[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray1[0]), op1); Unsafe.WriteUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray2[0]), op2); Unsafe.WriteUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray3[0]), op3); Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); ValidateResult(inArray1, inArray2, inArray3, outArray, method); } private void ValidateResult(void* op1, void* op2, void* op3, void* result, [CallerMemberName] string method = "") { UInt32[] inArray1 = new UInt32[Op1ElementCount]; UInt32[] inArray2 = new UInt32[Op2ElementCount]; UInt32[] inArray3 = new UInt32[Op3ElementCount]; UInt32[] outArray = new UInt32[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray3[0]), ref Unsafe.AsRef<byte>(op3), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); ValidateResult(inArray1, inArray2, inArray3, outArray, method); } private void ValidateResult(UInt32[] firstOp, UInt32[] secondOp, UInt32[] thirdOp, UInt32[] result, [CallerMemberName] string method = "") { bool succeeded = true; for (var i = 0; i < RetElementCount; i++) { if (Helpers.AbsoluteDifferenceAdd(firstOp[i], secondOp[i], thirdOp[i]) != result[i]) { succeeded = 
false; break; } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.AbsoluteDifferenceAdd)}<UInt32>(Vector128<UInt32>, Vector128<UInt32>, Vector128<UInt32>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})"); TestLibrary.TestFramework.LogInformation($"secondOp: ({string.Join(", ", secondOp)})"); TestLibrary.TestFramework.LogInformation($" thirdOp: ({string.Join(", ", thirdOp)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./src/libraries/System.Security.Cryptography/src/System/Security/Cryptography/UniversalCryptoEncryptor.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Diagnostics; using System.Security.Cryptography; using Internal.Cryptography; namespace System.Security.Cryptography { // // A cross-platform ICryptoTransform implementation for encryption. // // - Implements the various padding algorithms (as we support padding algorithms that the underlying native apis don't.) // // - Parameterized by a BasicSymmetricCipher which encapsulates the algorithm, key, IV, chaining mode, direction of encryption // and the underlying native apis implementing the encryption. // internal sealed class UniversalCryptoEncryptor : UniversalCryptoTransform { public UniversalCryptoEncryptor(PaddingMode paddingMode, BasicSymmetricCipher basicSymmetricCipher) : base(paddingMode, basicSymmetricCipher) { } protected override int UncheckedTransformBlock(ReadOnlySpan<byte> inputBuffer, Span<byte> outputBuffer) { return BasicSymmetricCipher.Transform(inputBuffer, outputBuffer); } protected override int UncheckedTransformFinalBlock(ReadOnlySpan<byte> inputBuffer, Span<byte> outputBuffer) { // The only caller of this method is the array-allocating overload, outputBuffer is // always new memory, not a user-provided buffer. Debug.Assert(!inputBuffer.Overlaps(outputBuffer)); int padWritten = SymmetricPadding.PadBlock(inputBuffer, outputBuffer, PaddingSizeBytes, PaddingMode); int transformWritten = BasicSymmetricCipher.TransformFinal(outputBuffer.Slice(0, padWritten), outputBuffer); // After padding, we should have an even number of blocks, and the same applies // to the transform. 
Debug.Assert(padWritten == transformWritten); return transformWritten; } protected override byte[] UncheckedTransformFinalBlock(byte[] inputBuffer, int inputOffset, int inputCount) { int ciphertextLength = SymmetricPadding.GetCiphertextLength(inputCount, PaddingSizeBytes, PaddingMode); byte[] buffer = GC.AllocateUninitializedArray<byte>(ciphertextLength); int written = UncheckedTransformFinalBlock(inputBuffer.AsSpan(inputOffset, inputCount), buffer); Debug.Assert(written == buffer.Length); return buffer; } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Diagnostics; using System.Security.Cryptography; using Internal.Cryptography; namespace System.Security.Cryptography { // // A cross-platform ICryptoTransform implementation for encryption. // // - Implements the various padding algorithms (as we support padding algorithms that the underlying native apis don't.) // // - Parameterized by a BasicSymmetricCipher which encapsulates the algorithm, key, IV, chaining mode, direction of encryption // and the underlying native apis implementing the encryption. // internal sealed class UniversalCryptoEncryptor : UniversalCryptoTransform { public UniversalCryptoEncryptor(PaddingMode paddingMode, BasicSymmetricCipher basicSymmetricCipher) : base(paddingMode, basicSymmetricCipher) { } protected override int UncheckedTransformBlock(ReadOnlySpan<byte> inputBuffer, Span<byte> outputBuffer) { return BasicSymmetricCipher.Transform(inputBuffer, outputBuffer); } protected override int UncheckedTransformFinalBlock(ReadOnlySpan<byte> inputBuffer, Span<byte> outputBuffer) { // The only caller of this method is the array-allocating overload, outputBuffer is // always new memory, not a user-provided buffer. Debug.Assert(!inputBuffer.Overlaps(outputBuffer)); int padWritten = SymmetricPadding.PadBlock(inputBuffer, outputBuffer, PaddingSizeBytes, PaddingMode); int transformWritten = BasicSymmetricCipher.TransformFinal(outputBuffer.Slice(0, padWritten), outputBuffer); // After padding, we should have an even number of blocks, and the same applies // to the transform. 
Debug.Assert(padWritten == transformWritten); return transformWritten; } protected override byte[] UncheckedTransformFinalBlock(byte[] inputBuffer, int inputOffset, int inputCount) { int ciphertextLength = SymmetricPadding.GetCiphertextLength(inputCount, PaddingSizeBytes, PaddingMode); byte[] buffer = GC.AllocateUninitializedArray<byte>(ciphertextLength); int written = UncheckedTransformFinalBlock(inputBuffer.AsSpan(inputOffset, inputCount), buffer); Debug.Assert(written == buffer.Length); return buffer; } } }
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./src/native/libs/System.Net.Security.Native/pal_gssapi.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #pragma once #include "pal_config.h" #include "pal_compiler.h" #if HAVE_GSSFW_HEADERS || HAVE_HEIMDAL_HEADERS typedef struct gss_name_t_desc_struct GssName; typedef struct gss_ctx_id_t_desc_struct GssCtxId; typedef struct gss_cred_id_t_desc_struct GssCredId; typedef struct gss_buffer_desc_struct GssBuffer; #else typedef struct gss_name_struct GssName; typedef struct gss_ctx_id_struct GssCtxId; typedef struct gss_cred_id_struct GssCredId; typedef struct gss_buffer_desc_struct GssBuffer; #endif typedef enum { PAL_GSS_COMPLETE = 0, PAL_GSS_CONTINUE_NEEDED = 1 } PAL_GssStatus; typedef enum { PAL_GSS_C_DELEG_FLAG = 0x1, PAL_GSS_C_MUTUAL_FLAG = 0x2, PAL_GSS_C_REPLAY_FLAG = 0x4, PAL_GSS_C_SEQUENCE_FLAG = 0x8, PAL_GSS_C_CONF_FLAG = 0x10, PAL_GSS_C_INTEG_FLAG = 0x20, PAL_GSS_C_ANON_FLAG = 0x40, PAL_GSS_C_PROT_READY_FLAG = 0x80, PAL_GSS_C_TRANS_FLAG = 0x100, PAL_GSS_C_DCE_STYLE = 0x1000, PAL_GSS_C_IDENTIFY_FLAG = 0x2000, PAL_GSS_C_EXTENDED_ERROR_FLAG = 0x4000, PAL_GSS_C_DELEG_POLICY_FLAG = 0x8000 } PAL_GssFlags; /* Issue: #7342 Disable padded warning which occurs in case of 32-bit builds */ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wpadded" typedef struct { uint64_t length; uint8_t* data; } PAL_GssBuffer; #pragma clang diagnostic pop /* Shims the gss_release_buffer method. */ PALEXPORT void NetSecurityNative_ReleaseGssBuffer(void* buffer, uint64_t length); /* Shims the gss_display_status method for minor status (status_type = GSS_C_MECH_CODE). */ PALEXPORT uint32_t NetSecurityNative_DisplayMinorStatus(uint32_t* minorStatus, uint32_t statusValue, PAL_GssBuffer* outBuffer); /* Shims the gss_display_status method for major status (status_type = GSS_C_GSS_CODE). 
*/ PALEXPORT uint32_t NetSecurityNative_DisplayMajorStatus(uint32_t* minorStatus, uint32_t statusValue, PAL_GssBuffer* outBuffer); /* Shims the gss_import_name method with nametype = GSS_C_NT_USER_NAME. */ PALEXPORT uint32_t NetSecurityNative_ImportUserName(uint32_t* minorStatus, char* inputName, uint32_t inputNameLen, GssName** outputName); /* Shims the gss_import_name method with nametype = GSS_C_NT_USER_NAME. */ PALEXPORT uint32_t NetSecurityNative_ImportPrincipalName(uint32_t* minorStatus, char* inputName, uint32_t inputNameLen, GssName** outputName); /* Shims the gss_release_name method. */ PALEXPORT uint32_t NetSecurityNative_ReleaseName(uint32_t* minorStatus, GssName** inputName); /* Shims the gss_acquire_cred method with GSS_C_ACCEPT. */ PALEXPORT uint32_t NetSecurityNative_AcquireAcceptorCred(uint32_t* minorStatus, GssCredId** outputCredHandle); /* Shims the gss_acquire_cred method with SPNEGO oids with GSS_C_INITIATE. */ PALEXPORT uint32_t NetSecurityNative_InitiateCredSpNego(uint32_t* minorStatus, GssName* desiredName, GssCredId** outputCredHandle); /* Shims the gss_release_cred method. */ PALEXPORT uint32_t NetSecurityNative_ReleaseCred(uint32_t* minorStatus, GssCredId** credHandle); /* Shims the gss_init_sec_context method with SPNEGO oids. */ PALEXPORT uint32_t NetSecurityNative_InitSecContext(uint32_t* minorStatus, GssCredId* claimantCredHandle, GssCtxId** contextHandle, uint32_t isNtlm, GssName* targetName, uint32_t reqFlags, uint8_t* inputBytes, uint32_t inputLength, PAL_GssBuffer* outBuffer, uint32_t* retFlags, int32_t* isNtlmUsed); PALEXPORT uint32_t NetSecurityNative_InitSecContextEx(uint32_t* minorStatus, GssCredId* claimantCredHandle, GssCtxId** contextHandle, uint32_t isNtlm, void* cbt, int32_t cbtSize, GssName* targetName, uint32_t reqFlags, uint8_t* inputBytes, uint32_t inputLength, PAL_GssBuffer* outBuffer, uint32_t* retFlags, int32_t* isNtlmUsed); /* Shims the gss_accept_sec_context method. 
*/ PALEXPORT uint32_t NetSecurityNative_AcceptSecContext(uint32_t* minorStatus, GssCredId* acceptorCredHandle, GssCtxId** contextHandle, uint8_t* inputBytes, uint32_t inputLength, PAL_GssBuffer* outBuffer, uint32_t* retFlags, int32_t* isNtlmUsed); /* Shims the gss_delete_sec_context method. */ PALEXPORT uint32_t NetSecurityNative_DeleteSecContext(uint32_t* minorStatus, GssCtxId** contextHandle); /* Shims the gss_wrap method. */ PALEXPORT uint32_t NetSecurityNative_Wrap(uint32_t* minorStatus, GssCtxId* contextHandle, int32_t isEncrypt, uint8_t* inputBytes, int32_t count, PAL_GssBuffer* outBuffer); /* Shims the gss_unwrap method. */ PALEXPORT uint32_t NetSecurityNative_Unwrap(uint32_t* minorStatus, GssCtxId* contextHandle, uint8_t* inputBytes, int32_t offset, int32_t count, PAL_GssBuffer* outBuffer); /* Shims the gss_acquire_cred_with_password method with GSS_C_INITIATE. */ PALEXPORT uint32_t NetSecurityNative_InitiateCredWithPassword(uint32_t* minorStatus, int32_t isNtlm, GssName* desiredName, char* password, uint32_t passwdLen, GssCredId** outputCredHandle); /* Shims the gss_indicate_mechs method to detect if NTLM mech is installed. */ PALEXPORT uint32_t NetSecurityNative_IsNtlmInstalled(void); /* Shims gss_inquire_context and gss_display_name to get the remote user principal name. */ PALEXPORT uint32_t NetSecurityNative_GetUser(uint32_t* minorStatus, GssCtxId* contextHandle, PAL_GssBuffer* outBuffer); /* Performs initialization of GSS shim, if necessary. Return value 0 indicates a success. */ PALEXPORT int32_t NetSecurityNative_EnsureGssInitialized(void);
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #pragma once #include "pal_config.h" #include "pal_compiler.h" #if HAVE_GSSFW_HEADERS || HAVE_HEIMDAL_HEADERS typedef struct gss_name_t_desc_struct GssName; typedef struct gss_ctx_id_t_desc_struct GssCtxId; typedef struct gss_cred_id_t_desc_struct GssCredId; typedef struct gss_buffer_desc_struct GssBuffer; #else typedef struct gss_name_struct GssName; typedef struct gss_ctx_id_struct GssCtxId; typedef struct gss_cred_id_struct GssCredId; typedef struct gss_buffer_desc_struct GssBuffer; #endif typedef enum { PAL_GSS_COMPLETE = 0, PAL_GSS_CONTINUE_NEEDED = 1 } PAL_GssStatus; typedef enum { PAL_GSS_C_DELEG_FLAG = 0x1, PAL_GSS_C_MUTUAL_FLAG = 0x2, PAL_GSS_C_REPLAY_FLAG = 0x4, PAL_GSS_C_SEQUENCE_FLAG = 0x8, PAL_GSS_C_CONF_FLAG = 0x10, PAL_GSS_C_INTEG_FLAG = 0x20, PAL_GSS_C_ANON_FLAG = 0x40, PAL_GSS_C_PROT_READY_FLAG = 0x80, PAL_GSS_C_TRANS_FLAG = 0x100, PAL_GSS_C_DCE_STYLE = 0x1000, PAL_GSS_C_IDENTIFY_FLAG = 0x2000, PAL_GSS_C_EXTENDED_ERROR_FLAG = 0x4000, PAL_GSS_C_DELEG_POLICY_FLAG = 0x8000 } PAL_GssFlags; /* Issue: #7342 Disable padded warning which occurs in case of 32-bit builds */ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wpadded" typedef struct { uint64_t length; uint8_t* data; } PAL_GssBuffer; #pragma clang diagnostic pop /* Shims the gss_release_buffer method. */ PALEXPORT void NetSecurityNative_ReleaseGssBuffer(void* buffer, uint64_t length); /* Shims the gss_display_status method for minor status (status_type = GSS_C_MECH_CODE). */ PALEXPORT uint32_t NetSecurityNative_DisplayMinorStatus(uint32_t* minorStatus, uint32_t statusValue, PAL_GssBuffer* outBuffer); /* Shims the gss_display_status method for major status (status_type = GSS_C_GSS_CODE). 
*/ PALEXPORT uint32_t NetSecurityNative_DisplayMajorStatus(uint32_t* minorStatus, uint32_t statusValue, PAL_GssBuffer* outBuffer); /* Shims the gss_import_name method with nametype = GSS_C_NT_USER_NAME. */ PALEXPORT uint32_t NetSecurityNative_ImportUserName(uint32_t* minorStatus, char* inputName, uint32_t inputNameLen, GssName** outputName); /* Shims the gss_import_name method with nametype = GSS_C_NT_USER_NAME. */ PALEXPORT uint32_t NetSecurityNative_ImportPrincipalName(uint32_t* minorStatus, char* inputName, uint32_t inputNameLen, GssName** outputName); /* Shims the gss_release_name method. */ PALEXPORT uint32_t NetSecurityNative_ReleaseName(uint32_t* minorStatus, GssName** inputName); /* Shims the gss_acquire_cred method with GSS_C_ACCEPT. */ PALEXPORT uint32_t NetSecurityNative_AcquireAcceptorCred(uint32_t* minorStatus, GssCredId** outputCredHandle); /* Shims the gss_acquire_cred method with SPNEGO oids with GSS_C_INITIATE. */ PALEXPORT uint32_t NetSecurityNative_InitiateCredSpNego(uint32_t* minorStatus, GssName* desiredName, GssCredId** outputCredHandle); /* Shims the gss_release_cred method. */ PALEXPORT uint32_t NetSecurityNative_ReleaseCred(uint32_t* minorStatus, GssCredId** credHandle); /* Shims the gss_init_sec_context method with SPNEGO oids. */ PALEXPORT uint32_t NetSecurityNative_InitSecContext(uint32_t* minorStatus, GssCredId* claimantCredHandle, GssCtxId** contextHandle, uint32_t isNtlm, GssName* targetName, uint32_t reqFlags, uint8_t* inputBytes, uint32_t inputLength, PAL_GssBuffer* outBuffer, uint32_t* retFlags, int32_t* isNtlmUsed); PALEXPORT uint32_t NetSecurityNative_InitSecContextEx(uint32_t* minorStatus, GssCredId* claimantCredHandle, GssCtxId** contextHandle, uint32_t isNtlm, void* cbt, int32_t cbtSize, GssName* targetName, uint32_t reqFlags, uint8_t* inputBytes, uint32_t inputLength, PAL_GssBuffer* outBuffer, uint32_t* retFlags, int32_t* isNtlmUsed); /* Shims the gss_accept_sec_context method. 
*/ PALEXPORT uint32_t NetSecurityNative_AcceptSecContext(uint32_t* minorStatus, GssCredId* acceptorCredHandle, GssCtxId** contextHandle, uint8_t* inputBytes, uint32_t inputLength, PAL_GssBuffer* outBuffer, uint32_t* retFlags, int32_t* isNtlmUsed); /* Shims the gss_delete_sec_context method. */ PALEXPORT uint32_t NetSecurityNative_DeleteSecContext(uint32_t* minorStatus, GssCtxId** contextHandle); /* Shims the gss_wrap method. */ PALEXPORT uint32_t NetSecurityNative_Wrap(uint32_t* minorStatus, GssCtxId* contextHandle, int32_t isEncrypt, uint8_t* inputBytes, int32_t count, PAL_GssBuffer* outBuffer); /* Shims the gss_unwrap method. */ PALEXPORT uint32_t NetSecurityNative_Unwrap(uint32_t* minorStatus, GssCtxId* contextHandle, uint8_t* inputBytes, int32_t offset, int32_t count, PAL_GssBuffer* outBuffer); /* Shims the gss_acquire_cred_with_password method with GSS_C_INITIATE. */ PALEXPORT uint32_t NetSecurityNative_InitiateCredWithPassword(uint32_t* minorStatus, int32_t isNtlm, GssName* desiredName, char* password, uint32_t passwdLen, GssCredId** outputCredHandle); /* Shims the gss_indicate_mechs method to detect if NTLM mech is installed. */ PALEXPORT uint32_t NetSecurityNative_IsNtlmInstalled(void); /* Shims gss_inquire_context and gss_display_name to get the remote user principal name. */ PALEXPORT uint32_t NetSecurityNative_GetUser(uint32_t* minorStatus, GssCtxId* contextHandle, PAL_GssBuffer* outBuffer); /* Performs initialization of GSS shim, if necessary. Return value 0 indicates a success. */ PALEXPORT int32_t NetSecurityNative_EnsureGssInitialized(void);
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./src/tests/JIT/Regression/CLR-x86-JIT/V1-M12-Beta2/b70289/b70289.il
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. .assembly extern legacy library mscorlib {} .assembly extern System.Console { .publickeytoken = (B0 3F 5F 7F 11 D5 0A 3A ) .ver 4:0:0:0 } .assembly 'b70289' {} .assembly extern xunit.core {} .class ILGEN_0x8d1cfa06 { .method static int32 Main() { .custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = ( 01 00 00 00 ) .entrypoint .maxstack 20 .try { ldc.i4 0xdbb08029 ldc.i4.s 112 bgt.un Branch_0x2 ldc.r4 float32(0xda3975af) br Branch_0x3 Branch_0x2: ldc.i4 0x74f929bf ldc.i4.3 beq Branch_0x8 ldc.r8 float64(0x206b6e7a4e3b3ee5) br Branch_0x9 Branch_0x8: ldc.r4 float32(0x7c0c8a0c) Branch_0x9: Branch_0x3: ldc.i8 0x9715a29058031ffb conv.ovf.i4 conv.r.un bne.un Branch_0x0 ldc.i4.s -113 br Branch_0x1 Branch_0x0: ldc.i4 100 Branch_0x1: pop leave END } catch [mscorlib]System.OverflowException { pop leave END } END: ldc.i4 100 ret } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. .assembly extern legacy library mscorlib {} .assembly extern System.Console { .publickeytoken = (B0 3F 5F 7F 11 D5 0A 3A ) .ver 4:0:0:0 } .assembly 'b70289' {} .assembly extern xunit.core {} .class ILGEN_0x8d1cfa06 { .method static int32 Main() { .custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = ( 01 00 00 00 ) .entrypoint .maxstack 20 .try { ldc.i4 0xdbb08029 ldc.i4.s 112 bgt.un Branch_0x2 ldc.r4 float32(0xda3975af) br Branch_0x3 Branch_0x2: ldc.i4 0x74f929bf ldc.i4.3 beq Branch_0x8 ldc.r8 float64(0x206b6e7a4e3b3ee5) br Branch_0x9 Branch_0x8: ldc.r4 float32(0x7c0c8a0c) Branch_0x9: Branch_0x3: ldc.i8 0x9715a29058031ffb conv.ovf.i4 conv.r.un bne.un Branch_0x0 ldc.i4.s -113 br Branch_0x1 Branch_0x0: ldc.i4 100 Branch_0x1: pop leave END } catch [mscorlib]System.OverflowException { pop leave END } END: ldc.i4 100 ret } }
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./src/tests/JIT/HardwareIntrinsics/General/Vector256_1/As.SByte.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\General\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; namespace JIT.HardwareIntrinsics.General { public static partial class Program { private static void AsSByte() { var test = new VectorAs__AsSByte(); // Validates basic functionality works test.RunBasicScenario(); // Validates basic functionality works using the generic form, rather than the type-specific form of the method test.RunGenericScenario(); // Validates calling via reflection works test.RunReflectionScenario(); if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class VectorAs__AsSByte { private static readonly int LargestVectorSize = 32; private static readonly int ElementCount = Unsafe.SizeOf<Vector256<SByte>>() / sizeof(SByte); public bool Succeeded { get; set; } = true; public void RunBasicScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario)); Vector256<SByte> value; value = Vector256.Create((sbyte)TestLibrary.Generator.GetSByte()); Vector256<byte> byteResult = value.AsByte(); ValidateResult(byteResult, value); value = Vector256.Create((sbyte)TestLibrary.Generator.GetSByte()); Vector256<double> doubleResult = value.AsDouble(); ValidateResult(doubleResult, value); value = Vector256.Create((sbyte)TestLibrary.Generator.GetSByte()); Vector256<short> shortResult = value.AsInt16(); 
ValidateResult(shortResult, value); value = Vector256.Create((sbyte)TestLibrary.Generator.GetSByte()); Vector256<int> intResult = value.AsInt32(); ValidateResult(intResult, value); value = Vector256.Create((sbyte)TestLibrary.Generator.GetSByte()); Vector256<long> longResult = value.AsInt64(); ValidateResult(longResult, value); value = Vector256.Create((sbyte)TestLibrary.Generator.GetSByte()); Vector256<sbyte> sbyteResult = value.AsSByte(); ValidateResult(sbyteResult, value); value = Vector256.Create((sbyte)TestLibrary.Generator.GetSByte()); Vector256<float> floatResult = value.AsSingle(); ValidateResult(floatResult, value); value = Vector256.Create((sbyte)TestLibrary.Generator.GetSByte()); Vector256<ushort> ushortResult = value.AsUInt16(); ValidateResult(ushortResult, value); value = Vector256.Create((sbyte)TestLibrary.Generator.GetSByte()); Vector256<uint> uintResult = value.AsUInt32(); ValidateResult(uintResult, value); value = Vector256.Create((sbyte)TestLibrary.Generator.GetSByte()); Vector256<ulong> ulongResult = value.AsUInt64(); ValidateResult(ulongResult, value); } public void RunGenericScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunGenericScenario)); Vector256<SByte> value; value = Vector256.Create((sbyte)TestLibrary.Generator.GetSByte()); Vector256<byte> byteResult = value.As<SByte, byte>(); ValidateResult(byteResult, value); value = Vector256.Create((sbyte)TestLibrary.Generator.GetSByte()); Vector256<double> doubleResult = value.As<SByte, double>(); ValidateResult(doubleResult, value); value = Vector256.Create((sbyte)TestLibrary.Generator.GetSByte()); Vector256<short> shortResult = value.As<SByte, short>(); ValidateResult(shortResult, value); value = Vector256.Create((sbyte)TestLibrary.Generator.GetSByte()); Vector256<int> intResult = value.As<SByte, int>(); ValidateResult(intResult, value); value = Vector256.Create((sbyte)TestLibrary.Generator.GetSByte()); Vector256<long> longResult = value.As<SByte, long>(); ValidateResult(longResult, 
value); value = Vector256.Create((sbyte)TestLibrary.Generator.GetSByte()); Vector256<sbyte> sbyteResult = value.As<SByte, sbyte>(); ValidateResult(sbyteResult, value); value = Vector256.Create((sbyte)TestLibrary.Generator.GetSByte()); Vector256<float> floatResult = value.As<SByte, float>(); ValidateResult(floatResult, value); value = Vector256.Create((sbyte)TestLibrary.Generator.GetSByte()); Vector256<ushort> ushortResult = value.As<SByte, ushort>(); ValidateResult(ushortResult, value); value = Vector256.Create((sbyte)TestLibrary.Generator.GetSByte()); Vector256<uint> uintResult = value.As<SByte, uint>(); ValidateResult(uintResult, value); value = Vector256.Create((sbyte)TestLibrary.Generator.GetSByte()); Vector256<ulong> ulongResult = value.As<SByte, ulong>(); ValidateResult(ulongResult, value); } public void RunReflectionScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario)); Vector256<SByte> value; value = Vector256.Create((sbyte)TestLibrary.Generator.GetSByte()); object byteResult = typeof(Vector256) .GetMethod(nameof(Vector256.AsByte)) .MakeGenericMethod(typeof(SByte)) .Invoke(null, new object[] { value }); ValidateResult((Vector256<byte>)(byteResult), value); value = Vector256.Create((sbyte)TestLibrary.Generator.GetSByte()); object doubleResult = typeof(Vector256) .GetMethod(nameof(Vector256.AsDouble)) .MakeGenericMethod(typeof(SByte)) .Invoke(null, new object[] { value }); ValidateResult((Vector256<double>)(doubleResult), value); value = Vector256.Create((sbyte)TestLibrary.Generator.GetSByte()); object shortResult = typeof(Vector256) .GetMethod(nameof(Vector256.AsInt16)) .MakeGenericMethod(typeof(SByte)) .Invoke(null, new object[] { value }); ValidateResult((Vector256<short>)(shortResult), value); value = Vector256.Create((sbyte)TestLibrary.Generator.GetSByte()); object intResult = typeof(Vector256) .GetMethod(nameof(Vector256.AsInt32)) .MakeGenericMethod(typeof(SByte)) .Invoke(null, new object[] { value }); 
ValidateResult((Vector256<int>)(intResult), value); value = Vector256.Create((sbyte)TestLibrary.Generator.GetSByte()); object longResult = typeof(Vector256) .GetMethod(nameof(Vector256.AsInt64)) .MakeGenericMethod(typeof(SByte)) .Invoke(null, new object[] { value }); ValidateResult((Vector256<long>)(longResult), value); value = Vector256.Create((sbyte)TestLibrary.Generator.GetSByte()); object sbyteResult = typeof(Vector256) .GetMethod(nameof(Vector256.AsSByte)) .MakeGenericMethod(typeof(SByte)) .Invoke(null, new object[] { value }); ValidateResult((Vector256<sbyte>)(sbyteResult), value); value = Vector256.Create((sbyte)TestLibrary.Generator.GetSByte()); object floatResult = typeof(Vector256) .GetMethod(nameof(Vector256.AsSingle)) .MakeGenericMethod(typeof(SByte)) .Invoke(null, new object[] { value }); ValidateResult((Vector256<float>)(floatResult), value); value = Vector256.Create((sbyte)TestLibrary.Generator.GetSByte()); object ushortResult = typeof(Vector256) .GetMethod(nameof(Vector256.AsUInt16)) .MakeGenericMethod(typeof(SByte)) .Invoke(null, new object[] { value }); ValidateResult((Vector256<ushort>)(ushortResult), value); value = Vector256.Create((sbyte)TestLibrary.Generator.GetSByte()); object uintResult = typeof(Vector256) .GetMethod(nameof(Vector256.AsUInt32)) .MakeGenericMethod(typeof(SByte)) .Invoke(null, new object[] { value }); ValidateResult((Vector256<uint>)(uintResult), value); value = Vector256.Create((sbyte)TestLibrary.Generator.GetSByte()); object ulongResult = typeof(Vector256) .GetMethod(nameof(Vector256.AsUInt64)) .MakeGenericMethod(typeof(SByte)) .Invoke(null, new object[] { value }); ValidateResult((Vector256<ulong>)(ulongResult), value); } private void ValidateResult<T>(Vector256<T> result, Vector256<SByte> value, [CallerMemberName] string method = "") where T : struct { SByte[] resultElements = new SByte[ElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<SByte, byte>(ref resultElements[0]), result); SByte[] valueElements = new 
SByte[ElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<SByte, byte>(ref valueElements[0]), value); ValidateResult(resultElements, valueElements, typeof(T), method); } private void ValidateResult(SByte[] resultElements, SByte[] valueElements, Type targetType, [CallerMemberName] string method = "") { bool succeeded = true; for (var i = 0; i < ElementCount; i++) { if (resultElements[i] != valueElements[i]) { succeeded = false; break; } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"Vector256<SByte>.As{targetType.Name}: {method} failed:"); TestLibrary.TestFramework.LogInformation($" value: ({string.Join(", ", valueElements)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", resultElements)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\General\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; namespace JIT.HardwareIntrinsics.General { public static partial class Program { private static void AsSByte() { var test = new VectorAs__AsSByte(); // Validates basic functionality works test.RunBasicScenario(); // Validates basic functionality works using the generic form, rather than the type-specific form of the method test.RunGenericScenario(); // Validates calling via reflection works test.RunReflectionScenario(); if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class VectorAs__AsSByte { private static readonly int LargestVectorSize = 32; private static readonly int ElementCount = Unsafe.SizeOf<Vector256<SByte>>() / sizeof(SByte); public bool Succeeded { get; set; } = true; public void RunBasicScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario)); Vector256<SByte> value; value = Vector256.Create((sbyte)TestLibrary.Generator.GetSByte()); Vector256<byte> byteResult = value.AsByte(); ValidateResult(byteResult, value); value = Vector256.Create((sbyte)TestLibrary.Generator.GetSByte()); Vector256<double> doubleResult = value.AsDouble(); ValidateResult(doubleResult, value); value = Vector256.Create((sbyte)TestLibrary.Generator.GetSByte()); Vector256<short> shortResult = value.AsInt16(); 
ValidateResult(shortResult, value); value = Vector256.Create((sbyte)TestLibrary.Generator.GetSByte()); Vector256<int> intResult = value.AsInt32(); ValidateResult(intResult, value); value = Vector256.Create((sbyte)TestLibrary.Generator.GetSByte()); Vector256<long> longResult = value.AsInt64(); ValidateResult(longResult, value); value = Vector256.Create((sbyte)TestLibrary.Generator.GetSByte()); Vector256<sbyte> sbyteResult = value.AsSByte(); ValidateResult(sbyteResult, value); value = Vector256.Create((sbyte)TestLibrary.Generator.GetSByte()); Vector256<float> floatResult = value.AsSingle(); ValidateResult(floatResult, value); value = Vector256.Create((sbyte)TestLibrary.Generator.GetSByte()); Vector256<ushort> ushortResult = value.AsUInt16(); ValidateResult(ushortResult, value); value = Vector256.Create((sbyte)TestLibrary.Generator.GetSByte()); Vector256<uint> uintResult = value.AsUInt32(); ValidateResult(uintResult, value); value = Vector256.Create((sbyte)TestLibrary.Generator.GetSByte()); Vector256<ulong> ulongResult = value.AsUInt64(); ValidateResult(ulongResult, value); } public void RunGenericScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunGenericScenario)); Vector256<SByte> value; value = Vector256.Create((sbyte)TestLibrary.Generator.GetSByte()); Vector256<byte> byteResult = value.As<SByte, byte>(); ValidateResult(byteResult, value); value = Vector256.Create((sbyte)TestLibrary.Generator.GetSByte()); Vector256<double> doubleResult = value.As<SByte, double>(); ValidateResult(doubleResult, value); value = Vector256.Create((sbyte)TestLibrary.Generator.GetSByte()); Vector256<short> shortResult = value.As<SByte, short>(); ValidateResult(shortResult, value); value = Vector256.Create((sbyte)TestLibrary.Generator.GetSByte()); Vector256<int> intResult = value.As<SByte, int>(); ValidateResult(intResult, value); value = Vector256.Create((sbyte)TestLibrary.Generator.GetSByte()); Vector256<long> longResult = value.As<SByte, long>(); ValidateResult(longResult, 
value); value = Vector256.Create((sbyte)TestLibrary.Generator.GetSByte()); Vector256<sbyte> sbyteResult = value.As<SByte, sbyte>(); ValidateResult(sbyteResult, value); value = Vector256.Create((sbyte)TestLibrary.Generator.GetSByte()); Vector256<float> floatResult = value.As<SByte, float>(); ValidateResult(floatResult, value); value = Vector256.Create((sbyte)TestLibrary.Generator.GetSByte()); Vector256<ushort> ushortResult = value.As<SByte, ushort>(); ValidateResult(ushortResult, value); value = Vector256.Create((sbyte)TestLibrary.Generator.GetSByte()); Vector256<uint> uintResult = value.As<SByte, uint>(); ValidateResult(uintResult, value); value = Vector256.Create((sbyte)TestLibrary.Generator.GetSByte()); Vector256<ulong> ulongResult = value.As<SByte, ulong>(); ValidateResult(ulongResult, value); } public void RunReflectionScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario)); Vector256<SByte> value; value = Vector256.Create((sbyte)TestLibrary.Generator.GetSByte()); object byteResult = typeof(Vector256) .GetMethod(nameof(Vector256.AsByte)) .MakeGenericMethod(typeof(SByte)) .Invoke(null, new object[] { value }); ValidateResult((Vector256<byte>)(byteResult), value); value = Vector256.Create((sbyte)TestLibrary.Generator.GetSByte()); object doubleResult = typeof(Vector256) .GetMethod(nameof(Vector256.AsDouble)) .MakeGenericMethod(typeof(SByte)) .Invoke(null, new object[] { value }); ValidateResult((Vector256<double>)(doubleResult), value); value = Vector256.Create((sbyte)TestLibrary.Generator.GetSByte()); object shortResult = typeof(Vector256) .GetMethod(nameof(Vector256.AsInt16)) .MakeGenericMethod(typeof(SByte)) .Invoke(null, new object[] { value }); ValidateResult((Vector256<short>)(shortResult), value); value = Vector256.Create((sbyte)TestLibrary.Generator.GetSByte()); object intResult = typeof(Vector256) .GetMethod(nameof(Vector256.AsInt32)) .MakeGenericMethod(typeof(SByte)) .Invoke(null, new object[] { value }); 
ValidateResult((Vector256<int>)(intResult), value); value = Vector256.Create((sbyte)TestLibrary.Generator.GetSByte()); object longResult = typeof(Vector256) .GetMethod(nameof(Vector256.AsInt64)) .MakeGenericMethod(typeof(SByte)) .Invoke(null, new object[] { value }); ValidateResult((Vector256<long>)(longResult), value); value = Vector256.Create((sbyte)TestLibrary.Generator.GetSByte()); object sbyteResult = typeof(Vector256) .GetMethod(nameof(Vector256.AsSByte)) .MakeGenericMethod(typeof(SByte)) .Invoke(null, new object[] { value }); ValidateResult((Vector256<sbyte>)(sbyteResult), value); value = Vector256.Create((sbyte)TestLibrary.Generator.GetSByte()); object floatResult = typeof(Vector256) .GetMethod(nameof(Vector256.AsSingle)) .MakeGenericMethod(typeof(SByte)) .Invoke(null, new object[] { value }); ValidateResult((Vector256<float>)(floatResult), value); value = Vector256.Create((sbyte)TestLibrary.Generator.GetSByte()); object ushortResult = typeof(Vector256) .GetMethod(nameof(Vector256.AsUInt16)) .MakeGenericMethod(typeof(SByte)) .Invoke(null, new object[] { value }); ValidateResult((Vector256<ushort>)(ushortResult), value); value = Vector256.Create((sbyte)TestLibrary.Generator.GetSByte()); object uintResult = typeof(Vector256) .GetMethod(nameof(Vector256.AsUInt32)) .MakeGenericMethod(typeof(SByte)) .Invoke(null, new object[] { value }); ValidateResult((Vector256<uint>)(uintResult), value); value = Vector256.Create((sbyte)TestLibrary.Generator.GetSByte()); object ulongResult = typeof(Vector256) .GetMethod(nameof(Vector256.AsUInt64)) .MakeGenericMethod(typeof(SByte)) .Invoke(null, new object[] { value }); ValidateResult((Vector256<ulong>)(ulongResult), value); } private void ValidateResult<T>(Vector256<T> result, Vector256<SByte> value, [CallerMemberName] string method = "") where T : struct { SByte[] resultElements = new SByte[ElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<SByte, byte>(ref resultElements[0]), result); SByte[] valueElements = new 
SByte[ElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<SByte, byte>(ref valueElements[0]), value); ValidateResult(resultElements, valueElements, typeof(T), method); } private void ValidateResult(SByte[] resultElements, SByte[] valueElements, Type targetType, [CallerMemberName] string method = "") { bool succeeded = true; for (var i = 0; i < ElementCount; i++) { if (resultElements[i] != valueElements[i]) { succeeded = false; break; } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"Vector256<SByte>.As{targetType.Name}: {method} failed:"); TestLibrary.TestFramework.LogInformation($" value: ({string.Join(", ", valueElements)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", resultElements)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./src/libraries/System.Private.CoreLib/src/System/Text/SpanRuneEnumerator.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. namespace System.Text { // An enumerator for retrieving System.Text.Rune instances from a ROS<char>. // Methods are pattern-matched by compiler to allow using foreach pattern. public ref struct SpanRuneEnumerator { private ReadOnlySpan<char> _remaining; private Rune _current; internal SpanRuneEnumerator(ReadOnlySpan<char> buffer) { _remaining = buffer; _current = default; } public Rune Current => _current; public SpanRuneEnumerator GetEnumerator() => this; public bool MoveNext() { if (_remaining.IsEmpty) { // reached the end of the buffer _current = default; return false; } int scalarValue = Rune.ReadFirstRuneFromUtf16Buffer(_remaining); if (scalarValue < 0) { // replace invalid sequences with U+FFFD scalarValue = Rune.ReplacementChar.Value; } // In UTF-16 specifically, invalid sequences always have length 1, which is the same // length as the replacement character U+FFFD. This means that we can always bump the // next index by the current scalar's UTF-16 sequence length. This optimization is not // generally applicable; for example, enumerating scalars from UTF-8 cannot utilize // this same trick. _current = Rune.UnsafeCreate((uint)scalarValue); _remaining = _remaining.Slice(_current.Utf16SequenceLength); return true; } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. namespace System.Text { // An enumerator for retrieving System.Text.Rune instances from a ROS<char>. // Methods are pattern-matched by compiler to allow using foreach pattern. public ref struct SpanRuneEnumerator { private ReadOnlySpan<char> _remaining; private Rune _current; internal SpanRuneEnumerator(ReadOnlySpan<char> buffer) { _remaining = buffer; _current = default; } public Rune Current => _current; public SpanRuneEnumerator GetEnumerator() => this; public bool MoveNext() { if (_remaining.IsEmpty) { // reached the end of the buffer _current = default; return false; } int scalarValue = Rune.ReadFirstRuneFromUtf16Buffer(_remaining); if (scalarValue < 0) { // replace invalid sequences with U+FFFD scalarValue = Rune.ReplacementChar.Value; } // In UTF-16 specifically, invalid sequences always have length 1, which is the same // length as the replacement character U+FFFD. This means that we can always bump the // next index by the current scalar's UTF-16 sequence length. This optimization is not // generally applicable; for example, enumerating scalars from UTF-8 cannot utilize // this same trick. _current = Rune.UnsafeCreate((uint)scalarValue); _remaining = _remaining.Slice(_current.Utf16SequenceLength); return true; } } }
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./src/tests/JIT/Methodical/Invoke/ctor/val_ctor_ro.csproj
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType>None</DebugType> <Optimize>True</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="val_ctor.cs" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType>None</DebugType> <Optimize>True</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="val_ctor.cs" /> </ItemGroup> </Project>
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./src/tests/JIT/Regression/VS-ia64-JIT/M00/b111192/strswitch2.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; namespace strswitch { internal class Class1 { private static int Main(string[] args) { string[] s = { "one", "two", "three", "four", "five", "six" }; for (int i = 0; i < s.Length; i++) { switch (s[i]) { case "one": Console.WriteLine("s == one"); break; case "two": Console.WriteLine("s == two"); break; case "three": try { Console.WriteLine("s == three"); } catch (Exception e) { Console.WriteLine(e); goto continueloop; // **** adding this will cause the asserts } break; case "four": Console.WriteLine("s == four"); break; case "five": Console.WriteLine("s == five"); break; default: Console.WriteLine("Greater than five"); break; }; continue; continueloop: Console.WriteLine("Continuing"); }; finish: return 100; } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; namespace strswitch { internal class Class1 { private static int Main(string[] args) { string[] s = { "one", "two", "three", "four", "five", "six" }; for (int i = 0; i < s.Length; i++) { switch (s[i]) { case "one": Console.WriteLine("s == one"); break; case "two": Console.WriteLine("s == two"); break; case "three": try { Console.WriteLine("s == three"); } catch (Exception e) { Console.WriteLine(e); goto continueloop; // **** adding this will cause the asserts } break; case "four": Console.WriteLine("s == four"); break; case "five": Console.WriteLine("s == five"); break; default: Console.WriteLine("Greater than five"); break; }; continue; continueloop: Console.WriteLine("Continuing"); }; finish: return 100; } } }
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./src/tests/JIT/IL_Conformance/Old/Base/switch.il
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. .assembly extern legacy library mscorlib {} .assembly 'switch'{} .class public _switch { .method public static int32 main(class [mscorlib]System.String[]) { .entrypoint .maxstack 10 .locals (int32) ldc.i4 0x0 stloc 0 ldc.i4 0x0 TAG: switch (TC1,TC2,TC3) ldloc 0 ldc.i4 0x3 ceq brfalse FAIL br PASS TC1: ldloc 0 ldc.i4 0x1 add stloc 0 ldc.i4 0x1 br TAG TC2: ldloc 0 ldc.i4 0x1 add stloc 0 ldc.i4 0x2 br TAG TC3: ldloc 0 ldc.i4 0x1 add stloc 0 ldc.i4 0x3 br TAG br FAIL PASS: ldc.i4 100 ret FAIL: ldc.i4 0x0 ret } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. .assembly extern legacy library mscorlib {} .assembly 'switch'{} .class public _switch { .method public static int32 main(class [mscorlib]System.String[]) { .entrypoint .maxstack 10 .locals (int32) ldc.i4 0x0 stloc 0 ldc.i4 0x0 TAG: switch (TC1,TC2,TC3) ldloc 0 ldc.i4 0x3 ceq brfalse FAIL br PASS TC1: ldloc 0 ldc.i4 0x1 add stloc 0 ldc.i4 0x1 br TAG TC2: ldloc 0 ldc.i4 0x1 add stloc 0 ldc.i4 0x2 br TAG TC3: ldloc 0 ldc.i4 0x1 add stloc 0 ldc.i4 0x3 br TAG br FAIL PASS: ldc.i4 100 ret FAIL: ldc.i4 0x0 ret } }
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./src/coreclr/nativeaot/Runtime.Base/src/System/Void.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. namespace System { // This class represents the void return type public struct Void { } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. namespace System { // This class represents the void return type public struct Void { } }
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./src/libraries/Microsoft.VisualBasic.Core/src/Microsoft/VisualBasic/CompilerServices/StringType.vb
' Licensed to the .NET Foundation under one or more agreements. ' The .NET Foundation licenses this file to you under the MIT license. Imports System Imports System.Globalization Imports System.Text Imports Microsoft.VisualBasic.CompilerServices.ExceptionUtils Imports Microsoft.VisualBasic.CompilerServices.Utils Namespace Microsoft.VisualBasic.CompilerServices <System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)> _ Public NotInheritable Class StringType ' Prevent creation. Private Sub New() End Sub Private Const GENERAL_FORMAT As String = "G" '============================================================================ ' Coercion to functions. '============================================================================ Public Shared Function FromBoolean(ByVal Value As Boolean) As String If Value Then Return System.Boolean.TrueString Else Return System.Boolean.FalseString End If End Function Public Shared Function FromByte(ByVal Value As Byte) As String Return Value.ToString(Nothing, Nothing) End Function Public Shared Function FromChar(ByVal Value As Char) As String Return Value.ToString() End Function Public Shared Function FromShort(ByVal Value As Short) As String Return Value.ToString(Nothing, Nothing) End Function Public Shared Function FromInteger(ByVal Value As Integer) As String Return Value.ToString(Nothing, Nothing) End Function Public Shared Function FromLong(ByVal Value As Long) As String Return Value.ToString(Nothing, Nothing) End Function Public Shared Function FromSingle(ByVal Value As Single) As String Return FromSingle(Value, Nothing) End Function Public Shared Function FromDouble(ByVal Value As Double) As String Return FromDouble(Value, Nothing) End Function 'Change to this code after the NDP drop includes the formatting changes Public Shared Function FromSingle(ByVal Value As Single, ByVal NumberFormat As NumberFormatInfo) As String Return Value.ToString(Nothing, NumberFormat) End Function Public 
Shared Function FromDouble(ByVal Value As Double, ByVal NumberFormat As NumberFormatInfo) As String Return Value.ToString("G", NumberFormat) End Function Public Shared Function FromDate(ByVal Value As Date) As String Dim TimeTicks As Long = Value.TimeOfDay.Ticks If (TimeTicks = Value.Ticks) OrElse (Value.Year = 1899 AndAlso Value.Month = 12 AndAlso Value.Day = 30) Then 'OA Date with no date is 1899-12-30 'No date (1/1/1) Return Value.ToString("T", Nothing) ElseIf TimeTicks = 0 Then 'No time, or is midnight Return Value.ToString("d", Nothing) Else Return Value.ToString(GENERAL_FORMAT, Nothing) End If End Function Public Shared Function FromDecimal(ByVal Value As Decimal) As String Return FromDecimal(Value, Nothing) End Function Public Shared Function FromDecimal(ByVal Value As Decimal, ByVal NumberFormat As NumberFormatInfo) As String Return Value.ToString("G", NumberFormat) End Function Public Shared Function FromObject(ByVal Value As Object) As String If Value Is Nothing Then Return Nothing Else Dim StringValue As String = TryCast(Value, String) If StringValue IsNot Nothing Then Return StringValue End If End If Dim ValueInterface As IConvertible Dim ValueTypeCode As TypeCode ValueInterface = TryCast(Value, IConvertible) If Not ValueInterface Is Nothing Then ValueTypeCode = ValueInterface.GetTypeCode() Select Case ValueTypeCode Case TypeCode.Boolean Return FromBoolean(ValueInterface.ToBoolean(Nothing)) Case TypeCode.Byte Return FromByte(ValueInterface.ToByte(Nothing)) Case TypeCode.Int16 Return FromShort(ValueInterface.ToInt16(Nothing)) Case TypeCode.Int32 Return FromInteger(ValueInterface.ToInt32(Nothing)) Case TypeCode.Int64 Return FromLong(ValueInterface.ToInt64(Nothing)) Case TypeCode.Single Return FromSingle(ValueInterface.ToSingle(Nothing)) Case TypeCode.Double Return FromDouble(ValueInterface.ToDouble(Nothing)) Case TypeCode.Decimal Return FromDecimal(ValueInterface.ToDecimal(Nothing)) Case TypeCode.String Return ValueInterface.ToString(Nothing) Case 
TypeCode.Char Return FromChar(ValueInterface.ToChar(Nothing)) Case TypeCode.DateTime Return FromDate(ValueInterface.ToDateTime(Nothing)) Case Else ' Fall through to error End Select Else Dim CharArray As Char() = TryCast(Value, Char()) If CharArray IsNot Nothing AndAlso CharArray.Rank = 1 Then Return New String(CharArrayType.FromObject(Value)) End If End If Throw New InvalidCastException(SR.Format(SR.InvalidCast_FromTo, VBFriendlyName(Value), "String")) End Function '============================================================================ ' Compare/concat/len functions. '============================================================================ Public Shared Function StrCmp(ByVal sLeft As String, ByVal sRight As String, ByVal TextCompare As Boolean) As Integer If sLeft Is sRight Then Return 0 End If If sLeft Is Nothing Then If sRight.Length() = 0 Then Return 0 End If Return -1 End If If sRight Is Nothing Then If sLeft.Length() = 0 Then Return 0 End If Return 1 End If If TextCompare Then Return GetCultureInfo().CompareInfo.Compare(sLeft, sRight, OptionCompareTextFlags) Else Return System.String.CompareOrdinal(sLeft, sRight) End If End Function Public Shared Function StrLike(ByVal Source As String, ByVal Pattern As String, ByVal CompareOption As CompareMethod) As Boolean If CompareOption = CompareMethod.Binary Then Return StrLikeBinary(Source, Pattern) Else Return StrLikeText(Source, Pattern) End If End Function Public Shared Function StrLikeBinary(ByVal Source As String, ByVal Pattern As String) As Boolean 'Match Source to Pattern using "?*#[!a-g]" pattern matching characters Dim SourceIndex As Integer Dim PatternIndex As Integer Dim SourceEndIndex As Integer Dim PatternEndIndex As Integer Dim p As Char Dim s As Char Dim InsideBracket As Boolean Dim SeenHyphen As Boolean Dim StartRangeChar As Char Dim EndRangeChar As Char Dim Match As Boolean Dim SeenLiteral As Boolean Dim SeenNot As Boolean Dim Skip As Integer Const NullChar As Char = ChrW(0) Dim 
LiteralIsRangeEnd As Boolean = False ' Options = CompareOptions.Ordinal If Pattern Is Nothing Then PatternEndIndex = 0 Else PatternEndIndex = Pattern.Length End If If Source Is Nothing Then SourceEndIndex = 0 Else SourceEndIndex = Source.Length End If If SourceIndex < SourceEndIndex Then s = Source.Chars(SourceIndex) End If Do While PatternIndex < PatternEndIndex p = Pattern.Chars(PatternIndex) If p = "*"c AndAlso (Not InsideBracket) Then 'If Then Else has faster performance the Select Case 'Determine how many source chars to skip Skip = AsteriskSkip(Pattern.Substring(PatternIndex + 1), Source.Substring(SourceIndex), SourceEndIndex - SourceIndex, CompareMethod.Binary, m_InvariantCompareInfo) If Skip < 0 Then Return False ElseIf Skip > 0 Then SourceIndex += Skip If SourceIndex < SourceEndIndex Then s = Source.Chars(SourceIndex) End If End If ElseIf p = "?"c AndAlso (Not InsideBracket) Then 'Match any character SourceIndex = SourceIndex + 1 If SourceIndex < SourceEndIndex Then s = Source.Chars(SourceIndex) End If ElseIf p = "#"c AndAlso (Not InsideBracket) Then If Not System.Char.IsDigit(s) Then Exit Do End If SourceIndex = SourceIndex + 1 If SourceIndex < SourceEndIndex Then s = Source.Chars(SourceIndex) End If ElseIf p = "-"c AndAlso _ (InsideBracket AndAlso SeenLiteral AndAlso (Not LiteralIsRangeEnd) AndAlso (Not SeenHyphen)) AndAlso _ (((PatternIndex + 1) >= PatternEndIndex) OrElse (Pattern.Chars(PatternIndex + 1) <> "]"c)) Then SeenHyphen = True ElseIf p = "!"c AndAlso _ (InsideBracket AndAlso (Not SeenNot)) Then SeenNot = True Match = True ElseIf p = "["c AndAlso (Not InsideBracket) Then InsideBracket = True StartRangeChar = NullChar EndRangeChar = NullChar SeenLiteral = False ElseIf p = "]"c AndAlso InsideBracket Then InsideBracket = False If SeenLiteral Then If Match Then SourceIndex += 1 If SourceIndex < SourceEndIndex Then s = Source.Chars(SourceIndex) End If Else Exit Do End If ElseIf SeenHyphen Then If Not Match Then Exit Do End If ElseIf SeenNot Then 
'[!] should be matched to literal ! same as if outside brackets If "!"c <> s Then Exit Do End If SourceIndex += 1 If SourceIndex < SourceEndIndex Then s = Source.Chars(SourceIndex) End If End If Match = False SeenLiteral = False SeenNot = False SeenHyphen = False Else 'Literal character SeenLiteral = True LiteralIsRangeEnd = False If InsideBracket Then If SeenHyphen Then SeenHyphen = False LiteralIsRangeEnd = True EndRangeChar = p If StartRangeChar > EndRangeChar Then Throw VbMakeException(vbErrors.BadPatStr) ElseIf (SeenNot AndAlso Match) OrElse (Not SeenNot AndAlso Not Match) Then 'Calls to ci.Compare are expensive, avoid them for good performance Match = (s > StartRangeChar) AndAlso (s <= EndRangeChar) If SeenNot Then Match = Not Match End If End If Else StartRangeChar = p 'This compare handles non range chars such as the "abc" and "uvw" 'and the first char of a range such as "d" in "[abcd-tuvw]". Match = StrLikeCompareBinary(SeenNot, Match, p, s) End If Else If p <> s AndAlso Not SeenNot Then Exit Do End If SeenNot = False SourceIndex += 1 If SourceIndex < SourceEndIndex Then s = Source.Chars(SourceIndex) ElseIf SourceIndex > SourceEndIndex Then Return False End If End If End If PatternIndex += 1 Loop If InsideBracket Then If SourceEndIndex = 0 Then Return False Else Throw New ArgumentException(SR.Format(SR.Argument_InvalidValue1, "Pattern")) End If Else Return (PatternIndex = PatternEndIndex) AndAlso (SourceIndex = SourceEndIndex) End If End Function Public Shared Function StrLikeText(ByVal Source As String, ByVal Pattern As String) As Boolean 'Match Source to Pattern using "?*#[!a-g]" pattern matching characters Dim SourceIndex As Integer Dim PatternIndex As Integer Dim SourceEndIndex As Integer Dim PatternEndIndex As Integer Dim p As Char Dim s As Char Dim InsideBracket As Boolean Dim SeenHyphen As Boolean Dim StartRangeChar As Char Dim EndRangeChar As Char Dim Match As Boolean Dim SeenLiteral As Boolean Dim SeenNot As Boolean Dim Skip As Integer Dim Options 
As CompareOptions Dim ci As CompareInfo Const NullChar As Char = ChrW(0) Dim LiteralIsRangeEnd As Boolean = False If Pattern Is Nothing Then PatternEndIndex = 0 Else PatternEndIndex = Pattern.Length End If If Source Is Nothing Then SourceEndIndex = 0 Else SourceEndIndex = Source.Length End If If SourceIndex < SourceEndIndex Then s = Source.Chars(SourceIndex) End If ci = GetCultureInfo().CompareInfo Options = CompareOptions.IgnoreCase Or _ CompareOptions.IgnoreWidth Or _ CompareOptions.IgnoreNonSpace Or _ CompareOptions.IgnoreKanaType Do While PatternIndex < PatternEndIndex p = Pattern.Chars(PatternIndex) If p = "*"c AndAlso (Not InsideBracket) Then 'If Then Else has faster performance the Select Case 'Determine how many source chars to skip Skip = AsteriskSkip(Pattern.Substring(PatternIndex + 1), Source.Substring(SourceIndex), SourceEndIndex - SourceIndex, CompareMethod.Text, ci) If Skip < 0 Then Return False ElseIf Skip > 0 Then SourceIndex += Skip If SourceIndex < SourceEndIndex Then s = Source.Chars(SourceIndex) End If End If ElseIf p = "?"c AndAlso (Not InsideBracket) Then 'Match any character SourceIndex = SourceIndex + 1 If SourceIndex < SourceEndIndex Then s = Source.Chars(SourceIndex) End If ElseIf p = "#"c AndAlso (Not InsideBracket) Then If Not System.Char.IsDigit(s) Then Exit Do End If SourceIndex = SourceIndex + 1 If SourceIndex < SourceEndIndex Then s = Source.Chars(SourceIndex) End If ElseIf p = "-"c AndAlso _ (InsideBracket AndAlso SeenLiteral AndAlso (Not LiteralIsRangeEnd) AndAlso (Not SeenHyphen)) AndAlso _ (((PatternIndex + 1) >= PatternEndIndex) OrElse (Pattern.Chars(PatternIndex + 1) <> "]"c)) Then SeenHyphen = True ElseIf p = "!"c AndAlso _ (InsideBracket AndAlso Not SeenNot) Then SeenNot = True Match = True ElseIf p = "["c AndAlso (Not InsideBracket) Then InsideBracket = True StartRangeChar = NullChar EndRangeChar = NullChar SeenLiteral = False ElseIf p = "]"c AndAlso InsideBracket Then InsideBracket = False If SeenLiteral Then If Match Then 
SourceIndex += 1 If SourceIndex < SourceEndIndex Then s = Source.Chars(SourceIndex) End If Else Exit Do End If ElseIf SeenHyphen Then If Not Match Then Exit Do End If ElseIf SeenNot Then '[!] should be matched to literal ! same as if outside brackets If (ci.Compare("!", s) <> 0) Then Exit Do End If SourceIndex += 1 If SourceIndex < SourceEndIndex Then s = Source.Chars(SourceIndex) End If End If Match = False SeenLiteral = False SeenNot = False SeenHyphen = False Else 'Literal character SeenLiteral = True LiteralIsRangeEnd = False If InsideBracket Then If SeenHyphen Then SeenHyphen = False LiteralIsRangeEnd = True EndRangeChar = p If StartRangeChar > EndRangeChar Then Throw VbMakeException(vbErrors.BadPatStr) ElseIf (SeenNot AndAlso Match) OrElse (Not SeenNot AndAlso Not Match) Then 'Calls to ci.Compare are expensive, avoid them for good performance If Options = CompareOptions.Ordinal Then Match = (s > StartRangeChar) AndAlso (s <= EndRangeChar) Else Match = (ci.Compare(StartRangeChar, s, Options) < 0) AndAlso (ci.Compare(EndRangeChar, s, Options) >= 0) End If If SeenNot Then Match = Not Match End If End If Else StartRangeChar = p 'This compare handles non range chars such as the "abc" and "uvw" 'and the first char of a range such as "d" in "[abcd-tuvw]". Match = StrLikeCompare(ci, SeenNot, Match, p, s, Options) End If Else If Options = CompareOptions.Ordinal Then If p <> s AndAlso Not SeenNot Then Exit Do End If Else ' Slurp up the diacritical marks, if any (both non-spacing marks and modifier symbols) ' Note that typically, we'll only have at most one diacritical mark. Therefore, I'm not ' using StringBuilder here, since the minimal overhead of appending a character doesn't ' justify invoking a couple of instances of StringBuilder. . 
Dim pstr As String = p Dim sstr As String = s Do While PatternIndex + 1 < PatternEndIndex AndAlso _ (UnicodeCategory.ModifierSymbol = Char.GetUnicodeCategory(Pattern.Chars(PatternIndex + 1)) OrElse _ UnicodeCategory.NonSpacingMark = Char.GetUnicodeCategory(Pattern.Chars(PatternIndex + 1))) pstr = pstr & Pattern.Chars(PatternIndex + 1) PatternIndex = PatternIndex + 1 Loop Do While SourceIndex + 1 < SourceEndIndex AndAlso _ (UnicodeCategory.ModifierSymbol = Char.GetUnicodeCategory(Source.Chars(SourceIndex + 1)) OrElse _ UnicodeCategory.NonSpacingMark = Char.GetUnicodeCategory(Source.Chars(SourceIndex + 1))) sstr = sstr & Source.Chars(SourceIndex + 1) SourceIndex = SourceIndex + 1 Loop If (ci.Compare(pstr, sstr, OptionCompareTextFlags) <> 0) AndAlso Not SeenNot Then Exit Do End If End If SeenNot = False SourceIndex += 1 If SourceIndex < SourceEndIndex Then s = Source.Chars(SourceIndex) ElseIf SourceIndex > SourceEndIndex Then Return False End If End If End If PatternIndex += 1 Loop If InsideBracket Then If SourceEndIndex = 0 Then Return False Else Throw New ArgumentException(SR.Format(SR.Argument_InvalidValue1, "Pattern")) End If Else Return (PatternIndex = PatternEndIndex) AndAlso (SourceIndex = SourceEndIndex) End If End Function Private Shared Function StrLikeCompareBinary(ByVal SeenNot As Boolean, ByVal Match As Boolean, ByVal p As Char, ByVal s As Char) As Boolean If SeenNot AndAlso Match Then Return p <> s ElseIf Not SeenNot AndAlso Not Match Then Return p = s Else Return Match End If End Function Private Shared Function StrLikeCompare(ByVal ci As CompareInfo, ByVal SeenNot As Boolean, ByVal Match As Boolean, ByVal p As Char, ByVal s As Char, ByVal Options As CompareOptions) As Boolean If SeenNot AndAlso Match Then If Options = CompareOptions.Ordinal Then Return p <> s Else Return Not (ci.Compare(p, s, Options) = 0) End If ElseIf Not SeenNot AndAlso Not Match Then If Options = CompareOptions.Ordinal Then Return p = s Else Return (ci.Compare(p, s, Options) = 0) 
End If Else Return Match End If End Function Private Shared Function AsteriskSkip(ByVal Pattern As String, ByVal Source As String, ByVal SourceEndIndex As Integer, _ ByVal CompareOption As CompareMethod, ByVal ci As CompareInfo) As Integer 'Returns the number of source characters to skip over to handle an asterisk in the pattern. 'When there's only a single asterisk in the pattern, it computes how many pattern equivalent chars 'follow the *: [a-z], [abcde], ?, # each count as one char. 'Pattern contains the substring following the * 'Source contains the substring not yet matched. Dim p As Char Dim SeenLiteral As Boolean Dim SeenSpecial As Boolean 'Remembers if we've seen #, ?, [abd-eg], or ! when they have their special meanings Dim InsideBracket As Boolean Dim Count As Integer Dim PatternEndIndex As Integer Dim PatternIndex As Integer Dim TruncatedPattern As String Dim Options As CompareOptions PatternEndIndex = Len(Pattern) 'Determine how many pattern equivalent chars follow the *, and if there are multiple *s '[a-z], [abcde] each count as one char. Do While PatternIndex < PatternEndIndex p = Pattern.Chars(PatternIndex) Select Case p Case "*"c If Count > 0 Then 'We found multiple asterisks with an intervening pattern If SeenSpecial Then 'Pattern uses special characters which means we can't compute easily how far to skip. 
Count = MultipleAsteriskSkip(Pattern, Source, Count, CompareOption) Return SourceEndIndex - Count Else 'Pattern uses only literals, so we can directly search for the pattern in the source TruncatedPattern = Pattern.Substring(0, PatternIndex) 'Remove the second * and everything trailing If CompareOption = CompareMethod.Binary Then Options = CompareOptions.Ordinal Else Options = CompareOptions.IgnoreCase Or CompareOptions.IgnoreWidth Or CompareOptions.IgnoreNonSpace Or CompareOptions.IgnoreKanaType End If 'Count = Source.LastIndexOf(TruncatedPattern) Count = ci.LastIndexOf(Source, TruncatedPattern, Options) Return Count End If Else 'Do nothing, which colalesces multiple asterisks together End If Case "-"c If Pattern.Chars(PatternIndex + 1) = "]"c Then SeenLiteral = True End If Case "!"c If Pattern.Chars(PatternIndex + 1) = "]"c Then SeenLiteral = True Else SeenSpecial = True End If Case "["c If InsideBracket Then SeenLiteral = True Else InsideBracket = True End If Case "]"c If SeenLiteral OrElse Not InsideBracket Then Count += 1 SeenSpecial = True End If SeenLiteral = False InsideBracket = False Case "?"c, "#"c If InsideBracket Then SeenLiteral = True Else Count += 1 SeenSpecial = True End If Case Else If InsideBracket Then SeenLiteral = True Else Count += 1 End If End Select PatternIndex += 1 Loop Return SourceEndIndex - Count End Function Private Shared Function MultipleAsteriskSkip(ByVal Pattern As String, ByVal Source As String, ByVal Count As Integer, ByVal CompareOption As CompareMethod) As Integer 'Multiple asterisks with intervening chars were found in the pattern, such as "*<chars>*". 'Use a recursive approach to determine how many source chars to skip. 'Start near the end of Source and move backwards one char at a time until a match is found or we reach start of Source. 
Dim SourceEndIndex As Integer Dim NewSource As String Dim Result As Boolean SourceEndIndex = Len(Source) Do While Count < SourceEndIndex NewSource = Source.Substring(SourceEndIndex - Count) Try Result = StrLike(NewSource, Pattern, CompareOption) Catch ex As StackOverflowException Throw ex Catch ex As OutOfMemoryException Throw ex Catch Result = False End Try If Result Then Exit Do End If Count += 1 Loop Return Count End Function Public Shared Sub MidStmtStr(ByRef sDest As String, ByVal StartPosition As Integer, ByVal MaxInsertLength As Integer, ByVal sInsert As String) Dim DestLength As Integer Dim InsertLength As Integer Dim EndSegmentLength As Integer If sDest Is Nothing Then 'DestLength = 0 Else DestLength = sDest.Length End If If sInsert Is Nothing Then 'InsertLength = 0 Else InsertLength = sInsert.Length End If 'Zero base the index StartPosition -= 1 If StartPosition < 0 OrElse StartPosition >= DestLength Then Throw New ArgumentException(SR.Format(SR.Argument_InvalidValue1, "Start")) End If If MaxInsertLength < 0 Then Throw New ArgumentException(SR.Format(SR.Argument_InvalidValue1, "Length")) End If ' first, limit the length of the source string ' to lenChange If (InsertLength > MaxInsertLength) Then InsertLength = MaxInsertLength End If ' second, limit the length to the available space ' in the destination string If (InsertLength > DestLength - StartPosition) Then InsertLength = DestLength - StartPosition End If If InsertLength = 0 Then 'Destination string remains unchanged Exit Sub End If 'This looks a bit complex for removing and inserting strings 'but when manipulating long strings, it should provide 'better performance because of fewer memcpys Dim sb As StringBuilder sb = New StringBuilder(DestLength) If StartPosition > 0 Then 'Append first part of destination string sb.Append(sDest, 0, StartPosition) End If 'Append InsertString sb.Append(sInsert, 0, InsertLength) EndSegmentLength = DestLength - (StartPosition + InsertLength) If EndSegmentLength > 0 Then 
'Append remainder of destination string sb.Append(sDest, StartPosition + InsertLength, EndSegmentLength) End If sDest = sb.ToString() End Sub End Class End Namespace
' Licensed to the .NET Foundation under one or more agreements. ' The .NET Foundation licenses this file to you under the MIT license. Imports System Imports System.Globalization Imports System.Text Imports Microsoft.VisualBasic.CompilerServices.ExceptionUtils Imports Microsoft.VisualBasic.CompilerServices.Utils Namespace Microsoft.VisualBasic.CompilerServices <System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)> _ Public NotInheritable Class StringType ' Prevent creation. Private Sub New() End Sub Private Const GENERAL_FORMAT As String = "G" '============================================================================ ' Coercion to functions. '============================================================================ Public Shared Function FromBoolean(ByVal Value As Boolean) As String If Value Then Return System.Boolean.TrueString Else Return System.Boolean.FalseString End If End Function Public Shared Function FromByte(ByVal Value As Byte) As String Return Value.ToString(Nothing, Nothing) End Function Public Shared Function FromChar(ByVal Value As Char) As String Return Value.ToString() End Function Public Shared Function FromShort(ByVal Value As Short) As String Return Value.ToString(Nothing, Nothing) End Function Public Shared Function FromInteger(ByVal Value As Integer) As String Return Value.ToString(Nothing, Nothing) End Function Public Shared Function FromLong(ByVal Value As Long) As String Return Value.ToString(Nothing, Nothing) End Function Public Shared Function FromSingle(ByVal Value As Single) As String Return FromSingle(Value, Nothing) End Function Public Shared Function FromDouble(ByVal Value As Double) As String Return FromDouble(Value, Nothing) End Function 'Change to this code after the NDP drop includes the formatting changes Public Shared Function FromSingle(ByVal Value As Single, ByVal NumberFormat As NumberFormatInfo) As String Return Value.ToString(Nothing, NumberFormat) End Function Public 
Shared Function FromDouble(ByVal Value As Double, ByVal NumberFormat As NumberFormatInfo) As String Return Value.ToString("G", NumberFormat) End Function Public Shared Function FromDate(ByVal Value As Date) As String Dim TimeTicks As Long = Value.TimeOfDay.Ticks If (TimeTicks = Value.Ticks) OrElse (Value.Year = 1899 AndAlso Value.Month = 12 AndAlso Value.Day = 30) Then 'OA Date with no date is 1899-12-30 'No date (1/1/1) Return Value.ToString("T", Nothing) ElseIf TimeTicks = 0 Then 'No time, or is midnight Return Value.ToString("d", Nothing) Else Return Value.ToString(GENERAL_FORMAT, Nothing) End If End Function Public Shared Function FromDecimal(ByVal Value As Decimal) As String Return FromDecimal(Value, Nothing) End Function Public Shared Function FromDecimal(ByVal Value As Decimal, ByVal NumberFormat As NumberFormatInfo) As String Return Value.ToString("G", NumberFormat) End Function Public Shared Function FromObject(ByVal Value As Object) As String If Value Is Nothing Then Return Nothing Else Dim StringValue As String = TryCast(Value, String) If StringValue IsNot Nothing Then Return StringValue End If End If Dim ValueInterface As IConvertible Dim ValueTypeCode As TypeCode ValueInterface = TryCast(Value, IConvertible) If Not ValueInterface Is Nothing Then ValueTypeCode = ValueInterface.GetTypeCode() Select Case ValueTypeCode Case TypeCode.Boolean Return FromBoolean(ValueInterface.ToBoolean(Nothing)) Case TypeCode.Byte Return FromByte(ValueInterface.ToByte(Nothing)) Case TypeCode.Int16 Return FromShort(ValueInterface.ToInt16(Nothing)) Case TypeCode.Int32 Return FromInteger(ValueInterface.ToInt32(Nothing)) Case TypeCode.Int64 Return FromLong(ValueInterface.ToInt64(Nothing)) Case TypeCode.Single Return FromSingle(ValueInterface.ToSingle(Nothing)) Case TypeCode.Double Return FromDouble(ValueInterface.ToDouble(Nothing)) Case TypeCode.Decimal Return FromDecimal(ValueInterface.ToDecimal(Nothing)) Case TypeCode.String Return ValueInterface.ToString(Nothing) Case 
TypeCode.Char Return FromChar(ValueInterface.ToChar(Nothing)) Case TypeCode.DateTime Return FromDate(ValueInterface.ToDateTime(Nothing)) Case Else ' Fall through to error End Select Else Dim CharArray As Char() = TryCast(Value, Char()) If CharArray IsNot Nothing AndAlso CharArray.Rank = 1 Then Return New String(CharArrayType.FromObject(Value)) End If End If Throw New InvalidCastException(SR.Format(SR.InvalidCast_FromTo, VBFriendlyName(Value), "String")) End Function '============================================================================ ' Compare/concat/len functions. '============================================================================ Public Shared Function StrCmp(ByVal sLeft As String, ByVal sRight As String, ByVal TextCompare As Boolean) As Integer If sLeft Is sRight Then Return 0 End If If sLeft Is Nothing Then If sRight.Length() = 0 Then Return 0 End If Return -1 End If If sRight Is Nothing Then If sLeft.Length() = 0 Then Return 0 End If Return 1 End If If TextCompare Then Return GetCultureInfo().CompareInfo.Compare(sLeft, sRight, OptionCompareTextFlags) Else Return System.String.CompareOrdinal(sLeft, sRight) End If End Function Public Shared Function StrLike(ByVal Source As String, ByVal Pattern As String, ByVal CompareOption As CompareMethod) As Boolean If CompareOption = CompareMethod.Binary Then Return StrLikeBinary(Source, Pattern) Else Return StrLikeText(Source, Pattern) End If End Function Public Shared Function StrLikeBinary(ByVal Source As String, ByVal Pattern As String) As Boolean 'Match Source to Pattern using "?*#[!a-g]" pattern matching characters Dim SourceIndex As Integer Dim PatternIndex As Integer Dim SourceEndIndex As Integer Dim PatternEndIndex As Integer Dim p As Char Dim s As Char Dim InsideBracket As Boolean Dim SeenHyphen As Boolean Dim StartRangeChar As Char Dim EndRangeChar As Char Dim Match As Boolean Dim SeenLiteral As Boolean Dim SeenNot As Boolean Dim Skip As Integer Const NullChar As Char = ChrW(0) Dim 
LiteralIsRangeEnd As Boolean = False ' Options = CompareOptions.Ordinal If Pattern Is Nothing Then PatternEndIndex = 0 Else PatternEndIndex = Pattern.Length End If If Source Is Nothing Then SourceEndIndex = 0 Else SourceEndIndex = Source.Length End If If SourceIndex < SourceEndIndex Then s = Source.Chars(SourceIndex) End If Do While PatternIndex < PatternEndIndex p = Pattern.Chars(PatternIndex) If p = "*"c AndAlso (Not InsideBracket) Then 'If Then Else has faster performance the Select Case 'Determine how many source chars to skip Skip = AsteriskSkip(Pattern.Substring(PatternIndex + 1), Source.Substring(SourceIndex), SourceEndIndex - SourceIndex, CompareMethod.Binary, m_InvariantCompareInfo) If Skip < 0 Then Return False ElseIf Skip > 0 Then SourceIndex += Skip If SourceIndex < SourceEndIndex Then s = Source.Chars(SourceIndex) End If End If ElseIf p = "?"c AndAlso (Not InsideBracket) Then 'Match any character SourceIndex = SourceIndex + 1 If SourceIndex < SourceEndIndex Then s = Source.Chars(SourceIndex) End If ElseIf p = "#"c AndAlso (Not InsideBracket) Then If Not System.Char.IsDigit(s) Then Exit Do End If SourceIndex = SourceIndex + 1 If SourceIndex < SourceEndIndex Then s = Source.Chars(SourceIndex) End If ElseIf p = "-"c AndAlso _ (InsideBracket AndAlso SeenLiteral AndAlso (Not LiteralIsRangeEnd) AndAlso (Not SeenHyphen)) AndAlso _ (((PatternIndex + 1) >= PatternEndIndex) OrElse (Pattern.Chars(PatternIndex + 1) <> "]"c)) Then SeenHyphen = True ElseIf p = "!"c AndAlso _ (InsideBracket AndAlso (Not SeenNot)) Then SeenNot = True Match = True ElseIf p = "["c AndAlso (Not InsideBracket) Then InsideBracket = True StartRangeChar = NullChar EndRangeChar = NullChar SeenLiteral = False ElseIf p = "]"c AndAlso InsideBracket Then InsideBracket = False If SeenLiteral Then If Match Then SourceIndex += 1 If SourceIndex < SourceEndIndex Then s = Source.Chars(SourceIndex) End If Else Exit Do End If ElseIf SeenHyphen Then If Not Match Then Exit Do End If ElseIf SeenNot Then 
'[!] should be matched to literal ! same as if outside brackets If "!"c <> s Then Exit Do End If SourceIndex += 1 If SourceIndex < SourceEndIndex Then s = Source.Chars(SourceIndex) End If End If Match = False SeenLiteral = False SeenNot = False SeenHyphen = False Else 'Literal character SeenLiteral = True LiteralIsRangeEnd = False If InsideBracket Then If SeenHyphen Then SeenHyphen = False LiteralIsRangeEnd = True EndRangeChar = p If StartRangeChar > EndRangeChar Then Throw VbMakeException(vbErrors.BadPatStr) ElseIf (SeenNot AndAlso Match) OrElse (Not SeenNot AndAlso Not Match) Then 'Calls to ci.Compare are expensive, avoid them for good performance Match = (s > StartRangeChar) AndAlso (s <= EndRangeChar) If SeenNot Then Match = Not Match End If End If Else StartRangeChar = p 'This compare handles non range chars such as the "abc" and "uvw" 'and the first char of a range such as "d" in "[abcd-tuvw]". Match = StrLikeCompareBinary(SeenNot, Match, p, s) End If Else If p <> s AndAlso Not SeenNot Then Exit Do End If SeenNot = False SourceIndex += 1 If SourceIndex < SourceEndIndex Then s = Source.Chars(SourceIndex) ElseIf SourceIndex > SourceEndIndex Then Return False End If End If End If PatternIndex += 1 Loop If InsideBracket Then If SourceEndIndex = 0 Then Return False Else Throw New ArgumentException(SR.Format(SR.Argument_InvalidValue1, "Pattern")) End If Else Return (PatternIndex = PatternEndIndex) AndAlso (SourceIndex = SourceEndIndex) End If End Function Public Shared Function StrLikeText(ByVal Source As String, ByVal Pattern As String) As Boolean 'Match Source to Pattern using "?*#[!a-g]" pattern matching characters Dim SourceIndex As Integer Dim PatternIndex As Integer Dim SourceEndIndex As Integer Dim PatternEndIndex As Integer Dim p As Char Dim s As Char Dim InsideBracket As Boolean Dim SeenHyphen As Boolean Dim StartRangeChar As Char Dim EndRangeChar As Char Dim Match As Boolean Dim SeenLiteral As Boolean Dim SeenNot As Boolean Dim Skip As Integer Dim Options 
As CompareOptions Dim ci As CompareInfo Const NullChar As Char = ChrW(0) Dim LiteralIsRangeEnd As Boolean = False If Pattern Is Nothing Then PatternEndIndex = 0 Else PatternEndIndex = Pattern.Length End If If Source Is Nothing Then SourceEndIndex = 0 Else SourceEndIndex = Source.Length End If If SourceIndex < SourceEndIndex Then s = Source.Chars(SourceIndex) End If ci = GetCultureInfo().CompareInfo Options = CompareOptions.IgnoreCase Or _ CompareOptions.IgnoreWidth Or _ CompareOptions.IgnoreNonSpace Or _ CompareOptions.IgnoreKanaType Do While PatternIndex < PatternEndIndex p = Pattern.Chars(PatternIndex) If p = "*"c AndAlso (Not InsideBracket) Then 'If Then Else has faster performance the Select Case 'Determine how many source chars to skip Skip = AsteriskSkip(Pattern.Substring(PatternIndex + 1), Source.Substring(SourceIndex), SourceEndIndex - SourceIndex, CompareMethod.Text, ci) If Skip < 0 Then Return False ElseIf Skip > 0 Then SourceIndex += Skip If SourceIndex < SourceEndIndex Then s = Source.Chars(SourceIndex) End If End If ElseIf p = "?"c AndAlso (Not InsideBracket) Then 'Match any character SourceIndex = SourceIndex + 1 If SourceIndex < SourceEndIndex Then s = Source.Chars(SourceIndex) End If ElseIf p = "#"c AndAlso (Not InsideBracket) Then If Not System.Char.IsDigit(s) Then Exit Do End If SourceIndex = SourceIndex + 1 If SourceIndex < SourceEndIndex Then s = Source.Chars(SourceIndex) End If ElseIf p = "-"c AndAlso _ (InsideBracket AndAlso SeenLiteral AndAlso (Not LiteralIsRangeEnd) AndAlso (Not SeenHyphen)) AndAlso _ (((PatternIndex + 1) >= PatternEndIndex) OrElse (Pattern.Chars(PatternIndex + 1) <> "]"c)) Then SeenHyphen = True ElseIf p = "!"c AndAlso _ (InsideBracket AndAlso Not SeenNot) Then SeenNot = True Match = True ElseIf p = "["c AndAlso (Not InsideBracket) Then InsideBracket = True StartRangeChar = NullChar EndRangeChar = NullChar SeenLiteral = False ElseIf p = "]"c AndAlso InsideBracket Then InsideBracket = False If SeenLiteral Then If Match Then 
SourceIndex += 1 If SourceIndex < SourceEndIndex Then s = Source.Chars(SourceIndex) End If Else Exit Do End If ElseIf SeenHyphen Then If Not Match Then Exit Do End If ElseIf SeenNot Then '[!] should be matched to literal ! same as if outside brackets If (ci.Compare("!", s) <> 0) Then Exit Do End If SourceIndex += 1 If SourceIndex < SourceEndIndex Then s = Source.Chars(SourceIndex) End If End If Match = False SeenLiteral = False SeenNot = False SeenHyphen = False Else 'Literal character SeenLiteral = True LiteralIsRangeEnd = False If InsideBracket Then If SeenHyphen Then SeenHyphen = False LiteralIsRangeEnd = True EndRangeChar = p If StartRangeChar > EndRangeChar Then Throw VbMakeException(vbErrors.BadPatStr) ElseIf (SeenNot AndAlso Match) OrElse (Not SeenNot AndAlso Not Match) Then 'Calls to ci.Compare are expensive, avoid them for good performance If Options = CompareOptions.Ordinal Then Match = (s > StartRangeChar) AndAlso (s <= EndRangeChar) Else Match = (ci.Compare(StartRangeChar, s, Options) < 0) AndAlso (ci.Compare(EndRangeChar, s, Options) >= 0) End If If SeenNot Then Match = Not Match End If End If Else StartRangeChar = p 'This compare handles non range chars such as the "abc" and "uvw" 'and the first char of a range such as "d" in "[abcd-tuvw]". Match = StrLikeCompare(ci, SeenNot, Match, p, s, Options) End If Else If Options = CompareOptions.Ordinal Then If p <> s AndAlso Not SeenNot Then Exit Do End If Else ' Slurp up the diacritical marks, if any (both non-spacing marks and modifier symbols) ' Note that typically, we'll only have at most one diacritical mark. Therefore, I'm not ' using StringBuilder here, since the minimal overhead of appending a character doesn't ' justify invoking a couple of instances of StringBuilder. . 
Dim pstr As String = p Dim sstr As String = s Do While PatternIndex + 1 < PatternEndIndex AndAlso _ (UnicodeCategory.ModifierSymbol = Char.GetUnicodeCategory(Pattern.Chars(PatternIndex + 1)) OrElse _ UnicodeCategory.NonSpacingMark = Char.GetUnicodeCategory(Pattern.Chars(PatternIndex + 1))) pstr = pstr & Pattern.Chars(PatternIndex + 1) PatternIndex = PatternIndex + 1 Loop Do While SourceIndex + 1 < SourceEndIndex AndAlso _ (UnicodeCategory.ModifierSymbol = Char.GetUnicodeCategory(Source.Chars(SourceIndex + 1)) OrElse _ UnicodeCategory.NonSpacingMark = Char.GetUnicodeCategory(Source.Chars(SourceIndex + 1))) sstr = sstr & Source.Chars(SourceIndex + 1) SourceIndex = SourceIndex + 1 Loop If (ci.Compare(pstr, sstr, OptionCompareTextFlags) <> 0) AndAlso Not SeenNot Then Exit Do End If End If SeenNot = False SourceIndex += 1 If SourceIndex < SourceEndIndex Then s = Source.Chars(SourceIndex) ElseIf SourceIndex > SourceEndIndex Then Return False End If End If End If PatternIndex += 1 Loop If InsideBracket Then If SourceEndIndex = 0 Then Return False Else Throw New ArgumentException(SR.Format(SR.Argument_InvalidValue1, "Pattern")) End If Else Return (PatternIndex = PatternEndIndex) AndAlso (SourceIndex = SourceEndIndex) End If End Function Private Shared Function StrLikeCompareBinary(ByVal SeenNot As Boolean, ByVal Match As Boolean, ByVal p As Char, ByVal s As Char) As Boolean If SeenNot AndAlso Match Then Return p <> s ElseIf Not SeenNot AndAlso Not Match Then Return p = s Else Return Match End If End Function Private Shared Function StrLikeCompare(ByVal ci As CompareInfo, ByVal SeenNot As Boolean, ByVal Match As Boolean, ByVal p As Char, ByVal s As Char, ByVal Options As CompareOptions) As Boolean If SeenNot AndAlso Match Then If Options = CompareOptions.Ordinal Then Return p <> s Else Return Not (ci.Compare(p, s, Options) = 0) End If ElseIf Not SeenNot AndAlso Not Match Then If Options = CompareOptions.Ordinal Then Return p = s Else Return (ci.Compare(p, s, Options) = 0) 
End If Else Return Match End If End Function Private Shared Function AsteriskSkip(ByVal Pattern As String, ByVal Source As String, ByVal SourceEndIndex As Integer, _ ByVal CompareOption As CompareMethod, ByVal ci As CompareInfo) As Integer 'Returns the number of source characters to skip over to handle an asterisk in the pattern. 'When there's only a single asterisk in the pattern, it computes how many pattern equivalent chars 'follow the *: [a-z], [abcde], ?, # each count as one char. 'Pattern contains the substring following the * 'Source contains the substring not yet matched. Dim p As Char Dim SeenLiteral As Boolean Dim SeenSpecial As Boolean 'Remembers if we've seen #, ?, [abd-eg], or ! when they have their special meanings Dim InsideBracket As Boolean Dim Count As Integer Dim PatternEndIndex As Integer Dim PatternIndex As Integer Dim TruncatedPattern As String Dim Options As CompareOptions PatternEndIndex = Len(Pattern) 'Determine how many pattern equivalent chars follow the *, and if there are multiple *s '[a-z], [abcde] each count as one char. Do While PatternIndex < PatternEndIndex p = Pattern.Chars(PatternIndex) Select Case p Case "*"c If Count > 0 Then 'We found multiple asterisks with an intervening pattern If SeenSpecial Then 'Pattern uses special characters which means we can't compute easily how far to skip. 
Count = MultipleAsteriskSkip(Pattern, Source, Count, CompareOption) Return SourceEndIndex - Count Else 'Pattern uses only literals, so we can directly search for the pattern in the source TruncatedPattern = Pattern.Substring(0, PatternIndex) 'Remove the second * and everything trailing If CompareOption = CompareMethod.Binary Then Options = CompareOptions.Ordinal Else Options = CompareOptions.IgnoreCase Or CompareOptions.IgnoreWidth Or CompareOptions.IgnoreNonSpace Or CompareOptions.IgnoreKanaType End If 'Count = Source.LastIndexOf(TruncatedPattern) Count = ci.LastIndexOf(Source, TruncatedPattern, Options) Return Count End If Else 'Do nothing, which colalesces multiple asterisks together End If Case "-"c If Pattern.Chars(PatternIndex + 1) = "]"c Then SeenLiteral = True End If Case "!"c If Pattern.Chars(PatternIndex + 1) = "]"c Then SeenLiteral = True Else SeenSpecial = True End If Case "["c If InsideBracket Then SeenLiteral = True Else InsideBracket = True End If Case "]"c If SeenLiteral OrElse Not InsideBracket Then Count += 1 SeenSpecial = True End If SeenLiteral = False InsideBracket = False Case "?"c, "#"c If InsideBracket Then SeenLiteral = True Else Count += 1 SeenSpecial = True End If Case Else If InsideBracket Then SeenLiteral = True Else Count += 1 End If End Select PatternIndex += 1 Loop Return SourceEndIndex - Count End Function Private Shared Function MultipleAsteriskSkip(ByVal Pattern As String, ByVal Source As String, ByVal Count As Integer, ByVal CompareOption As CompareMethod) As Integer 'Multiple asterisks with intervening chars were found in the pattern, such as "*<chars>*". 'Use a recursive approach to determine how many source chars to skip. 'Start near the end of Source and move backwards one char at a time until a match is found or we reach start of Source. 
Dim SourceEndIndex As Integer Dim NewSource As String Dim Result As Boolean SourceEndIndex = Len(Source) Do While Count < SourceEndIndex NewSource = Source.Substring(SourceEndIndex - Count) Try Result = StrLike(NewSource, Pattern, CompareOption) Catch ex As StackOverflowException Throw ex Catch ex As OutOfMemoryException Throw ex Catch Result = False End Try If Result Then Exit Do End If Count += 1 Loop Return Count End Function Public Shared Sub MidStmtStr(ByRef sDest As String, ByVal StartPosition As Integer, ByVal MaxInsertLength As Integer, ByVal sInsert As String) Dim DestLength As Integer Dim InsertLength As Integer Dim EndSegmentLength As Integer If sDest Is Nothing Then 'DestLength = 0 Else DestLength = sDest.Length End If If sInsert Is Nothing Then 'InsertLength = 0 Else InsertLength = sInsert.Length End If 'Zero base the index StartPosition -= 1 If StartPosition < 0 OrElse StartPosition >= DestLength Then Throw New ArgumentException(SR.Format(SR.Argument_InvalidValue1, "Start")) End If If MaxInsertLength < 0 Then Throw New ArgumentException(SR.Format(SR.Argument_InvalidValue1, "Length")) End If ' first, limit the length of the source string ' to lenChange If (InsertLength > MaxInsertLength) Then InsertLength = MaxInsertLength End If ' second, limit the length to the available space ' in the destination string If (InsertLength > DestLength - StartPosition) Then InsertLength = DestLength - StartPosition End If If InsertLength = 0 Then 'Destination string remains unchanged Exit Sub End If 'This looks a bit complex for removing and inserting strings 'but when manipulating long strings, it should provide 'better performance because of fewer memcpys Dim sb As StringBuilder sb = New StringBuilder(DestLength) If StartPosition > 0 Then 'Append first part of destination string sb.Append(sDest, 0, StartPosition) End If 'Append InsertString sb.Append(sInsert, 0, InsertLength) EndSegmentLength = DestLength - (StartPosition + InsertLength) If EndSegmentLength > 0 Then 
'Append remainder of destination string sb.Append(sDest, StartPosition + InsertLength, EndSegmentLength) End If sDest = sb.ToString() End Sub End Class End Namespace
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./src/libraries/Common/src/Interop/Windows/Crypt32/Interop.CertEnumCertificatesInStore_IntPtr.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Runtime.InteropServices; using Microsoft.Win32.SafeHandles; internal static partial class Interop { internal static partial class Crypt32 { [GeneratedDllImport(Interop.Libraries.Crypt32, SetLastError = true)] internal static partial IntPtr CertEnumCertificatesInStore(SafeCertStoreHandle hCertStore, IntPtr pPrevCertContext); } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Runtime.InteropServices; using Microsoft.Win32.SafeHandles; internal static partial class Interop { internal static partial class Crypt32 { [GeneratedDllImport(Interop.Libraries.Crypt32, SetLastError = true)] internal static partial IntPtr CertEnumCertificatesInStore(SafeCertStoreHandle hCertStore, IntPtr pPrevCertContext); } }
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./src/tests/Loader/classloader/TypeGeneratorTests/TypeGeneratorTest576/Generated576.ilproj
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <ItemGroup> <Compile Include="Generated576.il" /> </ItemGroup> <ItemGroup> <ProjectReference Include="..\TestFramework\TestFramework.csproj" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <ItemGroup> <Compile Include="Generated576.il" /> </ItemGroup> <ItemGroup> <ProjectReference Include="..\TestFramework\TestFramework.csproj" /> </ItemGroup> </Project>
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./src/libraries/System.Private.Xml/src/System/Xml/Serialization/ImportContext.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. namespace System.Xml.Serialization { using System; using System.IO; using System.Xml; using System.Xml.Schema; using System.Xml.Serialization; using System.Collections; using System.Collections.Specialized; using System.Reflection; using System.Diagnostics.CodeAnalysis; public class ImportContext { private readonly bool _shareTypes; private SchemaObjectCache? _cache; // cached schema top-level items private Hashtable? _mappings; // XmlSchema -> SerializableMapping, XmlSchemaSimpleType -> EnumMapping, XmlSchemaComplexType -> StructMapping private Hashtable? _elements; // XmlSchemaElement -> ElementAccessor private CodeIdentifiers? _typeIdentifiers; public ImportContext(CodeIdentifiers? identifiers, bool shareTypes) { _typeIdentifiers = identifiers; _shareTypes = shareTypes; } internal ImportContext() : this(null, false) { } internal SchemaObjectCache Cache { get { if (_cache == null) _cache = new SchemaObjectCache(); return _cache; } } internal Hashtable Elements { get { if (_elements == null) _elements = new Hashtable(); return _elements; } } internal Hashtable Mappings { get { if (_mappings == null) _mappings = new Hashtable(); return _mappings; } } public CodeIdentifiers TypeIdentifiers { get { if (_typeIdentifiers == null) _typeIdentifiers = new CodeIdentifiers(); return _typeIdentifiers; } } public bool ShareTypes { get { return _shareTypes; } } public StringCollection Warnings { get { return Cache.Warnings; } } } internal sealed class SchemaObjectCache { private Hashtable? _graph; private Hashtable? _hash; private Hashtable? _objectCache; private StringCollection? 
_warnings; // UNDONE remove me soon, this is debug only code internal Hashtable looks = new Hashtable(); private Hashtable Graph { get { if (_graph == null) _graph = new Hashtable(); return _graph; } } private Hashtable Hash { get { if (_hash == null) _hash = new Hashtable(); return _hash; } } private Hashtable ObjectCache { get { if (_objectCache == null) _objectCache = new Hashtable(); return _objectCache; } } internal StringCollection Warnings { get { if (_warnings == null) _warnings = new StringCollection(); return _warnings; } } internal XmlSchemaObject? AddItem(XmlSchemaObject? item, XmlQualifiedName? qname, XmlSchemas schemas) { if (item == null) return null; if (qname == null || qname.IsEmpty) return null; string key = $"{item.GetType().Name}:{qname}"; ArrayList? list = (ArrayList?)ObjectCache[key]; if (list == null) { list = new ArrayList(); ObjectCache[key] = list; } for (int i = 0; i < list.Count; i++) { XmlSchemaObject cachedItem = (XmlSchemaObject)list[i]!; if (cachedItem == item) return cachedItem; if (Match(cachedItem, item, true)) { return cachedItem; } else { Warnings.Add(SR.Format(SR.XmlMismatchSchemaObjects, item.GetType().Name, qname.Name, qname.Namespace)); Warnings.Add($"DEBUG:Cached item key:\r\n{(string?)looks[cachedItem]}\r\nnew item key:\r\n{(string?)looks[item]}"); } } // no match found we need to insert the new type in the cache list.Add(item); return item; } internal bool Match(XmlSchemaObject o1, XmlSchemaObject o2, bool shareTypes) { if (o1 == o2) return true; if (o1.GetType() != o2.GetType()) return false; if (Hash[o1] == null) Hash[o1] = GetHash(o1); int hash1 = (int)Hash[o1]!; int hash2 = GetHash(o2); if (hash1 != hash2) return false; if (shareTypes) return CompositeHash(o1, hash1) == CompositeHash(o2, hash2); return true; } private ArrayList GetDependencies(XmlSchemaObject o, ArrayList deps, Hashtable refs) { if (refs[o] == null) { refs[o] = o; deps.Add(o); ArrayList? 
list = Graph[o] as ArrayList; if (list != null) { for (int i = 0; i < list.Count; i++) { GetDependencies((XmlSchemaObject)list[i]!, deps, refs); } } } return deps; } private int CompositeHash(XmlSchemaObject o, int hash) { ArrayList list = GetDependencies(o, new ArrayList(), new Hashtable()); double tmp = 0; for (int i = 0; i < list.Count; i++) { object? cachedHash = Hash[list[i]!]; if (cachedHash is int) { tmp += (int)cachedHash / list.Count; } } return (int)tmp; } [RequiresUnreferencedCode("creates SchemaGraph")] internal void GenerateSchemaGraph(XmlSchemas schemas) { SchemaGraph graph = new SchemaGraph(Graph, schemas); ArrayList items = graph.GetItems(); for (int i = 0; i < items.Count; i++) { GetHash((XmlSchemaObject)items[i]!); } } private int GetHash(XmlSchemaObject o) { object? hash = Hash[o]; if (hash != null) { if (hash is XmlSchemaObject) { } else { return (int)hash; } } // new object, generate the hash string hashString = ToString(o, new SchemaObjectWriter()); looks[o] = hashString; int code = hashString.GetHashCode(); Hash[o] = code; return code; } private string ToString(XmlSchemaObject o, SchemaObjectWriter writer) { return writer.WriteXmlSchemaObject(o); } } internal sealed class SchemaGraph { private readonly ArrayList _empty = new ArrayList(); private readonly XmlSchemas _schemas; private readonly Hashtable _scope; private readonly int _items; [RequiresUnreferencedCode("Calls Compile")] internal SchemaGraph(Hashtable scope, XmlSchemas schemas) { _scope = scope; schemas.Compile(null, false); _schemas = schemas; _items = 0; foreach (XmlSchema s in schemas) { _items += s.Items.Count; foreach (XmlSchemaObject item in s.Items) { Depends(item); } } } internal ArrayList GetItems() { return new ArrayList(_scope.Keys); } internal void AddRef(ArrayList list, XmlSchemaObject? o) { if (o == null) return; if (_schemas.IsReference(o)) return; if (o.Parent is XmlSchema parent) { string? 
ns = parent.TargetNamespace; if (ns == XmlSchema.Namespace) return; if (list.Contains(o)) return; list.Add(o); } } internal ArrayList Depends(XmlSchemaObject item) { if (item.Parent is XmlSchema) { if (_scope[item] != null) return (ArrayList)_scope[item]!; ArrayList refs = new ArrayList(); Depends(item, refs); _scope.Add(item, refs); return refs; } return _empty; } internal void Depends(XmlSchemaObject? item, ArrayList refs) { if (item == null || _scope[item] != null) return; Type t = item.GetType(); if (typeof(XmlSchemaType).IsAssignableFrom(t)) { XmlQualifiedName baseName = XmlQualifiedName.Empty; XmlSchemaType? baseType = null; XmlSchemaParticle? particle = null; XmlSchemaObjectCollection? attributes = null; if (item is XmlSchemaComplexType) { XmlSchemaComplexType ct = (XmlSchemaComplexType)item; if (ct.ContentModel != null) { XmlSchemaContent? content = ct.ContentModel.Content; if (content is XmlSchemaComplexContentRestriction) { baseName = ((XmlSchemaComplexContentRestriction)content).BaseTypeName; attributes = ((XmlSchemaComplexContentRestriction)content).Attributes; } else if (content is XmlSchemaSimpleContentRestriction) { XmlSchemaSimpleContentRestriction restriction = (XmlSchemaSimpleContentRestriction)content; if (restriction.BaseType != null) baseType = restriction.BaseType; else baseName = restriction.BaseTypeName; attributes = restriction.Attributes; } else if (content is XmlSchemaComplexContentExtension) { XmlSchemaComplexContentExtension extension = (XmlSchemaComplexContentExtension)content; attributes = extension.Attributes; particle = extension.Particle; baseName = extension.BaseTypeName; } else if (content is XmlSchemaSimpleContentExtension) { XmlSchemaSimpleContentExtension extension = (XmlSchemaSimpleContentExtension)content; attributes = extension.Attributes; baseName = extension.BaseTypeName; } } else { attributes = ct.Attributes; particle = ct.Particle; } if (particle is XmlSchemaGroupRef) { XmlSchemaGroupRef refGroup = 
(XmlSchemaGroupRef)particle; particle = ((XmlSchemaGroup)_schemas.Find(refGroup.RefName, typeof(XmlSchemaGroup), false)!).Particle; } else if (particle is XmlSchemaGroupBase) { particle = (XmlSchemaGroupBase)particle; } } else if (item is XmlSchemaSimpleType) { XmlSchemaSimpleType simpleType = (XmlSchemaSimpleType)item; XmlSchemaSimpleTypeContent? content = simpleType.Content; if (content is XmlSchemaSimpleTypeRestriction) { baseType = ((XmlSchemaSimpleTypeRestriction)content).BaseType; baseName = ((XmlSchemaSimpleTypeRestriction)content).BaseTypeName; } else if (content is XmlSchemaSimpleTypeList) { XmlSchemaSimpleTypeList list = (XmlSchemaSimpleTypeList)content; if (list.ItemTypeName != null && !list.ItemTypeName.IsEmpty) baseName = list.ItemTypeName; if (list.ItemType != null) { baseType = list.ItemType; } } else if (t == typeof(XmlSchemaSimpleTypeUnion)) { XmlQualifiedName[]? memberTypes = ((XmlSchemaSimpleTypeUnion)item).MemberTypes; if (memberTypes != null) { for (int i = 0; i < memberTypes.Length; i++) { XmlSchemaType? type = (XmlSchemaType?)_schemas.Find(memberTypes[i], typeof(XmlSchemaType), false); AddRef(refs, type); } } } } if (baseType == null && !baseName.IsEmpty && baseName.Namespace != XmlSchema.Namespace) baseType = (XmlSchemaType?)_schemas.Find(baseName, typeof(XmlSchemaType), false); if (baseType != null) { AddRef(refs, baseType); } if (particle != null) { Depends(particle, refs); } if (attributes != null) { for (int i = 0; i < attributes.Count; i++) { Depends(attributes[i], refs); } } } else if (t == typeof(XmlSchemaElement)) { XmlSchemaElement el = (XmlSchemaElement)item; if (!el.SubstitutionGroup.IsEmpty) { if (el.SubstitutionGroup.Namespace != XmlSchema.Namespace) { XmlSchemaElement? 
head = (XmlSchemaElement?)_schemas.Find(el.SubstitutionGroup, typeof(XmlSchemaElement), false); AddRef(refs, head); } } if (!el.RefName.IsEmpty) { el = (XmlSchemaElement)_schemas.Find(el.RefName, typeof(XmlSchemaElement), false)!; AddRef(refs, el); } else if (!el.SchemaTypeName.IsEmpty) { XmlSchemaType? type = (XmlSchemaType?)_schemas.Find(el.SchemaTypeName, typeof(XmlSchemaType), false); AddRef(refs, type); } else { Depends(el.SchemaType, refs); } } else if (t == typeof(XmlSchemaGroup)) { Depends(((XmlSchemaGroup)item).Particle!); } else if (t == typeof(XmlSchemaGroupRef)) { XmlSchemaGroup? group = (XmlSchemaGroup?)_schemas.Find(((XmlSchemaGroupRef)item).RefName, typeof(XmlSchemaGroup), false); AddRef(refs, group); } else if (typeof(XmlSchemaGroupBase).IsAssignableFrom(t)) { foreach (XmlSchemaObject o in ((XmlSchemaGroupBase)item).Items) { Depends(o, refs); } } else if (t == typeof(XmlSchemaAttributeGroupRef)) { XmlSchemaAttributeGroup? group = (XmlSchemaAttributeGroup?)_schemas.Find(((XmlSchemaAttributeGroupRef)item).RefName, typeof(XmlSchemaAttributeGroup), false); AddRef(refs, group); } else if (t == typeof(XmlSchemaAttributeGroup)) { foreach (XmlSchemaObject o in ((XmlSchemaAttributeGroup)item).Attributes) { Depends(o, refs); } } else if (t == typeof(XmlSchemaAttribute)) { XmlSchemaAttribute? at = (XmlSchemaAttribute)item; if (!at.RefName.IsEmpty) { at = (XmlSchemaAttribute?)_schemas.Find(at.RefName, typeof(XmlSchemaAttribute), false); AddRef(refs, at); } else if (!at.SchemaTypeName.IsEmpty) { XmlSchemaType? type = (XmlSchemaType?)_schemas.Find(at.SchemaTypeName, typeof(XmlSchemaType), false); AddRef(refs, type); } else { Depends(at.SchemaType, refs); } } if (typeof(XmlSchemaAnnotated).IsAssignableFrom(t)) { XmlAttribute[]? 
attrs = (XmlAttribute[]?)((XmlSchemaAnnotated)item).UnhandledAttributes; if (attrs != null) { for (int i = 0; i < attrs.Length; i++) { XmlAttribute attribute = attrs[i]; if (attribute.LocalName == Wsdl.ArrayType && attribute.NamespaceURI == Wsdl.Namespace) { XmlQualifiedName qname = TypeScope.ParseWsdlArrayType(attribute.Value, out _, item); XmlSchemaType? type = (XmlSchemaType?)_schemas.Find(qname, typeof(XmlSchemaType), false); AddRef(refs, type); } } } } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. namespace System.Xml.Serialization { using System; using System.IO; using System.Xml; using System.Xml.Schema; using System.Xml.Serialization; using System.Collections; using System.Collections.Specialized; using System.Reflection; using System.Diagnostics.CodeAnalysis; public class ImportContext { private readonly bool _shareTypes; private SchemaObjectCache? _cache; // cached schema top-level items private Hashtable? _mappings; // XmlSchema -> SerializableMapping, XmlSchemaSimpleType -> EnumMapping, XmlSchemaComplexType -> StructMapping private Hashtable? _elements; // XmlSchemaElement -> ElementAccessor private CodeIdentifiers? _typeIdentifiers; public ImportContext(CodeIdentifiers? identifiers, bool shareTypes) { _typeIdentifiers = identifiers; _shareTypes = shareTypes; } internal ImportContext() : this(null, false) { } internal SchemaObjectCache Cache { get { if (_cache == null) _cache = new SchemaObjectCache(); return _cache; } } internal Hashtable Elements { get { if (_elements == null) _elements = new Hashtable(); return _elements; } } internal Hashtable Mappings { get { if (_mappings == null) _mappings = new Hashtable(); return _mappings; } } public CodeIdentifiers TypeIdentifiers { get { if (_typeIdentifiers == null) _typeIdentifiers = new CodeIdentifiers(); return _typeIdentifiers; } } public bool ShareTypes { get { return _shareTypes; } } public StringCollection Warnings { get { return Cache.Warnings; } } } internal sealed class SchemaObjectCache { private Hashtable? _graph; private Hashtable? _hash; private Hashtable? _objectCache; private StringCollection? 
_warnings; // UNDONE remove me soon, this is debug only code internal Hashtable looks = new Hashtable(); private Hashtable Graph { get { if (_graph == null) _graph = new Hashtable(); return _graph; } } private Hashtable Hash { get { if (_hash == null) _hash = new Hashtable(); return _hash; } } private Hashtable ObjectCache { get { if (_objectCache == null) _objectCache = new Hashtable(); return _objectCache; } } internal StringCollection Warnings { get { if (_warnings == null) _warnings = new StringCollection(); return _warnings; } } internal XmlSchemaObject? AddItem(XmlSchemaObject? item, XmlQualifiedName? qname, XmlSchemas schemas) { if (item == null) return null; if (qname == null || qname.IsEmpty) return null; string key = $"{item.GetType().Name}:{qname}"; ArrayList? list = (ArrayList?)ObjectCache[key]; if (list == null) { list = new ArrayList(); ObjectCache[key] = list; } for (int i = 0; i < list.Count; i++) { XmlSchemaObject cachedItem = (XmlSchemaObject)list[i]!; if (cachedItem == item) return cachedItem; if (Match(cachedItem, item, true)) { return cachedItem; } else { Warnings.Add(SR.Format(SR.XmlMismatchSchemaObjects, item.GetType().Name, qname.Name, qname.Namespace)); Warnings.Add($"DEBUG:Cached item key:\r\n{(string?)looks[cachedItem]}\r\nnew item key:\r\n{(string?)looks[item]}"); } } // no match found we need to insert the new type in the cache list.Add(item); return item; } internal bool Match(XmlSchemaObject o1, XmlSchemaObject o2, bool shareTypes) { if (o1 == o2) return true; if (o1.GetType() != o2.GetType()) return false; if (Hash[o1] == null) Hash[o1] = GetHash(o1); int hash1 = (int)Hash[o1]!; int hash2 = GetHash(o2); if (hash1 != hash2) return false; if (shareTypes) return CompositeHash(o1, hash1) == CompositeHash(o2, hash2); return true; } private ArrayList GetDependencies(XmlSchemaObject o, ArrayList deps, Hashtable refs) { if (refs[o] == null) { refs[o] = o; deps.Add(o); ArrayList? 
list = Graph[o] as ArrayList; if (list != null) { for (int i = 0; i < list.Count; i++) { GetDependencies((XmlSchemaObject)list[i]!, deps, refs); } } } return deps; } private int CompositeHash(XmlSchemaObject o, int hash) { ArrayList list = GetDependencies(o, new ArrayList(), new Hashtable()); double tmp = 0; for (int i = 0; i < list.Count; i++) { object? cachedHash = Hash[list[i]!]; if (cachedHash is int) { tmp += (int)cachedHash / list.Count; } } return (int)tmp; } [RequiresUnreferencedCode("creates SchemaGraph")] internal void GenerateSchemaGraph(XmlSchemas schemas) { SchemaGraph graph = new SchemaGraph(Graph, schemas); ArrayList items = graph.GetItems(); for (int i = 0; i < items.Count; i++) { GetHash((XmlSchemaObject)items[i]!); } } private int GetHash(XmlSchemaObject o) { object? hash = Hash[o]; if (hash != null) { if (hash is XmlSchemaObject) { } else { return (int)hash; } } // new object, generate the hash string hashString = ToString(o, new SchemaObjectWriter()); looks[o] = hashString; int code = hashString.GetHashCode(); Hash[o] = code; return code; } private string ToString(XmlSchemaObject o, SchemaObjectWriter writer) { return writer.WriteXmlSchemaObject(o); } } internal sealed class SchemaGraph { private readonly ArrayList _empty = new ArrayList(); private readonly XmlSchemas _schemas; private readonly Hashtable _scope; private readonly int _items; [RequiresUnreferencedCode("Calls Compile")] internal SchemaGraph(Hashtable scope, XmlSchemas schemas) { _scope = scope; schemas.Compile(null, false); _schemas = schemas; _items = 0; foreach (XmlSchema s in schemas) { _items += s.Items.Count; foreach (XmlSchemaObject item in s.Items) { Depends(item); } } } internal ArrayList GetItems() { return new ArrayList(_scope.Keys); } internal void AddRef(ArrayList list, XmlSchemaObject? o) { if (o == null) return; if (_schemas.IsReference(o)) return; if (o.Parent is XmlSchema parent) { string? 
ns = parent.TargetNamespace; if (ns == XmlSchema.Namespace) return; if (list.Contains(o)) return; list.Add(o); } } internal ArrayList Depends(XmlSchemaObject item) { if (item.Parent is XmlSchema) { if (_scope[item] != null) return (ArrayList)_scope[item]!; ArrayList refs = new ArrayList(); Depends(item, refs); _scope.Add(item, refs); return refs; } return _empty; } internal void Depends(XmlSchemaObject? item, ArrayList refs) { if (item == null || _scope[item] != null) return; Type t = item.GetType(); if (typeof(XmlSchemaType).IsAssignableFrom(t)) { XmlQualifiedName baseName = XmlQualifiedName.Empty; XmlSchemaType? baseType = null; XmlSchemaParticle? particle = null; XmlSchemaObjectCollection? attributes = null; if (item is XmlSchemaComplexType) { XmlSchemaComplexType ct = (XmlSchemaComplexType)item; if (ct.ContentModel != null) { XmlSchemaContent? content = ct.ContentModel.Content; if (content is XmlSchemaComplexContentRestriction) { baseName = ((XmlSchemaComplexContentRestriction)content).BaseTypeName; attributes = ((XmlSchemaComplexContentRestriction)content).Attributes; } else if (content is XmlSchemaSimpleContentRestriction) { XmlSchemaSimpleContentRestriction restriction = (XmlSchemaSimpleContentRestriction)content; if (restriction.BaseType != null) baseType = restriction.BaseType; else baseName = restriction.BaseTypeName; attributes = restriction.Attributes; } else if (content is XmlSchemaComplexContentExtension) { XmlSchemaComplexContentExtension extension = (XmlSchemaComplexContentExtension)content; attributes = extension.Attributes; particle = extension.Particle; baseName = extension.BaseTypeName; } else if (content is XmlSchemaSimpleContentExtension) { XmlSchemaSimpleContentExtension extension = (XmlSchemaSimpleContentExtension)content; attributes = extension.Attributes; baseName = extension.BaseTypeName; } } else { attributes = ct.Attributes; particle = ct.Particle; } if (particle is XmlSchemaGroupRef) { XmlSchemaGroupRef refGroup = 
(XmlSchemaGroupRef)particle; particle = ((XmlSchemaGroup)_schemas.Find(refGroup.RefName, typeof(XmlSchemaGroup), false)!).Particle; } else if (particle is XmlSchemaGroupBase) { particle = (XmlSchemaGroupBase)particle; } } else if (item is XmlSchemaSimpleType) { XmlSchemaSimpleType simpleType = (XmlSchemaSimpleType)item; XmlSchemaSimpleTypeContent? content = simpleType.Content; if (content is XmlSchemaSimpleTypeRestriction) { baseType = ((XmlSchemaSimpleTypeRestriction)content).BaseType; baseName = ((XmlSchemaSimpleTypeRestriction)content).BaseTypeName; } else if (content is XmlSchemaSimpleTypeList) { XmlSchemaSimpleTypeList list = (XmlSchemaSimpleTypeList)content; if (list.ItemTypeName != null && !list.ItemTypeName.IsEmpty) baseName = list.ItemTypeName; if (list.ItemType != null) { baseType = list.ItemType; } } else if (t == typeof(XmlSchemaSimpleTypeUnion)) { XmlQualifiedName[]? memberTypes = ((XmlSchemaSimpleTypeUnion)item).MemberTypes; if (memberTypes != null) { for (int i = 0; i < memberTypes.Length; i++) { XmlSchemaType? type = (XmlSchemaType?)_schemas.Find(memberTypes[i], typeof(XmlSchemaType), false); AddRef(refs, type); } } } } if (baseType == null && !baseName.IsEmpty && baseName.Namespace != XmlSchema.Namespace) baseType = (XmlSchemaType?)_schemas.Find(baseName, typeof(XmlSchemaType), false); if (baseType != null) { AddRef(refs, baseType); } if (particle != null) { Depends(particle, refs); } if (attributes != null) { for (int i = 0; i < attributes.Count; i++) { Depends(attributes[i], refs); } } } else if (t == typeof(XmlSchemaElement)) { XmlSchemaElement el = (XmlSchemaElement)item; if (!el.SubstitutionGroup.IsEmpty) { if (el.SubstitutionGroup.Namespace != XmlSchema.Namespace) { XmlSchemaElement? 
head = (XmlSchemaElement?)_schemas.Find(el.SubstitutionGroup, typeof(XmlSchemaElement), false); AddRef(refs, head); } } if (!el.RefName.IsEmpty) { el = (XmlSchemaElement)_schemas.Find(el.RefName, typeof(XmlSchemaElement), false)!; AddRef(refs, el); } else if (!el.SchemaTypeName.IsEmpty) { XmlSchemaType? type = (XmlSchemaType?)_schemas.Find(el.SchemaTypeName, typeof(XmlSchemaType), false); AddRef(refs, type); } else { Depends(el.SchemaType, refs); } } else if (t == typeof(XmlSchemaGroup)) { Depends(((XmlSchemaGroup)item).Particle!); } else if (t == typeof(XmlSchemaGroupRef)) { XmlSchemaGroup? group = (XmlSchemaGroup?)_schemas.Find(((XmlSchemaGroupRef)item).RefName, typeof(XmlSchemaGroup), false); AddRef(refs, group); } else if (typeof(XmlSchemaGroupBase).IsAssignableFrom(t)) { foreach (XmlSchemaObject o in ((XmlSchemaGroupBase)item).Items) { Depends(o, refs); } } else if (t == typeof(XmlSchemaAttributeGroupRef)) { XmlSchemaAttributeGroup? group = (XmlSchemaAttributeGroup?)_schemas.Find(((XmlSchemaAttributeGroupRef)item).RefName, typeof(XmlSchemaAttributeGroup), false); AddRef(refs, group); } else if (t == typeof(XmlSchemaAttributeGroup)) { foreach (XmlSchemaObject o in ((XmlSchemaAttributeGroup)item).Attributes) { Depends(o, refs); } } else if (t == typeof(XmlSchemaAttribute)) { XmlSchemaAttribute? at = (XmlSchemaAttribute)item; if (!at.RefName.IsEmpty) { at = (XmlSchemaAttribute?)_schemas.Find(at.RefName, typeof(XmlSchemaAttribute), false); AddRef(refs, at); } else if (!at.SchemaTypeName.IsEmpty) { XmlSchemaType? type = (XmlSchemaType?)_schemas.Find(at.SchemaTypeName, typeof(XmlSchemaType), false); AddRef(refs, type); } else { Depends(at.SchemaType, refs); } } if (typeof(XmlSchemaAnnotated).IsAssignableFrom(t)) { XmlAttribute[]? 
attrs = (XmlAttribute[]?)((XmlSchemaAnnotated)item).UnhandledAttributes; if (attrs != null) { for (int i = 0; i < attrs.Length; i++) { XmlAttribute attribute = attrs[i]; if (attribute.LocalName == Wsdl.ArrayType && attribute.NamespaceURI == Wsdl.Namespace) { XmlQualifiedName qname = TypeScope.ParseWsdlArrayType(attribute.Value, out _, item); XmlSchemaType? type = (XmlSchemaType?)_schemas.Find(qname, typeof(XmlSchemaType), false); AddRef(refs, type); } } } } } } }
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./src/coreclr/vm/stackwalk.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /* This is a poor man's implementation of virtual methods. */ /* The purpose of pCrawlFrame is to abstract (at least for the most common cases from the fact that not all methods are "framed" (basically all methods in "native" code are "unframed"). That way the job for the enumerator callbacks becomes much simpler (i.e. more transparent and hopefully less error prone). Two call-backs still need to distinguish between the two types: GC and exception. Both of these call-backs need to do really different things; for frameless methods they need to go through the codemanager and use the resp. apis. The reason for not implementing virtual methods on crawlFrame is solely because of the way exception handling is implemented (it does a "long jump" and bypasses the enumerator (stackWalker) when it finds a matching frame. By doing so couldn't properly destruct the dynamically created instance of CrawlFrame. */ #ifndef __stackwalk_h__ #define __stackwalk_h__ #include "eetwain.h" #include "stackwalktypes.h" class Frame; class CrawlFrame; class ICodeManager; class IJitManager; struct EE_ILEXCEPTION; class AppDomain; // This define controls handling of faults in managed code. If it is defined, // the exception is handled (retried, actually), with a FaultingExceptionFrame // on the stack. The FEF is used for unwinding. If not defined, the unwinding // uses the exception context. #define USE_FEF // to mark where code needs to be changed to eliminate the FEF #if defined(TARGET_X86) && !defined(TARGET_UNIX) #undef USE_FEF // Turn off the FEF use on x86. #define ELIMINATE_FEF #else #if defined(ELIMINATE_FEF) #undef ELIMINATE_FEF #endif #endif // TARGET_X86 && !TARGET_UNIX #if defined(FEATURE_EH_FUNCLETS) #define RECORD_RESUMABLE_FRAME_SP #endif //************************************************************************ // Enumerate all functions. 
//************************************************************************

/* This enumerator is meant to be used for the most common cases, i.e. to
   enumerate just all the functions of the requested thread. It is just a
   cover for the "real" enumerator.
 */

StackWalkAction StackWalkFunctions(Thread * thread, PSTACKWALKFRAMESCALLBACK pCallback, VOID * pData);

/*<TODO>@ISSUE: Maybe use a define instead?</TODO>
#define StackWalkFunctions(thread, callBack, userdata) thread->StackWalkFrames(METHODSONLY, (callBack),(userData))
*/

// Describes one logical stack frame observed during a stack walk.  An instance
// is populated by the stack walker and handed to each callback; it hides (for
// the common queries) whether the frame is an explicit Frame or a "frameless"
// managed method, so most callbacks need not distinguish the two cases.
class CrawlFrame
{
public:

#ifdef TARGET_X86
    friend StackWalkAction TAStackCrawlCallBack(CrawlFrame* pCf, void* data);
#endif // TARGET_X86

    //************************************************************************
    // Functions available for the callbacks (using the current pCrawlFrame)
    //************************************************************************

    /* Widely used/benign functions */

    /* Is this a function? */
    /* Returns either a MethodDesc* or NULL for "non-function" frames */
    //<TODO>@TODO: what will it return for transition frames?</TODO>

#ifdef FEATURE_INTERPRETER
    MethodDesc *GetFunction();
#else // FEATURE_INTERPRETER
    inline MethodDesc *GetFunction()
    {
        LIMITED_METHOD_DAC_CONTRACT;
        return pFunc;
    }
#endif

    Assembly *GetAssembly();

    /* Returns either a Frame * (for "framed items) or
       Returns NULL for frameless functions
     */
    inline Frame* GetFrame()       // will return NULL for "frameless methods"
    {
        LIMITED_METHOD_DAC_CONTRACT;

        // 0xcc is presumably the debug fill pattern, so this catches reads of a
        // field that the stack walker never initialized -- TODO confirm.
        _ASSERTE((int)isFrameless != 0xcc);

        if (isFrameless)
            return NULL;
        else
            return pFrame;
    }

    BOOL IsInCalleesFrames(LPVOID stackPointer);

    // Fetch the extra type argument passed in some cases
    PTR_VOID GetParamTypeArg();

    /* Returns the "this" pointer of the method of the current frame -- at least in some cases.
       Returns NULL if the current frame does not have a method, or that method
       is not an instance method of a class type.
       Otherwise, the semantics currently depend, unfortunately, on the architecture.
       On non-x86 architectures, should only be called for methods where the generic instantiation
       context is found via the this pointer (so that this information will be encoded in
       the GC Info).  On x86, can be called for this case, or if the method is synchronized.
     */
    OBJECTREF GetThisPointer();

    /*
        Returns ambient Stack pointer for this crawlframe.
        Must be a frameless method.
        Returns NULL if not available (includes prolog + epilog).
        This is safe to call on all methods, but it may return
        garbage if the method does not have an ambient SP (eg, ebp-based methods).
        x86 is the only platform using ambient SP.
    */
    TADDR GetAmbientSPFromCrawlFrame();

    void GetExactGenericInstantiations(Instantiation *pClassInst,
                                       Instantiation *pMethodInst);

    /* Returns extra information required to reconstruct exact generic parameters,
       if any.
       Returns NULL if
         - no extra information is required (i.e. the code is non-shared, which
           you can tell from the MethodDesc), or
         - the extra information is not available (i.e. optimized away or codegen problem).
       Returns a MethodTable if the pMD returned by GetFunction satisfies
       RequiresInstMethodTableArg, and returns a MethodDesc if the pMD returned by
       GetFunction satisfies RequiresInstMethodDescArg.
       These together carry the exact instantiation information.
     */
    PTR_VOID GetExactGenericArgsToken();

    inline CodeManState * GetCodeManState() { LIMITED_METHOD_DAC_CONTRACT; return & codeManState; }

    /*
       IF YOU USE ANY OF THE SUBSEQUENT FUNCTIONS, YOU NEED TO REALLY UNDERSTAND THE
       STACK-WALKER (INCLUDING UNWINDING OF METHODS IN MANAGED NATIVE CODE)!
       YOU ALSO NEED TO UNDERSTAND THAT THESE FUNCTIONS MIGHT CHANGE ON AN AS-NEED BASIS.
     */

    /* The rest are meant to be used only by the exception catcher and the GC call-back */

    /* Is currently a frame available? */
    /* conceptually returns (GetFrame(pCrawlFrame) == NULL)
     */
    inline bool IsFrameless()
    {
        LIMITED_METHOD_DAC_CONTRACT;

        _ASSERTE((int)isFrameless != 0xcc);

        return isFrameless;
    }

    /* Is it the current active (top-most) frame
     */
    inline bool IsActiveFrame()
    {
        LIMITED_METHOD_DAC_CONTRACT;

        _ASSERTE((int)isFirst != 0xcc);

        return isFirst;
    }

    /* Is it the current active function (top-most frame)
       asserts for non-functions, should be used for managed native code only
     */
    inline bool IsActiveFunc()
    {
        LIMITED_METHOD_DAC_CONTRACT;

        _ASSERTE((int)isFirst != 0xcc);

        return (pFunc && isFirst);
    }

    /* Is it the current active function (top-most frame)
       which faulted or threw an exception ?
       asserts for non-functions, should be used for managed native code only
     */
    bool IsInterrupted()
    {
        LIMITED_METHOD_DAC_CONTRACT;

        _ASSERTE((int)isInterrupted != 0xcc);

        return (pFunc && isInterrupted /* && isFrameless?? */);
    }

    /* Is it the current active function (top-most frame) which faulted ?
       asserts for non-functions, should be used for managed native code only
     */
    bool HasFaulted()
    {
        LIMITED_METHOD_DAC_CONTRACT;

        _ASSERTE((int)hasFaulted != 0xcc);

        return (pFunc && hasFaulted /* && isFrameless?? */);
    }

    /* Is this CrawlFrame just marking that we're in native code?
       Such frames are only provided when the stackwalk is inited w/ NOTIFY_ON_U2M_TRANSITIONS.
       The only use of these crawlframes is to get the Regdisplay.
     */
    bool IsNativeMarker()
    {
        LIMITED_METHOD_DAC_CONTRACT;

        _ASSERTE((int)isNativeMarker != 0xcc);

        return isNativeMarker;
    }

    /* x86 does not always push a FaultingExceptionFrame on the stack when there is a native exception
       (e.g. a breakpoint).  In this case, it relies on the CONTEXT stored on the ExInfo to resume
       the stackwalk at the managed stack frame which has faulted.

       This flag is set when the stackwalker is stopped at such a no-explicit-frame transition.
       Conceptually this is just like stopping at a transition frame.

       Note that the stackwalker only stops at no-frame transition if NOTIFY_ON_NO_FRAME_TRANSITIONS is set.
     */
    bool IsNoFrameTransition()
    {
        LIMITED_METHOD_DAC_CONTRACT;

        _ASSERTE((int)isNoFrameTransition != 0xcc);

        return isNoFrameTransition;
    }

    // A no-frame transition is one protected by an ExInfo.  It's an optimization on x86 to avoid pushing a
    // FaultingExceptionFrame (FEF).  Thus, for all intents and purposes, we should treat a no-frame
    // transition as a FEF.  This function returns a stack address for the no-frame transition to substitute
    // as the frame address of a FEF.  It's currently only used by the debugger stackwalker.
    TADDR GetNoFrameTransitionMarker()
    {
        LIMITED_METHOD_DAC_CONTRACT;

        _ASSERTE((int)isNoFrameTransition != 0xcc);

        return (isNoFrameTransition ? taNoFrameTransitionMarker : NULL);
    }

    /* Has the IP been adjusted to a point where it is safe to do GC ?
       (for OutOfLineThrownExceptionFrame)
       asserts for non-functions, should be used for managed native code only
     */
    bool IsIPadjusted()
    {
        LIMITED_METHOD_DAC_CONTRACT;

        _ASSERTE((int)isIPadjusted != 0xcc);

        return (pFunc && isIPadjusted /* && isFrameless?? */);
    }

    /* Gets the ICodeManagerFlags for the current frame */
    // Translates the crawl-frame state (active, interrupted, faulted,
    // funclet-parent) into the flag bits consumed by the code manager.
    unsigned GetCodeManagerFlags()
    {
        CONTRACTL {
            NOTHROW;
            GC_NOTRIGGER;
            SUPPORTS_DAC;
        } CONTRACTL_END;

        unsigned flags = 0;

        if (IsActiveFunc())
            flags |= ActiveStackFrame;

        if (IsInterrupted())
        {
            flags |= ExecutionAborted;

            if (!HasFaulted() && !IsIPadjusted())
            {
                // An interrupted frame that neither faulted nor had its IP
                // adjusted must be aborting a call, and cannot be the active frame.
                _ASSERTE(!(flags & ActiveStackFrame));
                flags |= AbortingCall;
            }
        }

#if defined(FEATURE_EH_FUNCLETS)
        if (ShouldParentToFuncletSkipReportingGCReferences())
        {
            flags |= ParentOfFuncletStackFrame;
        }
#endif // defined(FEATURE_EH_FUNCLETS)

        return flags;
    }

    /* Is this frame at a safe spot for GC?
     */
    bool IsGcSafe();

#if defined(TARGET_ARM) || defined(TARGET_ARM64)
    bool HasTailCalls();
#endif // TARGET_ARM || TARGET_ARM64

    PREGDISPLAY GetRegisterSet()
    {
        LIMITED_METHOD_DAC_CONTRACT;

        // We would like to make the following assertion, but it is legitimately
        // violated when we perform a crawl to find the return address for a hijack.
        // _ASSERTE(isFrameless);
        return pRD;
    }

    EECodeInfo * GetCodeInfo()
    {
        LIMITED_METHOD_DAC_CONTRACT;

        // This assumes that CrawlFrame is host-only structure with DACCESS_COMPILE
        // and thus it always returns the host address.
        return &codeInfo;
    }

    // The following accessors are valid only for frameless (managed) frames;
    // each asserts isFrameless and then delegates to the cached EECodeInfo.
    GCInfoToken GetGCInfoToken()
    {
        LIMITED_METHOD_DAC_CONTRACT;
        _ASSERTE((int)isFrameless != 0xcc);
        _ASSERTE(isFrameless);
        return codeInfo.GetGCInfoToken();
    }

    PTR_VOID GetGCInfo()
    {
        LIMITED_METHOD_DAC_CONTRACT;
        _ASSERTE((int)isFrameless != 0xcc);
        _ASSERTE(isFrameless);
        return codeInfo.GetGCInfo();
    }

    const METHODTOKEN& GetMethodToken()
    {
        LIMITED_METHOD_DAC_CONTRACT;
        _ASSERTE((int)isFrameless != 0xcc);
        _ASSERTE(isFrameless);
        return codeInfo.GetMethodToken();
    }

    unsigned GetRelOffset()
    {
        LIMITED_METHOD_DAC_CONTRACT;
        _ASSERTE((int)isFrameless != 0xcc);
        _ASSERTE(isFrameless);
        return codeInfo.GetRelOffset();
    }

    IJitManager* GetJitManager()
    {
        LIMITED_METHOD_DAC_CONTRACT;
        _ASSERTE((int)isFrameless != 0xcc);
        _ASSERTE(isFrameless);
        return codeInfo.GetJitManager();
    }

    ICodeManager* GetCodeManager()
    {
        LIMITED_METHOD_DAC_CONTRACT;
        _ASSERTE((int)isFrameless != 0xcc);
        _ASSERTE(isFrameless);
        return codeInfo.GetCodeManager();
    }

    // Returns the stackwalk cache entry for this frame, or NULL when the frame
    // is not cached or the cached entry is not safe to use.
    inline StackwalkCacheEntry* GetStackwalkCacheEntry()
    {
        LIMITED_METHOD_CONTRACT;
        // Invariant: the isCachedMethod flag and the cache's emptiness must agree.
        _ASSERTE (isCachedMethod != stackWalkCache.IsEmpty());
        if (isCachedMethod && stackWalkCache.m_CacheEntry.IsSafeToUseCache())
        {
            return &(stackWalkCache.m_CacheEntry);
        }
        else
        {
            return NULL;
        }
    }

    void CheckGSCookies();

    inline Thread* GetThread()
    {
        LIMITED_METHOD_CONTRACT;
        return pThread;
    }

#if defined(FEATURE_EH_FUNCLETS)
    bool IsFunclet()
    {
        WRAPPER_NO_CONTRACT;

        // Only frameless (managed) frames can be funclets.
        if (!IsFrameless())
            return false;

        return !!codeInfo.IsFunclet();
    }

    bool IsFilterFunclet();

    // Indicates if the funclet has already reported GC
    // references (or not). This will return true if
    // we come across the parent frame of a funclet
    // that is active on the stack.
    bool ShouldParentToFuncletSkipReportingGCReferences()
    {
        LIMITED_METHOD_CONTRACT;
        return fShouldParentToFuncletSkipReportingGCReferences;
    }

    bool ShouldCrawlframeReportGCReferences()
    {
        LIMITED_METHOD_CONTRACT;

        return fShouldCrawlframeReportGCReferences;
    }

    bool ShouldParentToFuncletUseUnwindTargetLocationForGCReporting()
    {
        LIMITED_METHOD_CONTRACT;
        return fShouldParentFrameUseUnwindTargetPCforGCReporting;
    }

    const EE_ILEXCEPTION_CLAUSE& GetEHClauseForCatch()
    {
        return ehClauseForCatch;
    }

#endif // FEATURE_EH_FUNCLETS

protected:
    // CrawlFrames are temporarily created by the enumerator.
    // Do not create one from C++. This protected constructor polices this rule.
    CrawlFrame();

    void SetCurGSCookie(GSCookie * pGSCookie);

private:

    friend class Thread;
    friend class EECodeManager;
    friend class StackFrameIterator;
#ifdef FEATURE_EH_FUNCLETS
    friend class ExceptionTracker;
#endif // FEATURE_EH_FUNCLETS

    CodeManState      codeManState;

    bool              isFrameless;   // true => managed frame with no explicit Frame (see GetFrame)
    bool              isFirst;       // true => top-most (active) frame (see IsActiveFrame)

    // The next three fields are only valid for managed stack frames.  They are set using attributes
    // on explicit frames, and they are reset after processing each managed stack frame.
    bool              isInterrupted;
    bool              hasFaulted;
    bool              isIPadjusted;

    bool              isNativeMarker;
    bool              isProfilerDoStackSnapshot;
    bool              isNoFrameTransition;
    TADDR             taNoFrameTransitionMarker;    // see code:CrawlFrame.GetNoFrameTransitionMarker
    PTR_Frame         pFrame;
    MethodDesc       *pFunc;

    // the rest is only used for "frameless methods"
    PREGDISPLAY       pRD; // "thread context"/"virtual register set"

    EECodeInfo        codeInfo;
#if defined(FEATURE_EH_FUNCLETS)
    bool              isFilterFunclet;
    bool              isFilterFuncletCached;
    bool              fShouldParentToFuncletSkipReportingGCReferences;
    bool              fShouldCrawlframeReportGCReferences;
    bool              fShouldParentFrameUseUnwindTargetPCforGCReporting;
    EE_ILEXCEPTION_CLAUSE ehClauseForCatch;
#endif //FEATURE_EH_FUNCLETS
    Thread*           pThread;

    // fields used for stackwalk cache
    BOOL              isCachedMethod;
    StackwalkCache    stackWalkCache;

    GSCookie         *pCurGSCookie;
    GSCookie         *pFirstGSCookie;

    friend class Frame; // added to allow 'friend void CrawlFrame::GotoNextFrame();' declaration in class Frame, frames.h
    // NOTE(review): implementation not visible here; presumably advances this
    // CrawlFrame along the explicit Frame chain -- confirm in stackwalk.cpp.
    void GotoNextFrame();
};

void GcEnumObject(LPVOID pData, OBJECTREF *pObj);
StackWalkAction GcStackCrawlCallBack(CrawlFrame* pCF, VOID* pData);

#if defined(ELIMINATE_FEF)
//******************************************************************************
// This class is used to help use exception context records to resync a
// stackwalk, when managed code has generated an exception (eg, AV, zerodiv.,,)
// Such an exception causes a transition from the managed code into unmanaged
// OS and runtime code, but without the benefit of any Frame.  This code helps
// the stackwalker simulate the effect that such a frame would have.
// In particular, this class has methods to walk the chain of ExInfos, looking
// for records with pContext pointers with certain characteristics.  The
// characteristics that are important are the location in the stack (ie, is a
// given pContext relevant at a particular point in the stack walk), and
// whether the pContext was generated in managed code.
//******************************************************************************
class ExInfoWalker
{
public:
    ExInfoWalker() : m_pExInfo(0) { SUPPORTS_DAC; }
    void Init (ExInfo *pExInfo)
    {
        SUPPORTS_DAC;
        m_pExInfo = pExInfo;
    }
    // Skip one ExInfo.
    void WalkOne();
    // Attempt to find an ExInfo with a pContext that is higher (older) than
    //  a given minimum location.
    void WalkToPosition(TADDR taMinimum, BOOL bPopFrames);
    // Attempt to find an ExInfo with a pContext that has an IP in managed code.
    void WalkToManaged();
    // Return current ExInfo's m_pContext, or NULL if no m_pExInfo.
    PTR_CONTEXT GetContext() { SUPPORTS_DAC; return m_pExInfo ? m_pExInfo->m_pContext : NULL; }
    // Useful to see if there is more on the ExInfo chain.
    ExInfo* GetExInfo() { SUPPORTS_DAC; return m_pExInfo; }

    // helper functions for retrieving information from the exception CONTEXT

    // Stack pointer from the current ExInfo's CONTEXT, or NULL if unavailable.
    TADDR GetSPFromContext()
    {
        LIMITED_METHOD_CONTRACT;
        SUPPORTS_DAC;
        return dac_cast<TADDR>((m_pExInfo && m_pExInfo->m_pContext) ? GetSP(m_pExInfo->m_pContext) : PTR_NULL);
    }

    // Frame pointer from the current ExInfo's CONTEXT, or NULL if unavailable.
    TADDR GetEBPFromContext()
    {
        LIMITED_METHOD_CONTRACT;
        SUPPORTS_DAC;
        return dac_cast<TADDR>((m_pExInfo && m_pExInfo->m_pContext) ? GetFP(m_pExInfo->m_pContext) : PTR_NULL);
    }

    // Exception code of the current ExInfo's exception record, or 0 if none.
    DWORD GetFault() { SUPPORTS_DAC; return m_pExInfo ? m_pExInfo->m_pExceptionRecord->ExceptionCode : 0; }

private:
    ExInfo      *m_pExInfo;    // current position in the ExInfo chain; 0 when exhausted
};  // class ExInfoWalker
#endif // ELIMINATE_FEF

//---------------------------------------------------------------------------------------
//
// This iterator class walks the stack of a managed thread.  Where the iterator stops depends on the
// stackwalk flags.
//
// Notes:
//    This class works both in-process and out-of-process (e.g. DAC).
//

class StackFrameIterator
{
public:
    // This constructor is for the usage pattern of creating an uninitialized StackFrameIterator and then
    // calling Init() on it.
    StackFrameIterator(void);

    // This constructor is for the usage pattern of creating an initialized StackFrameIterator and then
    // calling ResetRegDisp() on it.
    StackFrameIterator(Thread * pThread, PTR_Frame pFrame, ULONG32 flags);

    //
    // We should consider merging Init() and ResetRegDisp().
    //

    // Initialize the iterator.  Note that the iterator has thread-affinity,
    // and the stackwalk flags cannot be changed once the iterator is created.
    BOOL Init(Thread    * pThread,
              PTR_Frame   pFrame,
              PREGDISPLAY pRegDisp,
              ULONG32     flags);

    // Reset the iterator to the specified REGDISPLAY.  The caller must ensure that the REGDISPLAY is valid.
    BOOL ResetRegDisp(PREGDISPLAY pRegDisp,
                      bool        fIsFirst);

    // @dbgtodo  inspection - This function should be removed once the Windows debuggers stop using the old DAC API.
    void SetIsFirstFrame(bool isFirst)
    {
        LIMITED_METHOD_CONTRACT;
        m_crawl.isFirst = isFirst;
    }

    // whether the iterator has reached the root of the stack or not
    BOOL IsValid(void);

    // advance to the next frame according to the stackwalk flags
    StackWalkAction Next(void);

    // Classification of the frame the iterator is currently stopped at.
    enum FrameState
    {
        SFITER_UNINITIALIZED,              // uninitialized
        SFITER_FRAMELESS_METHOD,           // managed stack frame
        SFITER_FRAME_FUNCTION,             // explicit frame
        SFITER_SKIPPED_FRAME_FUNCTION,     // skipped explicit frame
        SFITER_NO_FRAME_TRANSITION,        // no-frame transition (currently used for ExInfo only)
        SFITER_NATIVE_MARKER_FRAME,        // the native stack frame immediately below (stack grows up)
                                           // a managed stack region
        SFITER_INITIAL_NATIVE_CONTEXT,     // initial native seed CONTEXT
        SFITER_DONE,                       // the iterator has reached the end of the stack
    };
    FrameState GetFrameState() {LIMITED_METHOD_DAC_CONTRACT; return m_frameState;}

    // Public: the CrawlFrame describing the frame the iterator is currently stopped at.
    CrawlFrame m_crawl;

#if defined(_DEBUG)
    // used in logging
    UINT32 m_uFramesProcessed;
#endif // _DEBUG

private:
    // This is a helper for the two constructors.
    void CommonCtor(Thread * pThread, PTR_Frame pFrame, ULONG32 flags);

    // Reset the CrawlFrame owned by the iterator.  Used by both Init() and ResetRegDisp().
    void ResetCrawlFrame(void);

    // Check whether we should stop at the current frame given the stackwalk flags.
    // If not, continue advancing to the next frame.
    StackWalkAction Filter(void);

    // Advance to the next frame regardless of the stackwalk flags.  This is used by Next() and Filter().
    StackWalkAction NextRaw(void);

    // sync the REGDISPLAY to the current CONTEXT
    void UpdateRegDisp(void);

    // Check whether the IP is managed code.  This function updates the following fields on CrawlFrame:
    // JitManagerInstance and isFrameless.
    void ProcessIp(PCODE Ip);

    // Update the CrawlFrame to represent where we have stopped.
    // This is called after advancing to a new frame.
    void ProcessCurrentFrame(void);

    // If an explicit frame is allocated in a managed stack frame (e.g. an inlined pinvoke call),
    // we may have skipped an explicit frame.  This function checks for them.
    BOOL CheckForSkippedFrames(void);

    // Perform the necessary tasks before stopping at a managed stack frame.  This is mostly validation work.
    void PreProcessingForManagedFrames(void);

    // Perform the necessary tasks after stopping at a managed stack frame and unwinding to its caller.
    // This includes advancing the ExInfo and checking whether the new IP is managed.
    void PostProcessingForManagedFrames(void);

    // Perform the necessary tasks after stopping at a no-frame transition.  This includes loading
    // the CONTEXT stored in the ExInfo and updating the REGDISPLAY to the faulting managed stack frame.
    void PostProcessingForNoFrameTransition(void);

#if defined(FEATURE_EH_FUNCLETS)
    // Clear the funclet GC-reference-reporting bookkeeping.  By default both the
    // primary and the intermediary funclet-parent state are reset; pass
    // ResetOnlyIntermediaryState = true to keep the primary state.
    void ResetGCRefReportingState(bool ResetOnlyIntermediaryState = false)
    {
        LIMITED_METHOD_CONTRACT;

        if (!ResetOnlyIntermediaryState)
        {
            m_sfFuncletParent = StackFrame();
            m_fProcessNonFilterFunclet = false;
        }

        m_sfIntermediaryFuncletParent = StackFrame();
        m_fProcessIntermediaryNonFilterFunclet = false;
    }
#endif // defined(FEATURE_EH_FUNCLETS)

    // Iteration state.
    FrameState m_frameState;

    // Initial state.  Must be preserved for restarting.
    Thread * m_pThread;                       // Thread on which to walk.

    PTR_Frame m_pStartFrame;                  // Frame* passed to Init

    // This is the real starting explicit frame.  If m_pStartFrame is NULL,
    // then this is equal to m_pThread->GetFrame().  Otherwise this is equal to m_pStartFrame.
    INDEBUG(PTR_Frame m_pRealStartFrame);
    ULONG32               m_flags;          // StackWalkFrames flags.
    ICodeManagerFlags     m_codeManFlags;
    ExecutionManager::ScanFlag m_scanFlag;

    // the following fields are used to cache information about a managed stack frame
    // when we need to stop for skipped explicit frames
    EECodeInfo     m_cachedCodeInfo;

    GSCookie * m_pCachedGSCookie;

#if defined(ELIMINATE_FEF)
    ExInfoWalker m_exInfoWalk;
#endif // ELIMINATE_FEF

#if defined(FEATURE_EH_FUNCLETS)
    // used in funclet-skipping
    StackFrame    m_sfParent;

    // Used in GC reference enumeration mode
    StackFrame    m_sfFuncletParent;
    bool          m_fProcessNonFilterFunclet;
    StackFrame    m_sfIntermediaryFuncletParent;
    bool          m_fProcessIntermediaryNonFilterFunclet;
    bool          m_fDidFuncletReportGCReferences;
#endif // FEATURE_EH_FUNCLETS

#if defined(RECORD_RESUMABLE_FRAME_SP)
    LPVOID m_pvResumableFrameTargetSP;
#endif // RECORD_RESUMABLE_FRAME_SP
};

void SetUpRegdisplayForStackWalk(Thread * pThread, T_CONTEXT * pContext, REGDISPLAY * pRegdisplay);

#endif
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /* This is a poor man's implementation of virtual methods. */ /* The purpose of pCrawlFrame is to abstract (at least for the most common cases from the fact that not all methods are "framed" (basically all methods in "native" code are "unframed"). That way the job for the enumerator callbacks becomes much simpler (i.e. more transparent and hopefully less error prone). Two call-backs still need to distinguish between the two types: GC and exception. Both of these call-backs need to do really different things; for frameless methods they need to go through the codemanager and use the resp. apis. The reason for not implementing virtual methods on crawlFrame is solely because of the way exception handling is implemented (it does a "long jump" and bypasses the enumerator (stackWalker) when it finds a matching frame. By doing so couldn't properly destruct the dynamically created instance of CrawlFrame. */ #ifndef __stackwalk_h__ #define __stackwalk_h__ #include "eetwain.h" #include "stackwalktypes.h" class Frame; class CrawlFrame; class ICodeManager; class IJitManager; struct EE_ILEXCEPTION; class AppDomain; // This define controls handling of faults in managed code. If it is defined, // the exception is handled (retried, actually), with a FaultingExceptionFrame // on the stack. The FEF is used for unwinding. If not defined, the unwinding // uses the exception context. #define USE_FEF // to mark where code needs to be changed to eliminate the FEF #if defined(TARGET_X86) && !defined(TARGET_UNIX) #undef USE_FEF // Turn off the FEF use on x86. #define ELIMINATE_FEF #else #if defined(ELIMINATE_FEF) #undef ELIMINATE_FEF #endif #endif // TARGET_X86 && !TARGET_UNIX #if defined(FEATURE_EH_FUNCLETS) #define RECORD_RESUMABLE_FRAME_SP #endif //************************************************************************ // Enumerate all functions. 
//************************************************************************

/* This enumerator is meant to be used for the most common cases, i.e. to
   enumerate just all the functions of the requested thread. It is just a
   cover for the "real" enumerator.
 */

StackWalkAction StackWalkFunctions(Thread * thread, PSTACKWALKFRAMESCALLBACK pCallback, VOID * pData);

/*<TODO>@ISSUE: Maybe use a define instead?</TODO>
#define StackWalkFunctions(thread, callBack, userdata) thread->StackWalkFrames(METHODSONLY, (callBack),(userData))
*/

// Describes one logical stack frame observed during a stack walk.  An instance
// is populated by the stack walker and handed to each callback; it hides (for
// the common queries) whether the frame is an explicit Frame or a "frameless"
// managed method, so most callbacks need not distinguish the two cases.
class CrawlFrame
{
public:

#ifdef TARGET_X86
    friend StackWalkAction TAStackCrawlCallBack(CrawlFrame* pCf, void* data);
#endif // TARGET_X86

    //************************************************************************
    // Functions available for the callbacks (using the current pCrawlFrame)
    //************************************************************************

    /* Widely used/benign functions */

    /* Is this a function? */
    /* Returns either a MethodDesc* or NULL for "non-function" frames */
    //<TODO>@TODO: what will it return for transition frames?</TODO>

#ifdef FEATURE_INTERPRETER
    MethodDesc *GetFunction();
#else // FEATURE_INTERPRETER
    inline MethodDesc *GetFunction()
    {
        LIMITED_METHOD_DAC_CONTRACT;
        return pFunc;
    }
#endif

    Assembly *GetAssembly();

    /* Returns either a Frame * (for "framed items) or
       Returns NULL for frameless functions
     */
    inline Frame* GetFrame()       // will return NULL for "frameless methods"
    {
        LIMITED_METHOD_DAC_CONTRACT;

        // 0xcc is presumably the debug fill pattern, so this catches reads of a
        // field that the stack walker never initialized -- TODO confirm.
        _ASSERTE((int)isFrameless != 0xcc);

        if (isFrameless)
            return NULL;
        else
            return pFrame;
    }

    BOOL IsInCalleesFrames(LPVOID stackPointer);

    // Fetch the extra type argument passed in some cases
    PTR_VOID GetParamTypeArg();

    /* Returns the "this" pointer of the method of the current frame -- at least in some cases.
       Returns NULL if the current frame does not have a method, or that method
       is not an instance method of a class type.
       Otherwise, the semantics currently depend, unfortunately, on the architecture.
       On non-x86 architectures, should only be called for methods where the generic instantiation
       context is found via the this pointer (so that this information will be encoded in
       the GC Info).  On x86, can be called for this case, or if the method is synchronized.
     */
    OBJECTREF GetThisPointer();

    /*
        Returns ambient Stack pointer for this crawlframe.
        Must be a frameless method.
        Returns NULL if not available (includes prolog + epilog).
        This is safe to call on all methods, but it may return
        garbage if the method does not have an ambient SP (eg, ebp-based methods).
        x86 is the only platform using ambient SP.
    */
    TADDR GetAmbientSPFromCrawlFrame();

    void GetExactGenericInstantiations(Instantiation *pClassInst,
                                       Instantiation *pMethodInst);

    /* Returns extra information required to reconstruct exact generic parameters,
       if any.
       Returns NULL if
         - no extra information is required (i.e. the code is non-shared, which
           you can tell from the MethodDesc), or
         - the extra information is not available (i.e. optimized away or codegen problem).
       Returns a MethodTable if the pMD returned by GetFunction satisfies
       RequiresInstMethodTableArg, and returns a MethodDesc if the pMD returned by
       GetFunction satisfies RequiresInstMethodDescArg.
       These together carry the exact instantiation information.
     */
    PTR_VOID GetExactGenericArgsToken();

    inline CodeManState * GetCodeManState() { LIMITED_METHOD_DAC_CONTRACT; return & codeManState; }

    /*
       IF YOU USE ANY OF THE SUBSEQUENT FUNCTIONS, YOU NEED TO REALLY UNDERSTAND THE
       STACK-WALKER (INCLUDING UNWINDING OF METHODS IN MANAGED NATIVE CODE)!
       YOU ALSO NEED TO UNDERSTAND THAT THESE FUNCTIONS MIGHT CHANGE ON AN AS-NEED BASIS.
     */

    /* The rest are meant to be used only by the exception catcher and the GC call-back */

    /* Is currently a frame available? */
    /* conceptually returns (GetFrame(pCrawlFrame) == NULL)
     */
    inline bool IsFrameless()
    {
        LIMITED_METHOD_DAC_CONTRACT;

        _ASSERTE((int)isFrameless != 0xcc);

        return isFrameless;
    }

    /* Is it the current active (top-most) frame
     */
    inline bool IsActiveFrame()
    {
        LIMITED_METHOD_DAC_CONTRACT;

        _ASSERTE((int)isFirst != 0xcc);

        return isFirst;
    }

    /* Is it the current active function (top-most frame)
       asserts for non-functions, should be used for managed native code only
     */
    inline bool IsActiveFunc()
    {
        LIMITED_METHOD_DAC_CONTRACT;

        _ASSERTE((int)isFirst != 0xcc);

        return (pFunc && isFirst);
    }

    /* Is it the current active function (top-most frame)
       which faulted or threw an exception ?
       asserts for non-functions, should be used for managed native code only
     */
    bool IsInterrupted()
    {
        LIMITED_METHOD_DAC_CONTRACT;

        _ASSERTE((int)isInterrupted != 0xcc);

        return (pFunc && isInterrupted /* && isFrameless?? */);
    }

    /* Is it the current active function (top-most frame) which faulted ?
       asserts for non-functions, should be used for managed native code only
     */
    bool HasFaulted()
    {
        LIMITED_METHOD_DAC_CONTRACT;

        _ASSERTE((int)hasFaulted != 0xcc);

        return (pFunc && hasFaulted /* && isFrameless?? */);
    }

    /* Is this CrawlFrame just marking that we're in native code?
       Such frames are only provided when the stackwalk is inited w/ NOTIFY_ON_U2M_TRANSITIONS.
       The only use of these crawlframes is to get the Regdisplay.
     */
    bool IsNativeMarker()
    {
        LIMITED_METHOD_DAC_CONTRACT;

        _ASSERTE((int)isNativeMarker != 0xcc);

        return isNativeMarker;
    }

    /* x86 does not always push a FaultingExceptionFrame on the stack when there is a native exception
       (e.g. a breakpoint).  In this case, it relies on the CONTEXT stored on the ExInfo to resume
       the stackwalk at the managed stack frame which has faulted.

       This flag is set when the stackwalker is stopped at such a no-explicit-frame transition.
       Conceptually this is just like stopping at a transition frame.

       Note that the stackwalker only stops at no-frame transition if NOTIFY_ON_NO_FRAME_TRANSITIONS is set.
     */
    bool IsNoFrameTransition()
    {
        LIMITED_METHOD_DAC_CONTRACT;

        _ASSERTE((int)isNoFrameTransition != 0xcc);

        return isNoFrameTransition;
    }

    // A no-frame transition is one protected by an ExInfo.  It's an optimization on x86 to avoid pushing a
    // FaultingExceptionFrame (FEF).  Thus, for all intents and purposes, we should treat a no-frame
    // transition as a FEF.  This function returns a stack address for the no-frame transition to substitute
    // as the frame address of a FEF.  It's currently only used by the debugger stackwalker.
    TADDR GetNoFrameTransitionMarker()
    {
        LIMITED_METHOD_DAC_CONTRACT;

        _ASSERTE((int)isNoFrameTransition != 0xcc);

        return (isNoFrameTransition ? taNoFrameTransitionMarker : NULL);
    }

    /* Has the IP been adjusted to a point where it is safe to do GC ?
       (for OutOfLineThrownExceptionFrame)
       asserts for non-functions, should be used for managed native code only
     */
    bool IsIPadjusted()
    {
        LIMITED_METHOD_DAC_CONTRACT;

        _ASSERTE((int)isIPadjusted != 0xcc);

        return (pFunc && isIPadjusted /* && isFrameless?? */);
    }

    /* Gets the ICodeManagerFlags for the current frame */
    // Translates the crawl-frame state (active, interrupted, faulted,
    // funclet-parent) into the flag bits consumed by the code manager.
    unsigned GetCodeManagerFlags()
    {
        CONTRACTL {
            NOTHROW;
            GC_NOTRIGGER;
            SUPPORTS_DAC;
        } CONTRACTL_END;

        unsigned flags = 0;

        if (IsActiveFunc())
            flags |= ActiveStackFrame;

        if (IsInterrupted())
        {
            flags |= ExecutionAborted;

            if (!HasFaulted() && !IsIPadjusted())
            {
                // An interrupted frame that neither faulted nor had its IP
                // adjusted must be aborting a call, and cannot be the active frame.
                _ASSERTE(!(flags & ActiveStackFrame));
                flags |= AbortingCall;
            }
        }

#if defined(FEATURE_EH_FUNCLETS)
        if (ShouldParentToFuncletSkipReportingGCReferences())
        {
            flags |= ParentOfFuncletStackFrame;
        }
#endif // defined(FEATURE_EH_FUNCLETS)

        return flags;
    }

    /* Is this frame at a safe spot for GC?
     */
    bool IsGcSafe();

#if defined(TARGET_ARM) || defined(TARGET_ARM64)
    bool HasTailCalls();
#endif // TARGET_ARM || TARGET_ARM64

    PREGDISPLAY GetRegisterSet()
    {
        LIMITED_METHOD_DAC_CONTRACT;

        // We would like to make the following assertion, but it is legitimately
        // violated when we perform a crawl to find the return address for a hijack.
        // _ASSERTE(isFrameless);
        return pRD;
    }

    EECodeInfo * GetCodeInfo()
    {
        LIMITED_METHOD_DAC_CONTRACT;

        // This assumes that CrawlFrame is host-only structure with DACCESS_COMPILE
        // and thus it always returns the host address.
        return &codeInfo;
    }

    // The following accessors are valid only for frameless (managed) frames;
    // each asserts isFrameless and then delegates to the cached EECodeInfo.
    GCInfoToken GetGCInfoToken()
    {
        LIMITED_METHOD_DAC_CONTRACT;
        _ASSERTE((int)isFrameless != 0xcc);
        _ASSERTE(isFrameless);
        return codeInfo.GetGCInfoToken();
    }

    PTR_VOID GetGCInfo()
    {
        LIMITED_METHOD_DAC_CONTRACT;
        _ASSERTE((int)isFrameless != 0xcc);
        _ASSERTE(isFrameless);
        return codeInfo.GetGCInfo();
    }

    const METHODTOKEN& GetMethodToken()
    {
        LIMITED_METHOD_DAC_CONTRACT;
        _ASSERTE((int)isFrameless != 0xcc);
        _ASSERTE(isFrameless);
        return codeInfo.GetMethodToken();
    }

    unsigned GetRelOffset()
    {
        LIMITED_METHOD_DAC_CONTRACT;
        _ASSERTE((int)isFrameless != 0xcc);
        _ASSERTE(isFrameless);
        return codeInfo.GetRelOffset();
    }

    IJitManager* GetJitManager()
    {
        LIMITED_METHOD_DAC_CONTRACT;
        _ASSERTE((int)isFrameless != 0xcc);
        _ASSERTE(isFrameless);
        return codeInfo.GetJitManager();
    }

    ICodeManager* GetCodeManager()
    {
        LIMITED_METHOD_DAC_CONTRACT;
        _ASSERTE((int)isFrameless != 0xcc);
        _ASSERTE(isFrameless);
        return codeInfo.GetCodeManager();
    }

    // Returns the stackwalk cache entry for this frame, or NULL when the frame
    // is not cached or the cached entry is not safe to use.
    inline StackwalkCacheEntry* GetStackwalkCacheEntry()
    {
        LIMITED_METHOD_CONTRACT;
        // Invariant: the isCachedMethod flag and the cache's emptiness must agree.
        _ASSERTE (isCachedMethod != stackWalkCache.IsEmpty());
        if (isCachedMethod && stackWalkCache.m_CacheEntry.IsSafeToUseCache())
        {
            return &(stackWalkCache.m_CacheEntry);
        }
        else
        {
            return NULL;
        }
    }

    void CheckGSCookies();

    inline Thread* GetThread()
    {
        LIMITED_METHOD_CONTRACT;
        return pThread;
    }

#if defined(FEATURE_EH_FUNCLETS)
    bool IsFunclet()
    {
        WRAPPER_NO_CONTRACT;

        // Only frameless (managed) frames can be funclets.
        if (!IsFrameless())
            return false;

        return !!codeInfo.IsFunclet();
    }

    bool IsFilterFunclet();

    // Indicates if the funclet has already reported GC
    // references (or not). This will return true if
    // we come across the parent frame of a funclet
    // that is active on the stack.
    bool ShouldParentToFuncletSkipReportingGCReferences()
    {
        LIMITED_METHOD_CONTRACT;
        return fShouldParentToFuncletSkipReportingGCReferences;
    }

    bool ShouldCrawlframeReportGCReferences()
    {
        LIMITED_METHOD_CONTRACT;

        return fShouldCrawlframeReportGCReferences;
    }

    bool ShouldParentToFuncletUseUnwindTargetLocationForGCReporting()
    {
        LIMITED_METHOD_CONTRACT;
        return fShouldParentFrameUseUnwindTargetPCforGCReporting;
    }

    const EE_ILEXCEPTION_CLAUSE& GetEHClauseForCatch()
    {
        return ehClauseForCatch;
    }

#endif // FEATURE_EH_FUNCLETS

protected:
    // CrawlFrames are temporarily created by the enumerator.
    // Do not create one from C++. This protected constructor polices this rule.
    CrawlFrame();

    void SetCurGSCookie(GSCookie * pGSCookie);

private:

    friend class Thread;
    friend class EECodeManager;
    friend class StackFrameIterator;
#ifdef FEATURE_EH_FUNCLETS
    friend class ExceptionTracker;
#endif // FEATURE_EH_FUNCLETS

    CodeManState      codeManState;

    bool              isFrameless;   // true => managed frame with no explicit Frame (see GetFrame)
    bool              isFirst;       // true => top-most (active) frame (see IsActiveFrame)

    // The next three fields are only valid for managed stack frames.  They are set using attributes
    // on explicit frames, and they are reset after processing each managed stack frame.
    bool              isInterrupted;
    bool              hasFaulted;
    bool              isIPadjusted;

    bool              isNativeMarker;
    bool              isProfilerDoStackSnapshot;
    bool              isNoFrameTransition;
    TADDR             taNoFrameTransitionMarker;    // see code:CrawlFrame.GetNoFrameTransitionMarker
    PTR_Frame         pFrame;
    MethodDesc       *pFunc;

    // the rest is only used for "frameless methods"
    PREGDISPLAY       pRD; // "thread context"/"virtual register set"

    EECodeInfo        codeInfo;
#if defined(FEATURE_EH_FUNCLETS)
    bool              isFilterFunclet;
    bool              isFilterFuncletCached;
    bool              fShouldParentToFuncletSkipReportingGCReferences;
    bool              fShouldCrawlframeReportGCReferences;
    bool              fShouldParentFrameUseUnwindTargetPCforGCReporting;
    EE_ILEXCEPTION_CLAUSE ehClauseForCatch;
#endif //FEATURE_EH_FUNCLETS
    Thread*           pThread;

    // fields used for stackwalk cache
    BOOL              isCachedMethod;
    StackwalkCache    stackWalkCache;

    GSCookie         *pCurGSCookie;
    GSCookie         *pFirstGSCookie;

    friend class Frame; // added to allow 'friend void CrawlFrame::GotoNextFrame();' declaration in class Frame, frames.h
    // NOTE(review): implementation not visible here; presumably advances this
    // CrawlFrame along the explicit Frame chain -- confirm in stackwalk.cpp.
    void GotoNextFrame();
};

void GcEnumObject(LPVOID pData, OBJECTREF *pObj);
StackWalkAction GcStackCrawlCallBack(CrawlFrame* pCF, VOID* pData);

#if defined(ELIMINATE_FEF)
//******************************************************************************
// This class is used to help use exception context records to resync a
// stackwalk, when managed code has generated an exception (eg, AV, zerodiv.,,)
// Such an exception causes a transition from the managed code into unmanaged
// OS and runtime code, but without the benefit of any Frame.  This code helps
// the stackwalker simulate the effect that such a frame would have.
// In particular, this class has methods to walk the chain of ExInfos, looking
// for records with pContext pointers with certain characteristics.  The
// characteristics that are important are the location in the stack (ie, is a
// given pContext relevant at a particular point in the stack walk), and
// whether the pContext was generated in managed code.
//****************************************************************************** class ExInfoWalker { public: ExInfoWalker() : m_pExInfo(0) { SUPPORTS_DAC; } void Init (ExInfo *pExInfo) { SUPPORTS_DAC; m_pExInfo = pExInfo; } // Skip one ExInfo. void WalkOne(); // Attempt to find an ExInfo with a pContext that is higher (older) than // a given minimum location. void WalkToPosition(TADDR taMinimum, BOOL bPopFrames); // Attempt to find an ExInfo with a pContext that has an IP in managed code. void WalkToManaged(); // Return current ExInfo's m_pContext, or NULL if no m_pExInfo. PTR_CONTEXT GetContext() { SUPPORTS_DAC; return m_pExInfo ? m_pExInfo->m_pContext : NULL; } // Useful to see if there is more on the ExInfo chain. ExInfo* GetExInfo() { SUPPORTS_DAC; return m_pExInfo; } // helper functions for retrieving information from the exception CONTEXT TADDR GetSPFromContext() { LIMITED_METHOD_CONTRACT; SUPPORTS_DAC; return dac_cast<TADDR>((m_pExInfo && m_pExInfo->m_pContext) ? GetSP(m_pExInfo->m_pContext) : PTR_NULL); } TADDR GetEBPFromContext() { LIMITED_METHOD_CONTRACT; SUPPORTS_DAC; return dac_cast<TADDR>((m_pExInfo && m_pExInfo->m_pContext) ? GetFP(m_pExInfo->m_pContext) : PTR_NULL); } DWORD GetFault() { SUPPORTS_DAC; return m_pExInfo ? m_pExInfo->m_pExceptionRecord->ExceptionCode : 0; } private: ExInfo *m_pExInfo; }; // class ExInfoWalker #endif // ELIMINATE_FEF //--------------------------------------------------------------------------------------- // // This iterator class walks the stack of a managed thread. Where the iterator stops depends on the // stackwalk flags. // // Notes: // This class works both in-process and out-of-process (e.g. DAC). // class StackFrameIterator { public: // This constructor is for the usage pattern of creating an uninitialized StackFrameIterator and then // calling Init() on it. 
StackFrameIterator(void); // This constructor is for the usage pattern of creating an initialized StackFrameIterator and then // calling ResetRegDisp() on it. StackFrameIterator(Thread * pThread, PTR_Frame pFrame, ULONG32 flags); // // We should consider merging Init() and ResetRegDisp(). // // Initialize the iterator. Note that the iterator has thread-affinity, // and the stackwalk flags cannot be changed once the iterator is created. BOOL Init(Thread * pThread, PTR_Frame pFrame, PREGDISPLAY pRegDisp, ULONG32 flags); // Reset the iterator to the specified REGDISPLAY. The caller must ensure that the REGDISPLAY is valid. BOOL ResetRegDisp(PREGDISPLAY pRegDisp, bool fIsFirst); // @dbgtodo inspection - This function should be removed once the Windows debuggers stop using the old DAC API. void SetIsFirstFrame(bool isFirst) { LIMITED_METHOD_CONTRACT; m_crawl.isFirst = isFirst; } // whether the iterator has reached the root of the stack or not BOOL IsValid(void); // advance to the next frame according to the stackwalk flags StackWalkAction Next(void); enum FrameState { SFITER_UNINITIALIZED, // uninitialized SFITER_FRAMELESS_METHOD, // managed stack frame SFITER_FRAME_FUNCTION, // explicit frame SFITER_SKIPPED_FRAME_FUNCTION, // skipped explicit frame SFITER_NO_FRAME_TRANSITION, // no-frame transition (currently used for ExInfo only) SFITER_NATIVE_MARKER_FRAME, // the native stack frame immediately below (stack grows up) // a managed stack region SFITER_INITIAL_NATIVE_CONTEXT, // initial native seed CONTEXT SFITER_DONE, // the iterator has reached the end of the stack }; FrameState GetFrameState() {LIMITED_METHOD_DAC_CONTRACT; return m_frameState;} CrawlFrame m_crawl; #if defined(_DEBUG) // used in logging UINT32 m_uFramesProcessed; #endif // _DEBUG private: // This is a helper for the two constructors. void CommonCtor(Thread * pThread, PTR_Frame pFrame, ULONG32 flags); // Reset the CrawlFrame owned by the iterator. Used by both Init() and ResetRegDisp(). 
void ResetCrawlFrame(void); // Check whether we should stop at the current frame given the stackwalk flags. // If not, continue advancing to the next frame. StackWalkAction Filter(void); // Advance to the next frame regardless of the stackwalk flags. This is used by Next() and Filter(). StackWalkAction NextRaw(void); // sync the REGDISPLAY to the current CONTEXT void UpdateRegDisp(void); // Check whether the IP is managed code. This function updates the following fields on CrawlFrame: // JitManagerInstance and isFrameless. void ProcessIp(PCODE Ip); // Update the CrawlFrame to represent where we have stopped. // This is called after advancing to a new frame. void ProcessCurrentFrame(void); // If an explicit frame is allocated in a managed stack frame (e.g. an inlined pinvoke call), // we may have skipped an explicit frame. This function checks for them. BOOL CheckForSkippedFrames(void); // Perform the necessary tasks before stopping at a managed stack frame. This is mostly validation work. void PreProcessingForManagedFrames(void); // Perform the necessary tasks after stopping at a managed stack frame and unwinding to its caller. // This includes advancing the ExInfo and checking whether the new IP is managed. void PostProcessingForManagedFrames(void); // Perform the necessary tasks after stopping at a no-frame transition. This includes loading // the CONTEXT stored in the ExInfo and updating the REGDISPLAY to the faulting managed stack frame. void PostProcessingForNoFrameTransition(void); #if defined(FEATURE_EH_FUNCLETS) void ResetGCRefReportingState(bool ResetOnlyIntermediaryState = false) { LIMITED_METHOD_CONTRACT; if (!ResetOnlyIntermediaryState) { m_sfFuncletParent = StackFrame(); m_fProcessNonFilterFunclet = false; } m_sfIntermediaryFuncletParent = StackFrame(); m_fProcessIntermediaryNonFilterFunclet = false; } #endif // defined(FEATURE_EH_FUNCLETS) // Iteration state. FrameState m_frameState; // Initial state. Must be preserved for restarting. 
Thread * m_pThread; // Thread on which to walk. PTR_Frame m_pStartFrame; // Frame* passed to Init // This is the real starting explicit frame. If m_pStartFrame is NULL, // then this is equal to m_pThread->GetFrame(). Otherwise this is equal to m_pStartFrame. INDEBUG(PTR_Frame m_pRealStartFrame); ULONG32 m_flags; // StackWalkFrames flags. ICodeManagerFlags m_codeManFlags; ExecutionManager::ScanFlag m_scanFlag; // the following fields are used to cache information about a managed stack frame // when we need to stop for skipped explicit frames EECodeInfo m_cachedCodeInfo; GSCookie * m_pCachedGSCookie; #if defined(ELIMINATE_FEF) ExInfoWalker m_exInfoWalk; #endif // ELIMINATE_FEF #if defined(FEATURE_EH_FUNCLETS) // used in funclet-skipping StackFrame m_sfParent; // Used in GC reference enumeration mode StackFrame m_sfFuncletParent; bool m_fProcessNonFilterFunclet; StackFrame m_sfIntermediaryFuncletParent; bool m_fProcessIntermediaryNonFilterFunclet; bool m_fDidFuncletReportGCReferences; #endif // FEATURE_EH_FUNCLETS #if defined(RECORD_RESUMABLE_FRAME_SP) LPVOID m_pvResumableFrameTargetSP; #endif // RECORD_RESUMABLE_FRAME_SP }; void SetUpRegdisplayForStackWalk(Thread * pThread, T_CONTEXT * pContext, REGDISPLAY * pRegdisplay); #endif
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./src/libraries/System.Data.Common/src/System/Data/TypedTableBase.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections; using System.Collections.Generic; using System.Diagnostics.CodeAnalysis; using System.Linq; namespace System.Data { /// <summary> /// This is the generic base class for TypedDataSet /// </summary> [Serializable] public abstract class TypedTableBase<T> : DataTable, IEnumerable<T> where T : DataRow { /// <summary> /// Default constructor for generic TypedTableBase. /// Will be called by generated Typed DataSet classes and is not for public use. /// </summary> protected TypedTableBase() : base() { } /// <summary> /// Constructor for the generic TypedTableBase with takes SerializationInfo and StreamingContext. /// Will be called by generated Typed DataSet classes and /// is not for public use. /// </summary> /// <param name="info">SerializationInfo containing data to construct the object.</param> /// <param name="context">The streaming context for the object being deserialized.</param> [UnconditionalSuppressMessage("ReflectionAnalysis", "IL2112:ReflectionToRequiresUnreferencedCode", Justification = "DataTable.CreateInstance's use of GetType uses only the parameterless constructor, not this serialization related constructor.")] [RequiresUnreferencedCode(DataSet.RequiresUnreferencedCodeMessage)] protected TypedTableBase(System.Runtime.Serialization.SerializationInfo info, System.Runtime.Serialization.StreamingContext context) : base(info, context) { } /// <summary> /// This property returns an enumerator of T for the TypedTable. Note, this could /// execute the underlying Linq expression. 
/// </summary> /// <returns>IEnumerable of T.</returns> public IEnumerator<T> GetEnumerator() { return Rows.Cast<T>().GetEnumerator(); } IEnumerator IEnumerable.GetEnumerator() { return GetEnumerator(); } /// <summary> /// Casts an EnumerableDataTable_TSource into EnumerableDataTable_TResult /// </summary> public EnumerableRowCollection<TResult> Cast<TResult>() { EnumerableRowCollection<T> erc = new EnumerableRowCollection<T>(this); return erc.Cast<TResult>(); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections; using System.Collections.Generic; using System.Diagnostics.CodeAnalysis; using System.Linq; namespace System.Data { /// <summary> /// This is the generic base class for TypedDataSet /// </summary> [Serializable] public abstract class TypedTableBase<T> : DataTable, IEnumerable<T> where T : DataRow { /// <summary> /// Default constructor for generic TypedTableBase. /// Will be called by generated Typed DataSet classes and is not for public use. /// </summary> protected TypedTableBase() : base() { } /// <summary> /// Constructor for the generic TypedTableBase with takes SerializationInfo and StreamingContext. /// Will be called by generated Typed DataSet classes and /// is not for public use. /// </summary> /// <param name="info">SerializationInfo containing data to construct the object.</param> /// <param name="context">The streaming context for the object being deserialized.</param> [UnconditionalSuppressMessage("ReflectionAnalysis", "IL2112:ReflectionToRequiresUnreferencedCode", Justification = "DataTable.CreateInstance's use of GetType uses only the parameterless constructor, not this serialization related constructor.")] [RequiresUnreferencedCode(DataSet.RequiresUnreferencedCodeMessage)] protected TypedTableBase(System.Runtime.Serialization.SerializationInfo info, System.Runtime.Serialization.StreamingContext context) : base(info, context) { } /// <summary> /// This property returns an enumerator of T for the TypedTable. Note, this could /// execute the underlying Linq expression. 
/// </summary> /// <returns>IEnumerable of T.</returns> public IEnumerator<T> GetEnumerator() { return Rows.Cast<T>().GetEnumerator(); } IEnumerator IEnumerable.GetEnumerator() { return GetEnumerator(); } /// <summary> /// Casts an EnumerableDataTable_TSource into EnumerableDataTable_TResult /// </summary> public EnumerableRowCollection<TResult> Cast<TResult>() { EnumerableRowCollection<T> erc = new EnumerableRowCollection<T>(this); return erc.Cast<TResult>(); } } }
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./src/coreclr/vm/stubhelpers.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // File: stubhelpers.cpp // #include "common.h" #include "mlinfo.h" #include "stubhelpers.h" #include "jitinterface.h" #include "dllimport.h" #include "fieldmarshaler.h" #include "comdelegate.h" #include "eventtrace.h" #include "comdatetime.h" #include "gcheaputilities.h" #include "interoputil.h" #ifdef FEATURE_COMINTEROP #include <oletls.h> #include "olecontexthelpers.h" #include "runtimecallablewrapper.h" #include "comcallablewrapper.h" #include "clrtocomcall.h" #include "cominterfacemarshaler.h" #endif #ifdef VERIFY_HEAP CQuickArray<StubHelpers::ByrefValidationEntry> StubHelpers::s_ByrefValidationEntries; SIZE_T StubHelpers::s_ByrefValidationIndex = 0; CrstStatic StubHelpers::s_ByrefValidationLock; // static void StubHelpers::Init() { WRAPPER_NO_CONTRACT; s_ByrefValidationLock.Init(CrstPinnedByrefValidation); } // static void StubHelpers::ValidateObjectInternal(Object *pObjUNSAFE, BOOL fValidateNextObj) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; _ASSERTE(GCHeapUtilities::GetGCHeap()->RuntimeStructuresValid()); // validate the object - there's no need to validate next object's // header since we validate the next object explicitly below if (pObjUNSAFE) { pObjUNSAFE->Validate(/*bDeep=*/ TRUE, /*bVerifyNextHeader=*/ FALSE, /*bVerifySyncBlock=*/ TRUE); } // and the next object as required if (fValidateNextObj) { Object *nextObj = GCHeapUtilities::GetGCHeap()->NextObj(pObjUNSAFE); if (nextObj != NULL) { // Note that the MethodTable of the object (i.e. the pointer at offset 0) can change from // g_pFreeObjectMethodTable to NULL, from NULL to <legal-value>, or possibly also from // g_pFreeObjectMethodTable to <legal-value> concurrently while executing this function. // Once <legal-value> is seen, we believe that the object should pass the Validate check. 
// We have to be careful and read the pointer only once to avoid "phantom reads". MethodTable *pMT = VolatileLoad(nextObj->GetMethodTablePtr()); if (pMT != NULL && pMT != g_pFreeObjectMethodTable) { // do *not* verify the next object's syncblock - the next object is not guaranteed to // be "alive" so the finalizer thread may have already released its syncblock nextObj->Validate(/*bDeep=*/ TRUE, /*bVerifyNextHeader=*/ FALSE, /*bVerifySyncBlock=*/ FALSE); } } } } // static MethodDesc *StubHelpers::ResolveInteropMethod(Object *pThisUNSAFE, MethodDesc *pMD) { WRAPPER_NO_CONTRACT; if (pMD == NULL && pThisUNSAFE != NULL) { // if this is a call via delegate, get its Invoke method MethodTable *pMT = pThisUNSAFE->GetMethodTable(); _ASSERTE(pMT->IsDelegate()); return ((DelegateEEClass *)pMT->GetClass())->GetInvokeMethod(); } return pMD; } // static void StubHelpers::FormatValidationMessage(MethodDesc *pMD, SString &ssErrorString) { CONTRACTL { THROWS; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; ssErrorString.Append(W("Detected managed heap corruption, likely culprit is interop call through ")); if (pMD == NULL) { // the only case where we don't have interop MD is CALLI ssErrorString.Append(W("CALLI.")); } else { ssErrorString.Append(W("method '")); StackSString ssClassName; pMD->GetMethodTable()->_GetFullyQualifiedNameForClass(ssClassName); ssErrorString.Append(ssClassName); ssErrorString.Append(NAMESPACE_SEPARATOR_CHAR); ssErrorString.AppendUTF8(pMD->GetName()); ssErrorString.Append(W("'.")); } } // static void StubHelpers::ProcessByrefValidationList() { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; StackSString errorString; ByrefValidationEntry entry = { NULL, NULL }; EX_TRY { AVInRuntimeImplOkayHolder AVOkay; // Process all byref validation entries we have saved since the last GC. Note that EE is suspended at // this point so we don't have to take locks and we can safely call code:GCHeap.GetContainingObject. 
for (SIZE_T i = 0; i < s_ByrefValidationIndex; i++) { entry = s_ByrefValidationEntries[i]; Object *pObjUNSAFE = GCHeapUtilities::GetGCHeap()->GetContainingObject(entry.pByref, false); ValidateObjectInternal(pObjUNSAFE, TRUE); } } EX_CATCH { EX_TRY { FormatValidationMessage(entry.pMD, errorString); EEPOLICY_HANDLE_FATAL_ERROR_WITH_MESSAGE(COR_E_EXECUTIONENGINE, errorString.GetUnicode()); } EX_CATCH { EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE); } EX_END_CATCH_UNREACHABLE; } EX_END_CATCH_UNREACHABLE; s_ByrefValidationIndex = 0; } #endif // VERIFY_HEAP #ifdef FEATURE_COMINTEROP FORCEINLINE static void GetCOMIPFromRCW_ClearFP() { LIMITED_METHOD_CONTRACT; #ifdef TARGET_X86 // As per ASURT 146699 we need to clear FP state before calling to COM // the following sequence was previously generated to compiled ML stubs // and is faster than _clearfp(). __asm { fnstsw ax and eax, 0x3F jz NoNeedToClear fnclex NoNeedToClear: } #endif // TARGET_X86 } FORCEINLINE static SOleTlsData *GetOrCreateOleTlsData() { LIMITED_METHOD_CONTRACT; SOleTlsData *pOleTlsData; #ifdef TARGET_X86 // This saves 1 memory instruction over NtCurretTeb()->ReservedForOle because // NtCurrentTeb() reads _TEB.NtTib.Self which is the same as what FS:0 already // points to. pOleTlsData = (SOleTlsData *)(ULONG_PTR)__readfsdword(offsetof(TEB, ReservedForOle)); #else // TARGET_X86 pOleTlsData = (SOleTlsData *)NtCurrentTeb()->ReservedForOle; #endif // TARGET_X86 if (pOleTlsData == NULL) { pOleTlsData = (SOleTlsData *)SetupOleContext(); } return pOleTlsData; } FORCEINLINE static void *GetCOMIPFromRCW_GetTargetNoInterception(IUnknown *pUnk, ComPlusCallInfo *pComInfo) { LIMITED_METHOD_CONTRACT; LPVOID *lpVtbl = *(LPVOID **)pUnk; return lpVtbl[pComInfo->m_cachedComSlot]; } FORCEINLINE static IUnknown *GetCOMIPFromRCW_GetIUnknownFromRCWCache(RCW *pRCW, MethodTable * pItfMT) { LIMITED_METHOD_CONTRACT; // The code in this helper is the "fast path" that used to be generated directly // to compiled ML stubs. 
The idea is to aim for an efficient RCW cache hit. SOleTlsData * pOleTlsData = GetOrCreateOleTlsData(); // test for free-threaded after testing for context match to optimize for apartment-bound objects if (pOleTlsData->pCurrentCtx == pRCW->GetWrapperCtxCookie() || pRCW->IsFreeThreaded()) { for (int i = 0; i < INTERFACE_ENTRY_CACHE_SIZE; i++) { if (pRCW->m_aInterfaceEntries[i].m_pMT == pItfMT) { return pRCW->m_aInterfaceEntries[i].m_pUnknown; } } } return NULL; } // Like GetCOMIPFromRCW_GetIUnknownFromRCWCache but also computes the target. This is a couple of instructions // faster than GetCOMIPFromRCW_GetIUnknownFromRCWCache + GetCOMIPFromRCW_GetTargetNoInterception. FORCEINLINE static IUnknown *GetCOMIPFromRCW_GetIUnknownFromRCWCache_NoInterception(RCW *pRCW, ComPlusCallInfo *pComInfo, void **ppTarget) { LIMITED_METHOD_CONTRACT; // The code in this helper is the "fast path" that used to be generated directly // to compiled ML stubs. The idea is to aim for an efficient RCW cache hit. SOleTlsData *pOleTlsData = GetOrCreateOleTlsData(); MethodTable *pItfMT = pComInfo->m_pInterfaceMT; // test for free-threaded after testing for context match to optimize for apartment-bound objects if (pOleTlsData->pCurrentCtx == pRCW->GetWrapperCtxCookie() || pRCW->IsFreeThreaded()) { for (int i = 0; i < INTERFACE_ENTRY_CACHE_SIZE; i++) { if (pRCW->m_aInterfaceEntries[i].m_pMT == pItfMT) { IUnknown *pUnk = pRCW->m_aInterfaceEntries[i].m_pUnknown; _ASSERTE(pUnk != NULL); *ppTarget = GetCOMIPFromRCW_GetTargetNoInterception(pUnk, pComInfo); return pUnk; } } } return NULL; } FORCEINLINE static void *GetCOMIPFromRCW_GetTarget(IUnknown *pUnk, ComPlusCallInfo *pComInfo) { LIMITED_METHOD_CONTRACT; LPVOID *lpVtbl = *(LPVOID **)pUnk; return lpVtbl[pComInfo->m_cachedComSlot]; } NOINLINE static IUnknown* GetCOMIPFromRCWHelper(LPVOID pFCall, OBJECTREF pSrc, MethodDesc* pMD, void **ppTarget) { FC_INNER_PROLOG(pFCall); IUnknown *pIntf = NULL; // This is only called in IL stubs which are in CER, so 
we don't need to worry about ThreadAbort HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_1(Frame::FRAME_ATTR_NO_THREAD_ABORT|Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2, pSrc); SafeComHolder<IUnknown> pRetUnk; ComPlusCallInfo *pComInfo = ComPlusCallInfo::FromMethodDesc(pMD); pRetUnk = ComObject::GetComIPFromRCWThrowing(&pSrc, pComInfo->m_pInterfaceMT); *ppTarget = GetCOMIPFromRCW_GetTarget(pRetUnk, pComInfo); _ASSERTE(*ppTarget != NULL); GetCOMIPFromRCW_ClearFP(); pIntf = pRetUnk.Extract(); // No exception will be thrown here (including thread abort as it is delayed in IL stubs) HELPER_METHOD_FRAME_END(); FC_INNER_EPILOG(); return pIntf; } //================================================================================================================== // The GetCOMIPFromRCW helper exists in four specialized versions to optimize CLR->COM perf. Please be careful when // changing this code as one of these methods is executed as part of every CLR->COM call so every instruction counts. //================================================================================================================== #include <optsmallperfcritical.h> // This helper can handle any CLR->COM call, it supports hosting, // and clears FP state on x86 for compatibility with VB6. 
FCIMPL4(IUnknown*, StubHelpers::GetCOMIPFromRCW, Object* pSrcUNSAFE, MethodDesc* pMD, void **ppTarget, CLR_BOOL* pfNeedsRelease) { CONTRACTL { FCALL_CHECK; PRECONDITION(pMD->IsComPlusCall() || pMD->IsGenericComPlusCall() || pMD->IsEEImpl()); } CONTRACTL_END; OBJECTREF pSrc = ObjectToOBJECTREF(pSrcUNSAFE); *pfNeedsRelease = false; ComPlusCallInfo *pComInfo = ComPlusCallInfo::FromMethodDesc(pMD); RCW *pRCW = pSrc->PassiveGetSyncBlock()->GetInteropInfoNoCreate()->GetRawRCW(); if (pRCW != NULL) { IUnknown * pUnk = GetCOMIPFromRCW_GetIUnknownFromRCWCache(pRCW, pComInfo->m_pInterfaceMT); if (pUnk != NULL) { *ppTarget = GetCOMIPFromRCW_GetTarget(pUnk, pComInfo); if (*ppTarget != NULL) { GetCOMIPFromRCW_ClearFP(); return pUnk; } } } /* if we didn't find the COM interface pointer in the cache we will have to erect an HMF */ *pfNeedsRelease = true; FC_INNER_RETURN(IUnknown*, GetCOMIPFromRCWHelper(StubHelpers::GetCOMIPFromRCW, pSrc, pMD, ppTarget)); } FCIMPLEND #include <optdefault.h> FCIMPL2(void, StubHelpers::ObjectMarshaler__ConvertToNative, Object* pSrcUNSAFE, VARIANT* pDest) { FCALL_CONTRACT; OBJECTREF pSrc = ObjectToOBJECTREF(pSrcUNSAFE); HELPER_METHOD_FRAME_BEGIN_1(pSrc); if (pDest->vt & VT_BYREF) { OleVariant::MarshalOleRefVariantForObject(&pSrc, pDest); } else { OleVariant::MarshalOleVariantForObject(&pSrc, pDest); } HELPER_METHOD_FRAME_END(); } FCIMPLEND FCIMPL1(Object*, StubHelpers::ObjectMarshaler__ConvertToManaged, VARIANT* pSrc) { FCALL_CONTRACT; OBJECTREF retVal = NULL; HELPER_METHOD_FRAME_BEGIN_RET_1(retVal); // The IL stub is going to call ObjectMarshaler__ClearNative() afterwards. // If it doesn't it's a bug in ILObjectMarshaler. 
OleVariant::MarshalObjectForOleVariant(pSrc, &retVal); HELPER_METHOD_FRAME_END(); return OBJECTREFToObject(retVal); } FCIMPLEND FCIMPL1(void, StubHelpers::ObjectMarshaler__ClearNative, VARIANT* pSrc) { FCALL_CONTRACT; HELPER_METHOD_FRAME_BEGIN_0(); SafeVariantClear(pSrc); HELPER_METHOD_FRAME_END(); } FCIMPLEND #include <optsmallperfcritical.h> FCIMPL4(IUnknown*, StubHelpers::InterfaceMarshaler__ConvertToNative, Object* pObjUNSAFE, MethodTable* pItfMT, MethodTable* pClsMT, DWORD dwFlags) { FCALL_CONTRACT; if (NULL == pObjUNSAFE) { return NULL; } IUnknown *pIntf = NULL; OBJECTREF pObj = ObjectToOBJECTREF(pObjUNSAFE); // This is only called in IL stubs which are in CER, so we don't need to worry about ThreadAbort HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_1(Frame::FRAME_ATTR_NO_THREAD_ABORT, pObj); // We're going to be making some COM calls, better initialize COM. EnsureComStarted(); pIntf = MarshalObjectToInterface(&pObj, pItfMT, pClsMT, dwFlags); // No exception will be thrown here (including thread abort as it is delayed in IL stubs) HELPER_METHOD_FRAME_END(); return pIntf; } FCIMPLEND FCIMPL4(Object*, StubHelpers::InterfaceMarshaler__ConvertToManaged, IUnknown **ppUnk, MethodTable *pItfMT, MethodTable *pClsMT, DWORD dwFlags) { FCALL_CONTRACT; if (NULL == *ppUnk) { return NULL; } OBJECTREF pObj = NULL; HELPER_METHOD_FRAME_BEGIN_RET_1(pObj); // We're going to be making some COM calls, better initialize COM. 
EnsureComStarted(); UnmarshalObjectFromInterface(&pObj, ppUnk, pItfMT, pClsMT, dwFlags); HELPER_METHOD_FRAME_END(); return OBJECTREFToObject(pObj); } FCIMPLEND extern "C" void QCALLTYPE InterfaceMarshaler__ClearNative(IUnknown * pUnk) { QCALL_CONTRACT; BEGIN_QCALL; ULONG cbRef = SafeReleasePreemp(pUnk); LogInteropRelease(pUnk, cbRef, "InterfaceMarshalerBase::ClearNative: In/Out release"); END_QCALL; } #include <optdefault.h> #endif // FEATURE_COMINTEROP FCIMPL0(void, StubHelpers::SetLastError) { // Make sure this is the first thing we do after returning from the target, as almost everything can cause the last error to get trashed DWORD lastError = ::GetLastError(); FCALL_CONTRACT; GetThread()->m_dwLastError = lastError; } FCIMPLEND FCIMPL0(void, StubHelpers::ClearLastError) { FCALL_CONTRACT; ::SetLastError(0); } FCIMPLEND FCIMPL1(void*, StubHelpers::GetNDirectTarget, NDirectMethodDesc* pNMD) { FCALL_CONTRACT; FCUnique(0xa2); return pNMD->GetNDirectTarget(); } FCIMPLEND FCIMPL1(void*, StubHelpers::GetDelegateTarget, DelegateObject *pThisUNSAFE) { PCODE pEntryPoint = NULL; #ifdef _DEBUG BEGIN_PRESERVE_LAST_ERROR; #endif CONTRACTL { FCALL_CHECK; PRECONDITION(CheckPointer(pThisUNSAFE)); } CONTRACTL_END; DELEGATEREF orefThis = (DELEGATEREF)ObjectToOBJECTREF(pThisUNSAFE); #if defined(HOST_64BIT) UINT_PTR target = (UINT_PTR)orefThis->GetMethodPtrAux(); // See code:GenericPInvokeCalliHelper // The lowest bit is used to distinguish between MD and target on 64-bit. 
target = (target << 1) | 1; #endif // HOST_64BIT pEntryPoint = orefThis->GetMethodPtrAux(); #ifdef _DEBUG END_PRESERVE_LAST_ERROR; #endif return (PVOID)pEntryPoint; } FCIMPLEND FCIMPL2(void, StubHelpers::ThrowInteropParamException, UINT resID, UINT paramIdx) { FCALL_CONTRACT; HELPER_METHOD_FRAME_BEGIN_0(); ::ThrowInteropParamException(resID, paramIdx); HELPER_METHOD_FRAME_END(); } FCIMPLEND #ifdef PROFILING_SUPPORTED FCIMPL3(SIZE_T, StubHelpers::ProfilerBeginTransitionCallback, SIZE_T pSecretParam, Thread* pThread, Object* unsafe_pThis) { FCALL_CONTRACT; // We can get here with an ngen image generated with "/prof", // even if the profiler doesn't want to track transitions. if (!CORProfilerTrackTransitions()) { return NULL; } MethodDesc* pRealMD = NULL; BEGIN_PRESERVE_LAST_ERROR; // We must transition to preemptive GC mode before calling out to the profiler, // and the transition requires us to set up a HMF. DELEGATEREF dref = (DELEGATEREF)ObjectToOBJECTREF(unsafe_pThis); HELPER_METHOD_FRAME_BEGIN_RET_1(dref); bool fReverseInterop = false; if (NULL == pThread) { // This is our signal for the reverse interop cases. fReverseInterop = true; pThread = GET_THREAD(); // the secret param in this casee is the UMEntryThunk pRealMD = ((UMEntryThunk*)pSecretParam)->GetMethod(); } else if (pSecretParam == 0) { // Secret param is null. This is the calli pinvoke case or the unmanaged delegate case. // We have an unmanaged target address but no MD. For the unmanaged delegate case, we can // still retrieve the MD by looking at the "this" object. if (dref == NULL) { // calli pinvoke case pRealMD = NULL; } else { // unmanaged delegate case MethodTable* pMT = dref->GetMethodTable(); _ASSERTE(pMT->IsDelegate()); EEClass * pClass = pMT->GetClass(); pRealMD = ((DelegateEEClass*)pClass)->GetInvokeMethod(); _ASSERTE(pRealMD); } } else { // This is either the COM interop or the pinvoke case. 
pRealMD = (MethodDesc*)pSecretParam; } { GCX_PREEMP_THREAD_EXISTS(pThread); if (fReverseInterop) { ProfilerUnmanagedToManagedTransitionMD(pRealMD, COR_PRF_TRANSITION_CALL); } else { ProfilerManagedToUnmanagedTransitionMD(pRealMD, COR_PRF_TRANSITION_CALL); } } HELPER_METHOD_FRAME_END(); END_PRESERVE_LAST_ERROR; return (SIZE_T)pRealMD; } FCIMPLEND FCIMPL2(void, StubHelpers::ProfilerEndTransitionCallback, MethodDesc* pRealMD, Thread* pThread) { FCALL_CONTRACT; // We can get here with an ngen image generated with "/prof", // even if the profiler doesn't want to track transitions. if (!CORProfilerTrackTransitions()) { return; } BEGIN_PRESERVE_LAST_ERROR; // We must transition to preemptive GC mode before calling out to the profiler, // and the transition requires us to set up a HMF. HELPER_METHOD_FRAME_BEGIN_0(); { bool fReverseInterop = false; if (NULL == pThread) { // if pThread is null, we are doing reverse interop pThread = GET_THREAD(); fReverseInterop = true; } GCX_PREEMP_THREAD_EXISTS(pThread); if (fReverseInterop) { ProfilerManagedToUnmanagedTransitionMD(pRealMD, COR_PRF_TRANSITION_RETURN); } else { ProfilerUnmanagedToManagedTransitionMD(pRealMD, COR_PRF_TRANSITION_RETURN); } } HELPER_METHOD_FRAME_END(); END_PRESERVE_LAST_ERROR; } FCIMPLEND #endif // PROFILING_SUPPORTED FCIMPL1(Object*, StubHelpers::GetHRExceptionObject, HRESULT hr) { FCALL_CONTRACT; OBJECTREF oThrowable = NULL; HELPER_METHOD_FRAME_BEGIN_RET_1(oThrowable); { // GetExceptionForHR uses equivalant logic as COMPlusThrowHR GetExceptionForHR(hr, &oThrowable); } HELPER_METHOD_FRAME_END(); return OBJECTREFToObject(oThrowable); } FCIMPLEND #ifdef FEATURE_COMINTEROP FCIMPL3(Object*, StubHelpers::GetCOMHRExceptionObject, HRESULT hr, MethodDesc *pMD, Object *unsafe_pThis) { FCALL_CONTRACT; OBJECTREF oThrowable = NULL; // get 'this' OBJECTREF oref = ObjectToOBJECTREF(unsafe_pThis); HELPER_METHOD_FRAME_BEGIN_RET_2(oref, oThrowable); { IErrorInfo *pErrInfo = NULL; if (pErrInfo == NULL && pMD != NULL) { // 
Retrieve the interface method table. MethodTable *pItfMT = ComPlusCallInfo::FromMethodDesc(pMD)->m_pInterfaceMT; // Get IUnknown pointer for this interface on this object IUnknown* pUnk = ComObject::GetComIPFromRCW(&oref, pItfMT); if (pUnk != NULL) { // Check to see if the component supports error information for this interface. IID ItfIID; pItfMT->GetGuid(&ItfIID, TRUE); pErrInfo = GetSupportedErrorInfo(pUnk, ItfIID); DWORD cbRef = SafeRelease(pUnk); LogInteropRelease(pUnk, cbRef, "IUnk to QI for ISupportsErrorInfo"); } } GetExceptionForHR(hr, pErrInfo, &oThrowable); } HELPER_METHOD_FRAME_END(); return OBJECTREFToObject(oThrowable); } FCIMPLEND #endif // FEATURE_COMINTEROP FCIMPL3(void, StubHelpers::FmtClassUpdateNativeInternal, Object* pObjUNSAFE, BYTE* pbNative, OBJECTREF *ppCleanupWorkListOnStack) { FCALL_CONTRACT; OBJECTREF pObj = ObjectToOBJECTREF(pObjUNSAFE); HELPER_METHOD_FRAME_BEGIN_1(pObj); MethodTable* pMT = pObj->GetMethodTable(); if (pMT->IsBlittable()) { memcpyNoGCRefs(pbNative, pObj->GetData(), pMT->GetNativeSize()); } else { MethodDesc* structMarshalStub; { GCX_PREEMP(); structMarshalStub = NDirect::CreateStructMarshalILStub(pMT); } MarshalStructViaILStub(structMarshalStub, pObj->GetData(), pbNative, StructMarshalStubs::MarshalOperation::Marshal, (void**)ppCleanupWorkListOnStack); } HELPER_METHOD_FRAME_END(); } FCIMPLEND FCIMPL2(void, StubHelpers::FmtClassUpdateCLRInternal, Object* pObjUNSAFE, BYTE* pbNative) { FCALL_CONTRACT; OBJECTREF pObj = ObjectToOBJECTREF(pObjUNSAFE); HELPER_METHOD_FRAME_BEGIN_1(pObj); MethodTable* pMT = pObj->GetMethodTable(); if (pMT->IsBlittable()) { memcpyNoGCRefs(pObj->GetData(), pbNative, pMT->GetNativeSize()); } else { MethodDesc* structMarshalStub; { GCX_PREEMP(); structMarshalStub = NDirect::CreateStructMarshalILStub(pMT); } MarshalStructViaILStub(structMarshalStub, pObj->GetData(), pbNative, StructMarshalStubs::MarshalOperation::Unmarshal); } HELPER_METHOD_FRAME_END(); } FCIMPLEND FCIMPL2(void, 
StubHelpers::LayoutDestroyNativeInternal, Object* pObjUNSAFE, BYTE* pbNative) { FCALL_CONTRACT; OBJECTREF pObj = ObjectToOBJECTREF(pObjUNSAFE); HELPER_METHOD_FRAME_BEGIN_1(pObj); MethodTable* pMT = pObj->GetMethodTable(); if (!pMT->IsBlittable()) { MethodDesc* structMarshalStub; { GCX_PREEMP(); structMarshalStub = NDirect::CreateStructMarshalILStub(pMT); } MarshalStructViaILStub(structMarshalStub, pObj->GetData(), pbNative, StructMarshalStubs::MarshalOperation::Cleanup); } HELPER_METHOD_FRAME_END(); } FCIMPLEND FCIMPL1(Object*, StubHelpers::AllocateInternal, EnregisteredTypeHandle pRegisteredTypeHnd) { FCALL_CONTRACT; TypeHandle typeHnd = TypeHandle::FromPtr(pRegisteredTypeHnd); OBJECTREF objRet = NULL; HELPER_METHOD_FRAME_BEGIN_RET_1(objRet); MethodTable* pMT = typeHnd.GetMethodTable(); objRet = pMT->Allocate(); HELPER_METHOD_FRAME_END(); return OBJECTREFToObject(objRet); } FCIMPLEND FCIMPL3(void, StubHelpers::MarshalToUnmanagedVaListInternal, va_list va, DWORD cbVaListSize, const VARARGS* pArgIterator) { FCALL_CONTRACT; HELPER_METHOD_FRAME_BEGIN_0(); VARARGS::MarshalToUnmanagedVaList(va, cbVaListSize, pArgIterator); HELPER_METHOD_FRAME_END(); } FCIMPLEND FCIMPL2(void, StubHelpers::MarshalToManagedVaListInternal, va_list va, VARARGS* pArgIterator) { FCALL_CONTRACT; VARARGS::MarshalToManagedVaList(va, pArgIterator); } FCIMPLEND FCIMPL3(void, StubHelpers::ValidateObject, Object *pObjUNSAFE, MethodDesc *pMD, Object *pThisUNSAFE) { FCALL_CONTRACT; #ifdef VERIFY_HEAP HELPER_METHOD_FRAME_BEGIN_0(); StackSString errorString; EX_TRY { AVInRuntimeImplOkayHolder AVOkay; // don't validate the next object if a BGC is in progress. we can race with background // sweep which could make the next object a Free object underneath us if it's dead. 
ValidateObjectInternal(pObjUNSAFE, !(GCHeapUtilities::GetGCHeap()->IsConcurrentGCInProgress())); } EX_CATCH { FormatValidationMessage(ResolveInteropMethod(pThisUNSAFE, pMD), errorString); EEPOLICY_HANDLE_FATAL_ERROR_WITH_MESSAGE(COR_E_EXECUTIONENGINE, errorString.GetUnicode()); } EX_END_CATCH_UNREACHABLE; HELPER_METHOD_FRAME_END(); #else // VERIFY_HEAP FCUnique(0xa3); UNREACHABLE_MSG("No validation support without VERIFY_HEAP"); #endif // VERIFY_HEAP } FCIMPLEND FCIMPL3(void, StubHelpers::ValidateByref, void *pByref, MethodDesc *pMD, Object *pThisUNSAFE) { FCALL_CONTRACT; #ifdef VERIFY_HEAP // We cannot validate byrefs at this point as code:GCHeap.GetContainingObject could potentially race // with allocations on other threads. We'll just remember this byref along with the interop MD and // perform the validation on next GC (see code:StubHelpers.ProcessByrefValidationList). // Skip byref if is not pointing inside managed heap if (!GCHeapUtilities::GetGCHeap()->IsHeapPointer(pByref)) { return; } ByrefValidationEntry entry; entry.pByref = pByref; entry.pMD = ResolveInteropMethod(pThisUNSAFE, pMD); HELPER_METHOD_FRAME_BEGIN_0(); SIZE_T NumOfEntries = 0; { CrstHolder ch(&s_ByrefValidationLock); if (s_ByrefValidationIndex >= s_ByrefValidationEntries.Size()) { // The validation list grows as necessary, for simplicity we never shrink it. 
SIZE_T newSize; if (!ClrSafeInt<SIZE_T>::multiply(s_ByrefValidationIndex, 2, newSize) || !ClrSafeInt<SIZE_T>::addition(newSize, 1, newSize)) { ThrowHR(COR_E_OVERFLOW); } s_ByrefValidationEntries.ReSizeThrows(newSize); _ASSERTE(s_ByrefValidationIndex < s_ByrefValidationEntries.Size()); } s_ByrefValidationEntries[s_ByrefValidationIndex] = entry; NumOfEntries = ++s_ByrefValidationIndex; } if (NumOfEntries > BYREF_VALIDATION_LIST_MAX_SIZE) { // if the list is too big, trigger GC now GCHeapUtilities::GetGCHeap()->GarbageCollect(0); } HELPER_METHOD_FRAME_END(); #else // VERIFY_HEAP FCUnique(0xa4); UNREACHABLE_MSG("No validation support without VERIFY_HEAP"); #endif // VERIFY_HEAP } FCIMPLEND FCIMPL0(void*, StubHelpers::GetStubContext) { FCALL_CONTRACT; FCUnique(0xa0); UNREACHABLE_MSG_RET("This is a JIT intrinsic!"); } FCIMPLEND FCIMPL2(void, StubHelpers::LogPinnedArgument, MethodDesc *target, Object *pinnedArg) { FCALL_CONTRACT; SIZE_T managedSize = 0; if (pinnedArg != NULL) { // Can pass null objects to interop, only check the size if the object is valid. 
managedSize = pinnedArg->GetSize(); } if (target != NULL) { STRESS_LOG3(LF_STUBS, LL_INFO100, "Managed object %#X with size '%#X' pinned for interop to Method [%pM]\n", pinnedArg, managedSize, target); } else { STRESS_LOG2(LF_STUBS, LL_INFO100, "Managed object %#X pinned for interop with size '%#X'", pinnedArg, managedSize); } } FCIMPLEND FCIMPL1(DWORD, StubHelpers::CalcVaListSize, VARARGS *varargs) { FCALL_CONTRACT; return VARARGS::CalcVaListSize(varargs); } FCIMPLEND #ifdef FEATURE_ARRAYSTUB_AS_IL NOINLINE static void ArrayTypeCheckSlow(Object* element, PtrArray* arr) { FC_INNER_PROLOG(StubHelpers::ArrayTypeCheck); HELPER_METHOD_FRAME_BEGIN_ATTRIB(Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2); if (!ObjIsInstanceOf(element, arr->GetArrayElementTypeHandle())) COMPlusThrow(kArrayTypeMismatchException); HELPER_METHOD_FRAME_END(); FC_INNER_EPILOG(); } FCIMPL2(void, StubHelpers::ArrayTypeCheck, Object* element, PtrArray* arr) { FCALL_CONTRACT; if (ObjIsInstanceOfCached(element, arr->GetArrayElementTypeHandle()) == TypeHandle::CanCast) return; FC_INNER_RETURN_VOID(ArrayTypeCheckSlow(element, arr)); } FCIMPLEND #endif // FEATURE_ARRAYSTUB_AS_IL #ifdef FEATURE_MULTICASTSTUB_AS_IL FCIMPL2(void, StubHelpers::MulticastDebuggerTraceHelper, Object* element, INT32 count) { FCALL_CONTRACT; FCUnique(0xa5); } FCIMPLEND #endif // FEATURE_MULTICASTSTUB_AS_IL FCIMPL0(void*, StubHelpers::NextCallReturnAddress) { FCALL_CONTRACT; UNREACHABLE_MSG("This is a JIT intrinsic!"); } FCIMPLEND
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // File: stubhelpers.cpp // #include "common.h" #include "mlinfo.h" #include "stubhelpers.h" #include "jitinterface.h" #include "dllimport.h" #include "fieldmarshaler.h" #include "comdelegate.h" #include "eventtrace.h" #include "comdatetime.h" #include "gcheaputilities.h" #include "interoputil.h" #ifdef FEATURE_COMINTEROP #include <oletls.h> #include "olecontexthelpers.h" #include "runtimecallablewrapper.h" #include "comcallablewrapper.h" #include "clrtocomcall.h" #include "cominterfacemarshaler.h" #endif #ifdef VERIFY_HEAP CQuickArray<StubHelpers::ByrefValidationEntry> StubHelpers::s_ByrefValidationEntries; SIZE_T StubHelpers::s_ByrefValidationIndex = 0; CrstStatic StubHelpers::s_ByrefValidationLock; // static void StubHelpers::Init() { WRAPPER_NO_CONTRACT; s_ByrefValidationLock.Init(CrstPinnedByrefValidation); } // static void StubHelpers::ValidateObjectInternal(Object *pObjUNSAFE, BOOL fValidateNextObj) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; _ASSERTE(GCHeapUtilities::GetGCHeap()->RuntimeStructuresValid()); // validate the object - there's no need to validate next object's // header since we validate the next object explicitly below if (pObjUNSAFE) { pObjUNSAFE->Validate(/*bDeep=*/ TRUE, /*bVerifyNextHeader=*/ FALSE, /*bVerifySyncBlock=*/ TRUE); } // and the next object as required if (fValidateNextObj) { Object *nextObj = GCHeapUtilities::GetGCHeap()->NextObj(pObjUNSAFE); if (nextObj != NULL) { // Note that the MethodTable of the object (i.e. the pointer at offset 0) can change from // g_pFreeObjectMethodTable to NULL, from NULL to <legal-value>, or possibly also from // g_pFreeObjectMethodTable to <legal-value> concurrently while executing this function. // Once <legal-value> is seen, we believe that the object should pass the Validate check. 
// We have to be careful and read the pointer only once to avoid "phantom reads". MethodTable *pMT = VolatileLoad(nextObj->GetMethodTablePtr()); if (pMT != NULL && pMT != g_pFreeObjectMethodTable) { // do *not* verify the next object's syncblock - the next object is not guaranteed to // be "alive" so the finalizer thread may have already released its syncblock nextObj->Validate(/*bDeep=*/ TRUE, /*bVerifyNextHeader=*/ FALSE, /*bVerifySyncBlock=*/ FALSE); } } } } // static MethodDesc *StubHelpers::ResolveInteropMethod(Object *pThisUNSAFE, MethodDesc *pMD) { WRAPPER_NO_CONTRACT; if (pMD == NULL && pThisUNSAFE != NULL) { // if this is a call via delegate, get its Invoke method MethodTable *pMT = pThisUNSAFE->GetMethodTable(); _ASSERTE(pMT->IsDelegate()); return ((DelegateEEClass *)pMT->GetClass())->GetInvokeMethod(); } return pMD; } // static void StubHelpers::FormatValidationMessage(MethodDesc *pMD, SString &ssErrorString) { CONTRACTL { THROWS; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; ssErrorString.Append(W("Detected managed heap corruption, likely culprit is interop call through ")); if (pMD == NULL) { // the only case where we don't have interop MD is CALLI ssErrorString.Append(W("CALLI.")); } else { ssErrorString.Append(W("method '")); StackSString ssClassName; pMD->GetMethodTable()->_GetFullyQualifiedNameForClass(ssClassName); ssErrorString.Append(ssClassName); ssErrorString.Append(NAMESPACE_SEPARATOR_CHAR); ssErrorString.AppendUTF8(pMD->GetName()); ssErrorString.Append(W("'.")); } } // static void StubHelpers::ProcessByrefValidationList() { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; StackSString errorString; ByrefValidationEntry entry = { NULL, NULL }; EX_TRY { AVInRuntimeImplOkayHolder AVOkay; // Process all byref validation entries we have saved since the last GC. Note that EE is suspended at // this point so we don't have to take locks and we can safely call code:GCHeap.GetContainingObject. 
for (SIZE_T i = 0; i < s_ByrefValidationIndex; i++) { entry = s_ByrefValidationEntries[i]; Object *pObjUNSAFE = GCHeapUtilities::GetGCHeap()->GetContainingObject(entry.pByref, false); ValidateObjectInternal(pObjUNSAFE, TRUE); } } EX_CATCH { EX_TRY { FormatValidationMessage(entry.pMD, errorString); EEPOLICY_HANDLE_FATAL_ERROR_WITH_MESSAGE(COR_E_EXECUTIONENGINE, errorString.GetUnicode()); } EX_CATCH { EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE); } EX_END_CATCH_UNREACHABLE; } EX_END_CATCH_UNREACHABLE; s_ByrefValidationIndex = 0; } #endif // VERIFY_HEAP #ifdef FEATURE_COMINTEROP FORCEINLINE static void GetCOMIPFromRCW_ClearFP() { LIMITED_METHOD_CONTRACT; #ifdef TARGET_X86 // As per ASURT 146699 we need to clear FP state before calling to COM // the following sequence was previously generated to compiled ML stubs // and is faster than _clearfp(). __asm { fnstsw ax and eax, 0x3F jz NoNeedToClear fnclex NoNeedToClear: } #endif // TARGET_X86 } FORCEINLINE static SOleTlsData *GetOrCreateOleTlsData() { LIMITED_METHOD_CONTRACT; SOleTlsData *pOleTlsData; #ifdef TARGET_X86 // This saves 1 memory instruction over NtCurretTeb()->ReservedForOle because // NtCurrentTeb() reads _TEB.NtTib.Self which is the same as what FS:0 already // points to. pOleTlsData = (SOleTlsData *)(ULONG_PTR)__readfsdword(offsetof(TEB, ReservedForOle)); #else // TARGET_X86 pOleTlsData = (SOleTlsData *)NtCurrentTeb()->ReservedForOle; #endif // TARGET_X86 if (pOleTlsData == NULL) { pOleTlsData = (SOleTlsData *)SetupOleContext(); } return pOleTlsData; } FORCEINLINE static void *GetCOMIPFromRCW_GetTargetNoInterception(IUnknown *pUnk, ComPlusCallInfo *pComInfo) { LIMITED_METHOD_CONTRACT; LPVOID *lpVtbl = *(LPVOID **)pUnk; return lpVtbl[pComInfo->m_cachedComSlot]; } FORCEINLINE static IUnknown *GetCOMIPFromRCW_GetIUnknownFromRCWCache(RCW *pRCW, MethodTable * pItfMT) { LIMITED_METHOD_CONTRACT; // The code in this helper is the "fast path" that used to be generated directly // to compiled ML stubs. 
The idea is to aim for an efficient RCW cache hit. SOleTlsData * pOleTlsData = GetOrCreateOleTlsData(); // test for free-threaded after testing for context match to optimize for apartment-bound objects if (pOleTlsData->pCurrentCtx == pRCW->GetWrapperCtxCookie() || pRCW->IsFreeThreaded()) { for (int i = 0; i < INTERFACE_ENTRY_CACHE_SIZE; i++) { if (pRCW->m_aInterfaceEntries[i].m_pMT == pItfMT) { return pRCW->m_aInterfaceEntries[i].m_pUnknown; } } } return NULL; } // Like GetCOMIPFromRCW_GetIUnknownFromRCWCache but also computes the target. This is a couple of instructions // faster than GetCOMIPFromRCW_GetIUnknownFromRCWCache + GetCOMIPFromRCW_GetTargetNoInterception. FORCEINLINE static IUnknown *GetCOMIPFromRCW_GetIUnknownFromRCWCache_NoInterception(RCW *pRCW, ComPlusCallInfo *pComInfo, void **ppTarget) { LIMITED_METHOD_CONTRACT; // The code in this helper is the "fast path" that used to be generated directly // to compiled ML stubs. The idea is to aim for an efficient RCW cache hit. SOleTlsData *pOleTlsData = GetOrCreateOleTlsData(); MethodTable *pItfMT = pComInfo->m_pInterfaceMT; // test for free-threaded after testing for context match to optimize for apartment-bound objects if (pOleTlsData->pCurrentCtx == pRCW->GetWrapperCtxCookie() || pRCW->IsFreeThreaded()) { for (int i = 0; i < INTERFACE_ENTRY_CACHE_SIZE; i++) { if (pRCW->m_aInterfaceEntries[i].m_pMT == pItfMT) { IUnknown *pUnk = pRCW->m_aInterfaceEntries[i].m_pUnknown; _ASSERTE(pUnk != NULL); *ppTarget = GetCOMIPFromRCW_GetTargetNoInterception(pUnk, pComInfo); return pUnk; } } } return NULL; } FORCEINLINE static void *GetCOMIPFromRCW_GetTarget(IUnknown *pUnk, ComPlusCallInfo *pComInfo) { LIMITED_METHOD_CONTRACT; LPVOID *lpVtbl = *(LPVOID **)pUnk; return lpVtbl[pComInfo->m_cachedComSlot]; } NOINLINE static IUnknown* GetCOMIPFromRCWHelper(LPVOID pFCall, OBJECTREF pSrc, MethodDesc* pMD, void **ppTarget) { FC_INNER_PROLOG(pFCall); IUnknown *pIntf = NULL; // This is only called in IL stubs which are in CER, so 
we don't need to worry about ThreadAbort HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_1(Frame::FRAME_ATTR_NO_THREAD_ABORT|Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2, pSrc); SafeComHolder<IUnknown> pRetUnk; ComPlusCallInfo *pComInfo = ComPlusCallInfo::FromMethodDesc(pMD); pRetUnk = ComObject::GetComIPFromRCWThrowing(&pSrc, pComInfo->m_pInterfaceMT); *ppTarget = GetCOMIPFromRCW_GetTarget(pRetUnk, pComInfo); _ASSERTE(*ppTarget != NULL); GetCOMIPFromRCW_ClearFP(); pIntf = pRetUnk.Extract(); // No exception will be thrown here (including thread abort as it is delayed in IL stubs) HELPER_METHOD_FRAME_END(); FC_INNER_EPILOG(); return pIntf; } //================================================================================================================== // The GetCOMIPFromRCW helper exists in four specialized versions to optimize CLR->COM perf. Please be careful when // changing this code as one of these methods is executed as part of every CLR->COM call so every instruction counts. //================================================================================================================== #include <optsmallperfcritical.h> // This helper can handle any CLR->COM call, it supports hosting, // and clears FP state on x86 for compatibility with VB6. 
FCIMPL4(IUnknown*, StubHelpers::GetCOMIPFromRCW, Object* pSrcUNSAFE, MethodDesc* pMD, void **ppTarget, CLR_BOOL* pfNeedsRelease) { CONTRACTL { FCALL_CHECK; PRECONDITION(pMD->IsComPlusCall() || pMD->IsGenericComPlusCall() || pMD->IsEEImpl()); } CONTRACTL_END; OBJECTREF pSrc = ObjectToOBJECTREF(pSrcUNSAFE); *pfNeedsRelease = false; ComPlusCallInfo *pComInfo = ComPlusCallInfo::FromMethodDesc(pMD); RCW *pRCW = pSrc->PassiveGetSyncBlock()->GetInteropInfoNoCreate()->GetRawRCW(); if (pRCW != NULL) { IUnknown * pUnk = GetCOMIPFromRCW_GetIUnknownFromRCWCache(pRCW, pComInfo->m_pInterfaceMT); if (pUnk != NULL) { *ppTarget = GetCOMIPFromRCW_GetTarget(pUnk, pComInfo); if (*ppTarget != NULL) { GetCOMIPFromRCW_ClearFP(); return pUnk; } } } /* if we didn't find the COM interface pointer in the cache we will have to erect an HMF */ *pfNeedsRelease = true; FC_INNER_RETURN(IUnknown*, GetCOMIPFromRCWHelper(StubHelpers::GetCOMIPFromRCW, pSrc, pMD, ppTarget)); } FCIMPLEND #include <optdefault.h> FCIMPL2(void, StubHelpers::ObjectMarshaler__ConvertToNative, Object* pSrcUNSAFE, VARIANT* pDest) { FCALL_CONTRACT; OBJECTREF pSrc = ObjectToOBJECTREF(pSrcUNSAFE); HELPER_METHOD_FRAME_BEGIN_1(pSrc); if (pDest->vt & VT_BYREF) { OleVariant::MarshalOleRefVariantForObject(&pSrc, pDest); } else { OleVariant::MarshalOleVariantForObject(&pSrc, pDest); } HELPER_METHOD_FRAME_END(); } FCIMPLEND FCIMPL1(Object*, StubHelpers::ObjectMarshaler__ConvertToManaged, VARIANT* pSrc) { FCALL_CONTRACT; OBJECTREF retVal = NULL; HELPER_METHOD_FRAME_BEGIN_RET_1(retVal); // The IL stub is going to call ObjectMarshaler__ClearNative() afterwards. // If it doesn't it's a bug in ILObjectMarshaler. 
OleVariant::MarshalObjectForOleVariant(pSrc, &retVal); HELPER_METHOD_FRAME_END(); return OBJECTREFToObject(retVal); } FCIMPLEND FCIMPL1(void, StubHelpers::ObjectMarshaler__ClearNative, VARIANT* pSrc) { FCALL_CONTRACT; HELPER_METHOD_FRAME_BEGIN_0(); SafeVariantClear(pSrc); HELPER_METHOD_FRAME_END(); } FCIMPLEND #include <optsmallperfcritical.h> FCIMPL4(IUnknown*, StubHelpers::InterfaceMarshaler__ConvertToNative, Object* pObjUNSAFE, MethodTable* pItfMT, MethodTable* pClsMT, DWORD dwFlags) { FCALL_CONTRACT; if (NULL == pObjUNSAFE) { return NULL; } IUnknown *pIntf = NULL; OBJECTREF pObj = ObjectToOBJECTREF(pObjUNSAFE); // This is only called in IL stubs which are in CER, so we don't need to worry about ThreadAbort HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_1(Frame::FRAME_ATTR_NO_THREAD_ABORT, pObj); // We're going to be making some COM calls, better initialize COM. EnsureComStarted(); pIntf = MarshalObjectToInterface(&pObj, pItfMT, pClsMT, dwFlags); // No exception will be thrown here (including thread abort as it is delayed in IL stubs) HELPER_METHOD_FRAME_END(); return pIntf; } FCIMPLEND FCIMPL4(Object*, StubHelpers::InterfaceMarshaler__ConvertToManaged, IUnknown **ppUnk, MethodTable *pItfMT, MethodTable *pClsMT, DWORD dwFlags) { FCALL_CONTRACT; if (NULL == *ppUnk) { return NULL; } OBJECTREF pObj = NULL; HELPER_METHOD_FRAME_BEGIN_RET_1(pObj); // We're going to be making some COM calls, better initialize COM. 
EnsureComStarted(); UnmarshalObjectFromInterface(&pObj, ppUnk, pItfMT, pClsMT, dwFlags); HELPER_METHOD_FRAME_END(); return OBJECTREFToObject(pObj); } FCIMPLEND extern "C" void QCALLTYPE InterfaceMarshaler__ClearNative(IUnknown * pUnk) { QCALL_CONTRACT; BEGIN_QCALL; ULONG cbRef = SafeReleasePreemp(pUnk); LogInteropRelease(pUnk, cbRef, "InterfaceMarshalerBase::ClearNative: In/Out release"); END_QCALL; } #include <optdefault.h> #endif // FEATURE_COMINTEROP FCIMPL0(void, StubHelpers::SetLastError) { // Make sure this is the first thing we do after returning from the target, as almost everything can cause the last error to get trashed DWORD lastError = ::GetLastError(); FCALL_CONTRACT; GetThread()->m_dwLastError = lastError; } FCIMPLEND FCIMPL0(void, StubHelpers::ClearLastError) { FCALL_CONTRACT; ::SetLastError(0); } FCIMPLEND FCIMPL1(void*, StubHelpers::GetNDirectTarget, NDirectMethodDesc* pNMD) { FCALL_CONTRACT; FCUnique(0xa2); return pNMD->GetNDirectTarget(); } FCIMPLEND FCIMPL1(void*, StubHelpers::GetDelegateTarget, DelegateObject *pThisUNSAFE) { PCODE pEntryPoint = NULL; #ifdef _DEBUG BEGIN_PRESERVE_LAST_ERROR; #endif CONTRACTL { FCALL_CHECK; PRECONDITION(CheckPointer(pThisUNSAFE)); } CONTRACTL_END; DELEGATEREF orefThis = (DELEGATEREF)ObjectToOBJECTREF(pThisUNSAFE); #if defined(HOST_64BIT) UINT_PTR target = (UINT_PTR)orefThis->GetMethodPtrAux(); // See code:GenericPInvokeCalliHelper // The lowest bit is used to distinguish between MD and target on 64-bit. 
target = (target << 1) | 1; #endif // HOST_64BIT pEntryPoint = orefThis->GetMethodPtrAux(); #ifdef _DEBUG END_PRESERVE_LAST_ERROR; #endif return (PVOID)pEntryPoint; } FCIMPLEND FCIMPL2(void, StubHelpers::ThrowInteropParamException, UINT resID, UINT paramIdx) { FCALL_CONTRACT; HELPER_METHOD_FRAME_BEGIN_0(); ::ThrowInteropParamException(resID, paramIdx); HELPER_METHOD_FRAME_END(); } FCIMPLEND #ifdef PROFILING_SUPPORTED FCIMPL3(SIZE_T, StubHelpers::ProfilerBeginTransitionCallback, SIZE_T pSecretParam, Thread* pThread, Object* unsafe_pThis) { FCALL_CONTRACT; // We can get here with an ngen image generated with "/prof", // even if the profiler doesn't want to track transitions. if (!CORProfilerTrackTransitions()) { return NULL; } MethodDesc* pRealMD = NULL; BEGIN_PRESERVE_LAST_ERROR; // We must transition to preemptive GC mode before calling out to the profiler, // and the transition requires us to set up a HMF. DELEGATEREF dref = (DELEGATEREF)ObjectToOBJECTREF(unsafe_pThis); HELPER_METHOD_FRAME_BEGIN_RET_1(dref); bool fReverseInterop = false; if (NULL == pThread) { // This is our signal for the reverse interop cases. fReverseInterop = true; pThread = GET_THREAD(); // the secret param in this casee is the UMEntryThunk pRealMD = ((UMEntryThunk*)pSecretParam)->GetMethod(); } else if (pSecretParam == 0) { // Secret param is null. This is the calli pinvoke case or the unmanaged delegate case. // We have an unmanaged target address but no MD. For the unmanaged delegate case, we can // still retrieve the MD by looking at the "this" object. if (dref == NULL) { // calli pinvoke case pRealMD = NULL; } else { // unmanaged delegate case MethodTable* pMT = dref->GetMethodTable(); _ASSERTE(pMT->IsDelegate()); EEClass * pClass = pMT->GetClass(); pRealMD = ((DelegateEEClass*)pClass)->GetInvokeMethod(); _ASSERTE(pRealMD); } } else { // This is either the COM interop or the pinvoke case. 
pRealMD = (MethodDesc*)pSecretParam; } { GCX_PREEMP_THREAD_EXISTS(pThread); if (fReverseInterop) { ProfilerUnmanagedToManagedTransitionMD(pRealMD, COR_PRF_TRANSITION_CALL); } else { ProfilerManagedToUnmanagedTransitionMD(pRealMD, COR_PRF_TRANSITION_CALL); } } HELPER_METHOD_FRAME_END(); END_PRESERVE_LAST_ERROR; return (SIZE_T)pRealMD; } FCIMPLEND FCIMPL2(void, StubHelpers::ProfilerEndTransitionCallback, MethodDesc* pRealMD, Thread* pThread) { FCALL_CONTRACT; // We can get here with an ngen image generated with "/prof", // even if the profiler doesn't want to track transitions. if (!CORProfilerTrackTransitions()) { return; } BEGIN_PRESERVE_LAST_ERROR; // We must transition to preemptive GC mode before calling out to the profiler, // and the transition requires us to set up a HMF. HELPER_METHOD_FRAME_BEGIN_0(); { bool fReverseInterop = false; if (NULL == pThread) { // if pThread is null, we are doing reverse interop pThread = GET_THREAD(); fReverseInterop = true; } GCX_PREEMP_THREAD_EXISTS(pThread); if (fReverseInterop) { ProfilerManagedToUnmanagedTransitionMD(pRealMD, COR_PRF_TRANSITION_RETURN); } else { ProfilerUnmanagedToManagedTransitionMD(pRealMD, COR_PRF_TRANSITION_RETURN); } } HELPER_METHOD_FRAME_END(); END_PRESERVE_LAST_ERROR; } FCIMPLEND #endif // PROFILING_SUPPORTED FCIMPL1(Object*, StubHelpers::GetHRExceptionObject, HRESULT hr) { FCALL_CONTRACT; OBJECTREF oThrowable = NULL; HELPER_METHOD_FRAME_BEGIN_RET_1(oThrowable); { // GetExceptionForHR uses equivalant logic as COMPlusThrowHR GetExceptionForHR(hr, &oThrowable); } HELPER_METHOD_FRAME_END(); return OBJECTREFToObject(oThrowable); } FCIMPLEND #ifdef FEATURE_COMINTEROP FCIMPL3(Object*, StubHelpers::GetCOMHRExceptionObject, HRESULT hr, MethodDesc *pMD, Object *unsafe_pThis) { FCALL_CONTRACT; OBJECTREF oThrowable = NULL; // get 'this' OBJECTREF oref = ObjectToOBJECTREF(unsafe_pThis); HELPER_METHOD_FRAME_BEGIN_RET_2(oref, oThrowable); { IErrorInfo *pErrInfo = NULL; if (pErrInfo == NULL && pMD != NULL) { // 
Retrieve the interface method table. MethodTable *pItfMT = ComPlusCallInfo::FromMethodDesc(pMD)->m_pInterfaceMT; // Get IUnknown pointer for this interface on this object IUnknown* pUnk = ComObject::GetComIPFromRCW(&oref, pItfMT); if (pUnk != NULL) { // Check to see if the component supports error information for this interface. IID ItfIID; pItfMT->GetGuid(&ItfIID, TRUE); pErrInfo = GetSupportedErrorInfo(pUnk, ItfIID); DWORD cbRef = SafeRelease(pUnk); LogInteropRelease(pUnk, cbRef, "IUnk to QI for ISupportsErrorInfo"); } } GetExceptionForHR(hr, pErrInfo, &oThrowable); } HELPER_METHOD_FRAME_END(); return OBJECTREFToObject(oThrowable); } FCIMPLEND #endif // FEATURE_COMINTEROP FCIMPL3(void, StubHelpers::FmtClassUpdateNativeInternal, Object* pObjUNSAFE, BYTE* pbNative, OBJECTREF *ppCleanupWorkListOnStack) { FCALL_CONTRACT; OBJECTREF pObj = ObjectToOBJECTREF(pObjUNSAFE); HELPER_METHOD_FRAME_BEGIN_1(pObj); MethodTable* pMT = pObj->GetMethodTable(); if (pMT->IsBlittable()) { memcpyNoGCRefs(pbNative, pObj->GetData(), pMT->GetNativeSize()); } else { MethodDesc* structMarshalStub; { GCX_PREEMP(); structMarshalStub = NDirect::CreateStructMarshalILStub(pMT); } MarshalStructViaILStub(structMarshalStub, pObj->GetData(), pbNative, StructMarshalStubs::MarshalOperation::Marshal, (void**)ppCleanupWorkListOnStack); } HELPER_METHOD_FRAME_END(); } FCIMPLEND FCIMPL2(void, StubHelpers::FmtClassUpdateCLRInternal, Object* pObjUNSAFE, BYTE* pbNative) { FCALL_CONTRACT; OBJECTREF pObj = ObjectToOBJECTREF(pObjUNSAFE); HELPER_METHOD_FRAME_BEGIN_1(pObj); MethodTable* pMT = pObj->GetMethodTable(); if (pMT->IsBlittable()) { memcpyNoGCRefs(pObj->GetData(), pbNative, pMT->GetNativeSize()); } else { MethodDesc* structMarshalStub; { GCX_PREEMP(); structMarshalStub = NDirect::CreateStructMarshalILStub(pMT); } MarshalStructViaILStub(structMarshalStub, pObj->GetData(), pbNative, StructMarshalStubs::MarshalOperation::Unmarshal); } HELPER_METHOD_FRAME_END(); } FCIMPLEND FCIMPL2(void, 
StubHelpers::LayoutDestroyNativeInternal, Object* pObjUNSAFE, BYTE* pbNative) { FCALL_CONTRACT; OBJECTREF pObj = ObjectToOBJECTREF(pObjUNSAFE); HELPER_METHOD_FRAME_BEGIN_1(pObj); MethodTable* pMT = pObj->GetMethodTable(); if (!pMT->IsBlittable()) { MethodDesc* structMarshalStub; { GCX_PREEMP(); structMarshalStub = NDirect::CreateStructMarshalILStub(pMT); } MarshalStructViaILStub(structMarshalStub, pObj->GetData(), pbNative, StructMarshalStubs::MarshalOperation::Cleanup); } HELPER_METHOD_FRAME_END(); } FCIMPLEND FCIMPL1(Object*, StubHelpers::AllocateInternal, EnregisteredTypeHandle pRegisteredTypeHnd) { FCALL_CONTRACT; TypeHandle typeHnd = TypeHandle::FromPtr(pRegisteredTypeHnd); OBJECTREF objRet = NULL; HELPER_METHOD_FRAME_BEGIN_RET_1(objRet); MethodTable* pMT = typeHnd.GetMethodTable(); objRet = pMT->Allocate(); HELPER_METHOD_FRAME_END(); return OBJECTREFToObject(objRet); } FCIMPLEND FCIMPL3(void, StubHelpers::MarshalToUnmanagedVaListInternal, va_list va, DWORD cbVaListSize, const VARARGS* pArgIterator) { FCALL_CONTRACT; HELPER_METHOD_FRAME_BEGIN_0(); VARARGS::MarshalToUnmanagedVaList(va, cbVaListSize, pArgIterator); HELPER_METHOD_FRAME_END(); } FCIMPLEND FCIMPL2(void, StubHelpers::MarshalToManagedVaListInternal, va_list va, VARARGS* pArgIterator) { FCALL_CONTRACT; VARARGS::MarshalToManagedVaList(va, pArgIterator); } FCIMPLEND FCIMPL3(void, StubHelpers::ValidateObject, Object *pObjUNSAFE, MethodDesc *pMD, Object *pThisUNSAFE) { FCALL_CONTRACT; #ifdef VERIFY_HEAP HELPER_METHOD_FRAME_BEGIN_0(); StackSString errorString; EX_TRY { AVInRuntimeImplOkayHolder AVOkay; // don't validate the next object if a BGC is in progress. we can race with background // sweep which could make the next object a Free object underneath us if it's dead. 
ValidateObjectInternal(pObjUNSAFE, !(GCHeapUtilities::GetGCHeap()->IsConcurrentGCInProgress())); } EX_CATCH { FormatValidationMessage(ResolveInteropMethod(pThisUNSAFE, pMD), errorString); EEPOLICY_HANDLE_FATAL_ERROR_WITH_MESSAGE(COR_E_EXECUTIONENGINE, errorString.GetUnicode()); } EX_END_CATCH_UNREACHABLE; HELPER_METHOD_FRAME_END(); #else // VERIFY_HEAP FCUnique(0xa3); UNREACHABLE_MSG("No validation support without VERIFY_HEAP"); #endif // VERIFY_HEAP } FCIMPLEND FCIMPL3(void, StubHelpers::ValidateByref, void *pByref, MethodDesc *pMD, Object *pThisUNSAFE) { FCALL_CONTRACT; #ifdef VERIFY_HEAP // We cannot validate byrefs at this point as code:GCHeap.GetContainingObject could potentially race // with allocations on other threads. We'll just remember this byref along with the interop MD and // perform the validation on next GC (see code:StubHelpers.ProcessByrefValidationList). // Skip byref if is not pointing inside managed heap if (!GCHeapUtilities::GetGCHeap()->IsHeapPointer(pByref)) { return; } ByrefValidationEntry entry; entry.pByref = pByref; entry.pMD = ResolveInteropMethod(pThisUNSAFE, pMD); HELPER_METHOD_FRAME_BEGIN_0(); SIZE_T NumOfEntries = 0; { CrstHolder ch(&s_ByrefValidationLock); if (s_ByrefValidationIndex >= s_ByrefValidationEntries.Size()) { // The validation list grows as necessary, for simplicity we never shrink it. 
SIZE_T newSize; if (!ClrSafeInt<SIZE_T>::multiply(s_ByrefValidationIndex, 2, newSize) || !ClrSafeInt<SIZE_T>::addition(newSize, 1, newSize)) { ThrowHR(COR_E_OVERFLOW); } s_ByrefValidationEntries.ReSizeThrows(newSize); _ASSERTE(s_ByrefValidationIndex < s_ByrefValidationEntries.Size()); } s_ByrefValidationEntries[s_ByrefValidationIndex] = entry; NumOfEntries = ++s_ByrefValidationIndex; } if (NumOfEntries > BYREF_VALIDATION_LIST_MAX_SIZE) { // if the list is too big, trigger GC now GCHeapUtilities::GetGCHeap()->GarbageCollect(0); } HELPER_METHOD_FRAME_END(); #else // VERIFY_HEAP FCUnique(0xa4); UNREACHABLE_MSG("No validation support without VERIFY_HEAP"); #endif // VERIFY_HEAP } FCIMPLEND FCIMPL0(void*, StubHelpers::GetStubContext) { FCALL_CONTRACT; FCUnique(0xa0); UNREACHABLE_MSG_RET("This is a JIT intrinsic!"); } FCIMPLEND FCIMPL2(void, StubHelpers::LogPinnedArgument, MethodDesc *target, Object *pinnedArg) { FCALL_CONTRACT; SIZE_T managedSize = 0; if (pinnedArg != NULL) { // Can pass null objects to interop, only check the size if the object is valid. 
managedSize = pinnedArg->GetSize(); } if (target != NULL) { STRESS_LOG3(LF_STUBS, LL_INFO100, "Managed object %#X with size '%#X' pinned for interop to Method [%pM]\n", pinnedArg, managedSize, target); } else { STRESS_LOG2(LF_STUBS, LL_INFO100, "Managed object %#X pinned for interop with size '%#X'", pinnedArg, managedSize); } } FCIMPLEND FCIMPL1(DWORD, StubHelpers::CalcVaListSize, VARARGS *varargs) { FCALL_CONTRACT; return VARARGS::CalcVaListSize(varargs); } FCIMPLEND #ifdef FEATURE_ARRAYSTUB_AS_IL NOINLINE static void ArrayTypeCheckSlow(Object* element, PtrArray* arr) { FC_INNER_PROLOG(StubHelpers::ArrayTypeCheck); HELPER_METHOD_FRAME_BEGIN_ATTRIB(Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2); if (!ObjIsInstanceOf(element, arr->GetArrayElementTypeHandle())) COMPlusThrow(kArrayTypeMismatchException); HELPER_METHOD_FRAME_END(); FC_INNER_EPILOG(); } FCIMPL2(void, StubHelpers::ArrayTypeCheck, Object* element, PtrArray* arr) { FCALL_CONTRACT; if (ObjIsInstanceOfCached(element, arr->GetArrayElementTypeHandle()) == TypeHandle::CanCast) return; FC_INNER_RETURN_VOID(ArrayTypeCheckSlow(element, arr)); } FCIMPLEND #endif // FEATURE_ARRAYSTUB_AS_IL #ifdef FEATURE_MULTICASTSTUB_AS_IL FCIMPL2(void, StubHelpers::MulticastDebuggerTraceHelper, Object* element, INT32 count) { FCALL_CONTRACT; FCUnique(0xa5); } FCIMPLEND #endif // FEATURE_MULTICASTSTUB_AS_IL FCIMPL0(void*, StubHelpers::NextCallReturnAddress) { FCALL_CONTRACT; UNREACHABLE_MSG("This is a JIT intrinsic!"); } FCIMPLEND
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./src/libraries/System.Private.Xml/src/System/Xml/Schema/XmlSchemaType.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections; using System.ComponentModel; using System.Diagnostics.CodeAnalysis; using System.Xml.Serialization; namespace System.Xml.Schema { /// <devdoc> /// <para>[To be supplied.]</para> /// </devdoc> public class XmlSchemaType : XmlSchemaAnnotated { private string? _name; private XmlSchemaDerivationMethod _final = XmlSchemaDerivationMethod.None; private XmlSchemaDerivationMethod _derivedBy; private XmlSchemaType? _baseSchemaType; private XmlSchemaDatatype? _datatype; private XmlSchemaDerivationMethod _finalResolved; private volatile SchemaElementDecl? _elementDecl; private volatile XmlQualifiedName _qname = XmlQualifiedName.Empty; private XmlSchemaType? _redefined; //compiled information private XmlSchemaContentType _contentType; /// <devdoc> /// <para>[To be supplied.]</para> /// </devdoc> public static XmlSchemaSimpleType? GetBuiltInSimpleType(XmlQualifiedName qualifiedName!!) { return DatatypeImplementation.GetSimpleTypeFromXsdType(qualifiedName); } /// <devdoc> /// <para>[To be supplied.]</para> /// </devdoc> public static XmlSchemaSimpleType GetBuiltInSimpleType(XmlTypeCode typeCode) { return DatatypeImplementation.GetSimpleTypeFromTypeCode(typeCode); } /// <devdoc> /// <para>[To be supplied.]</para> /// </devdoc> public static XmlSchemaComplexType? GetBuiltInComplexType(XmlTypeCode typeCode) { if (typeCode == XmlTypeCode.Item) { return XmlSchemaComplexType.AnyType; } return null; } /// <devdoc> /// <para>[To be supplied.]</para> /// </devdoc> public static XmlSchemaComplexType? GetBuiltInComplexType(XmlQualifiedName qualifiedName!!) 
{ if (qualifiedName.Equals(XmlSchemaComplexType.AnyType.QualifiedName)) { return XmlSchemaComplexType.AnyType; } if (qualifiedName.Equals(XmlSchemaComplexType.UntypedAnyType.QualifiedName)) { return XmlSchemaComplexType.UntypedAnyType; } return null; } /// <devdoc> /// <para>[To be supplied.]</para> /// </devdoc> [XmlAttribute("name")] public string? Name { get { return _name; } set { _name = value; } } /// <devdoc> /// <para>[To be supplied.]</para> /// </devdoc> [XmlAttribute("final"), DefaultValue(XmlSchemaDerivationMethod.None)] public XmlSchemaDerivationMethod Final { get { return _final; } set { _final = value; } } /// <devdoc> /// <para>[To be supplied.]</para> /// </devdoc> [XmlIgnore] public XmlQualifiedName QualifiedName { get { return _qname; } } /// <devdoc> /// <para>[To be supplied.]</para> /// </devdoc> [XmlIgnore] public XmlSchemaDerivationMethod FinalResolved { get { return _finalResolved; } } /// <devdoc> /// <para>[To be supplied.]</para> /// </devdoc> [XmlIgnore] [Obsolete("XmlSchemaType.BaseSchemaType has been deprecated. Use the BaseXmlSchemaType property that returns a strongly typed base schema type instead.")] public object? BaseSchemaType { get { if (_baseSchemaType == null) return null; if (_baseSchemaType.QualifiedName.Namespace == XmlReservedNs.NsXs) { return _baseSchemaType.Datatype; } return _baseSchemaType; } } /// <devdoc> /// <para>[To be supplied.]</para> /// </devdoc> [XmlIgnore] public XmlSchemaType? BaseXmlSchemaType { get { return _baseSchemaType; } } /// <devdoc> /// <para>[To be supplied.]</para> /// </devdoc> [XmlIgnore] public XmlSchemaDerivationMethod DerivedBy { get { return _derivedBy; } } /// <devdoc> /// <para>[To be supplied.]</para> /// </devdoc> [XmlIgnore] public XmlSchemaDatatype? 
Datatype { get { return _datatype; } } /// <devdoc> /// <para>[To be supplied.]</para> /// </devdoc> [XmlIgnore] public virtual bool IsMixed { get { return false; } set { } } [XmlIgnore] public XmlTypeCode TypeCode { get { if (this == XmlSchemaComplexType.AnyType) { return XmlTypeCode.Item; } if (_datatype == null) { return XmlTypeCode.None; } return _datatype.TypeCode; } } [XmlIgnore] internal XmlValueConverter ValueConverter { get { if (_datatype == null) { //Default converter return XmlUntypedConverter.Untyped; } return _datatype.ValueConverter; } } [return: NotNullIfNotNull("schemaSet")] internal XmlReader? Validate(XmlReader reader, XmlResolver? resolver, XmlSchemaSet schemaSet, ValidationEventHandler valEventHandler) { if (schemaSet != null) { XmlReaderSettings readerSettings = new XmlReaderSettings(); readerSettings.ValidationType = ValidationType.Schema; readerSettings.Schemas = schemaSet; readerSettings.ValidationEventHandler += valEventHandler; return new XsdValidatingReader(reader, resolver, readerSettings, this); } return null; } internal XmlSchemaContentType SchemaContentType { get { return _contentType; } } internal void SetQualifiedName(XmlQualifiedName value) { _qname = value; } internal void SetFinalResolved(XmlSchemaDerivationMethod value) { _finalResolved = value; } internal void SetBaseSchemaType(XmlSchemaType? value) { _baseSchemaType = value; } internal void SetDerivedBy(XmlSchemaDerivationMethod value) { _derivedBy = value; } internal void SetDatatype(XmlSchemaDatatype? value) { _datatype = value; } internal SchemaElementDecl? ElementDecl { get { return _elementDecl; } set { _elementDecl = value; } } [XmlIgnore] internal XmlSchemaType? Redefined { get { return _redefined; } set { _redefined = value; } } internal virtual XmlQualifiedName DerivedFrom { get { return XmlQualifiedName.Empty; } } internal void SetContentType(XmlSchemaContentType value) { _contentType = value; } public static bool IsDerivedFrom([NotNullWhen(true)] XmlSchemaType? 
derivedType, [NotNullWhen(true)] XmlSchemaType? baseType, XmlSchemaDerivationMethod except) { if (derivedType == null || baseType == null) { return false; } if (derivedType == baseType) { return true; } if (baseType == XmlSchemaComplexType.AnyType) { //Not checking for restriction blocked since all types are implicitly derived by restriction from xs:anyType return true; } do { XmlSchemaSimpleType? dt = derivedType as XmlSchemaSimpleType; XmlSchemaSimpleType? bt = baseType as XmlSchemaSimpleType; if (bt != null && dt != null) { //SimpleTypes if (bt == DatatypeImplementation.AnySimpleType) { //Not checking block=restriction return true; } if ((except & derivedType.DerivedBy) != 0 || !dt.Datatype!.IsDerivedFrom(bt.Datatype!)) { return false; } return true; } else { //Complex types if ((except & derivedType.DerivedBy) != 0) { return false; } derivedType = derivedType.BaseXmlSchemaType; if (derivedType == baseType) { return true; } } } while (derivedType != null); return false; } internal static bool IsDerivedFromDatatype(XmlSchemaDatatype derivedDataType, XmlSchemaDatatype baseDataType, XmlSchemaDerivationMethod except) { if (DatatypeImplementation.AnySimpleType.Datatype == baseDataType) { return true; } return derivedDataType.IsDerivedFrom(baseDataType); } [XmlIgnore] internal override string? NameAttribute { get { return Name; } set { Name = value; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections; using System.ComponentModel; using System.Diagnostics.CodeAnalysis; using System.Xml.Serialization; namespace System.Xml.Schema { /// <devdoc> /// <para>[To be supplied.]</para> /// </devdoc> public class XmlSchemaType : XmlSchemaAnnotated { private string? _name; private XmlSchemaDerivationMethod _final = XmlSchemaDerivationMethod.None; private XmlSchemaDerivationMethod _derivedBy; private XmlSchemaType? _baseSchemaType; private XmlSchemaDatatype? _datatype; private XmlSchemaDerivationMethod _finalResolved; private volatile SchemaElementDecl? _elementDecl; private volatile XmlQualifiedName _qname = XmlQualifiedName.Empty; private XmlSchemaType? _redefined; //compiled information private XmlSchemaContentType _contentType; /// <devdoc> /// <para>[To be supplied.]</para> /// </devdoc> public static XmlSchemaSimpleType? GetBuiltInSimpleType(XmlQualifiedName qualifiedName!!) { return DatatypeImplementation.GetSimpleTypeFromXsdType(qualifiedName); } /// <devdoc> /// <para>[To be supplied.]</para> /// </devdoc> public static XmlSchemaSimpleType GetBuiltInSimpleType(XmlTypeCode typeCode) { return DatatypeImplementation.GetSimpleTypeFromTypeCode(typeCode); } /// <devdoc> /// <para>[To be supplied.]</para> /// </devdoc> public static XmlSchemaComplexType? GetBuiltInComplexType(XmlTypeCode typeCode) { if (typeCode == XmlTypeCode.Item) { return XmlSchemaComplexType.AnyType; } return null; } /// <devdoc> /// <para>[To be supplied.]</para> /// </devdoc> public static XmlSchemaComplexType? GetBuiltInComplexType(XmlQualifiedName qualifiedName!!) 
{ if (qualifiedName.Equals(XmlSchemaComplexType.AnyType.QualifiedName)) { return XmlSchemaComplexType.AnyType; } if (qualifiedName.Equals(XmlSchemaComplexType.UntypedAnyType.QualifiedName)) { return XmlSchemaComplexType.UntypedAnyType; } return null; } /// <devdoc> /// <para>[To be supplied.]</para> /// </devdoc> [XmlAttribute("name")] public string? Name { get { return _name; } set { _name = value; } } /// <devdoc> /// <para>[To be supplied.]</para> /// </devdoc> [XmlAttribute("final"), DefaultValue(XmlSchemaDerivationMethod.None)] public XmlSchemaDerivationMethod Final { get { return _final; } set { _final = value; } } /// <devdoc> /// <para>[To be supplied.]</para> /// </devdoc> [XmlIgnore] public XmlQualifiedName QualifiedName { get { return _qname; } } /// <devdoc> /// <para>[To be supplied.]</para> /// </devdoc> [XmlIgnore] public XmlSchemaDerivationMethod FinalResolved { get { return _finalResolved; } } /// <devdoc> /// <para>[To be supplied.]</para> /// </devdoc> [XmlIgnore] [Obsolete("XmlSchemaType.BaseSchemaType has been deprecated. Use the BaseXmlSchemaType property that returns a strongly typed base schema type instead.")] public object? BaseSchemaType { get { if (_baseSchemaType == null) return null; if (_baseSchemaType.QualifiedName.Namespace == XmlReservedNs.NsXs) { return _baseSchemaType.Datatype; } return _baseSchemaType; } } /// <devdoc> /// <para>[To be supplied.]</para> /// </devdoc> [XmlIgnore] public XmlSchemaType? BaseXmlSchemaType { get { return _baseSchemaType; } } /// <devdoc> /// <para>[To be supplied.]</para> /// </devdoc> [XmlIgnore] public XmlSchemaDerivationMethod DerivedBy { get { return _derivedBy; } } /// <devdoc> /// <para>[To be supplied.]</para> /// </devdoc> [XmlIgnore] public XmlSchemaDatatype? 
Datatype { get { return _datatype; } } /// <devdoc> /// <para>[To be supplied.]</para> /// </devdoc> [XmlIgnore] public virtual bool IsMixed { get { return false; } set { } } [XmlIgnore] public XmlTypeCode TypeCode { get { if (this == XmlSchemaComplexType.AnyType) { return XmlTypeCode.Item; } if (_datatype == null) { return XmlTypeCode.None; } return _datatype.TypeCode; } } [XmlIgnore] internal XmlValueConverter ValueConverter { get { if (_datatype == null) { //Default converter return XmlUntypedConverter.Untyped; } return _datatype.ValueConverter; } } [return: NotNullIfNotNull("schemaSet")] internal XmlReader? Validate(XmlReader reader, XmlResolver? resolver, XmlSchemaSet schemaSet, ValidationEventHandler valEventHandler) { if (schemaSet != null) { XmlReaderSettings readerSettings = new XmlReaderSettings(); readerSettings.ValidationType = ValidationType.Schema; readerSettings.Schemas = schemaSet; readerSettings.ValidationEventHandler += valEventHandler; return new XsdValidatingReader(reader, resolver, readerSettings, this); } return null; } internal XmlSchemaContentType SchemaContentType { get { return _contentType; } } internal void SetQualifiedName(XmlQualifiedName value) { _qname = value; } internal void SetFinalResolved(XmlSchemaDerivationMethod value) { _finalResolved = value; } internal void SetBaseSchemaType(XmlSchemaType? value) { _baseSchemaType = value; } internal void SetDerivedBy(XmlSchemaDerivationMethod value) { _derivedBy = value; } internal void SetDatatype(XmlSchemaDatatype? value) { _datatype = value; } internal SchemaElementDecl? ElementDecl { get { return _elementDecl; } set { _elementDecl = value; } } [XmlIgnore] internal XmlSchemaType? Redefined { get { return _redefined; } set { _redefined = value; } } internal virtual XmlQualifiedName DerivedFrom { get { return XmlQualifiedName.Empty; } } internal void SetContentType(XmlSchemaContentType value) { _contentType = value; } public static bool IsDerivedFrom([NotNullWhen(true)] XmlSchemaType? 
derivedType, [NotNullWhen(true)] XmlSchemaType? baseType, XmlSchemaDerivationMethod except) { if (derivedType == null || baseType == null) { return false; } if (derivedType == baseType) { return true; } if (baseType == XmlSchemaComplexType.AnyType) { //Not checking for restriction blocked since all types are implicitly derived by restriction from xs:anyType return true; } do { XmlSchemaSimpleType? dt = derivedType as XmlSchemaSimpleType; XmlSchemaSimpleType? bt = baseType as XmlSchemaSimpleType; if (bt != null && dt != null) { //SimpleTypes if (bt == DatatypeImplementation.AnySimpleType) { //Not checking block=restriction return true; } if ((except & derivedType.DerivedBy) != 0 || !dt.Datatype!.IsDerivedFrom(bt.Datatype!)) { return false; } return true; } else { //Complex types if ((except & derivedType.DerivedBy) != 0) { return false; } derivedType = derivedType.BaseXmlSchemaType; if (derivedType == baseType) { return true; } } } while (derivedType != null); return false; } internal static bool IsDerivedFromDatatype(XmlSchemaDatatype derivedDataType, XmlSchemaDatatype baseDataType, XmlSchemaDerivationMethod except) { if (DatatypeImplementation.AnySimpleType.Datatype == baseDataType) { return true; } return derivedDataType.IsDerivedFrom(baseDataType); } [XmlIgnore] internal override string? NameAttribute { get { return Name; } set { Name = value; } } } }
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./eng/pipelines/coreclr/runincontext.yml
trigger: none schedules: - cron: "0 13 * * 6,0" displayName: Sat and Sun at 5:00 AM (UTC-8:00) branches: include: - main always: true jobs: - template: /eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/common/build-coreclr-and-libraries-job.yml buildConfig: checked platforms: - Linux_x64 - windows_x64 - windows_x86 - CoreClrTestBuildHost # Either OSX_x64 or Linux_x64 jobParameters: testGroup: outerloop - template: /eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/common/templates/runtimes/build-test-job.yml buildConfig: checked platforms: - CoreClrTestBuildHost # Either OSX_x64 or Linux_x64 jobParameters: testGroup: outerloop - template: /eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/common/templates/runtimes/run-test-job.yml buildConfig: checked platforms: - Linux_x64 - windows_x64 - windows_x86 helixQueueGroup: ci helixQueuesTemplate: /eng/pipelines/coreclr/templates/helix-queues-setup.yml jobParameters: testGroup: outerloop runInUnloadableContext: true displayNameArgs: RunInContext liveLibrariesBuildConfig: Release
trigger: none schedules: - cron: "0 13 * * 6,0" displayName: Sat and Sun at 5:00 AM (UTC-8:00) branches: include: - main always: true jobs: - template: /eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/common/build-coreclr-and-libraries-job.yml buildConfig: checked platforms: - Linux_x64 - windows_x64 - windows_x86 - CoreClrTestBuildHost # Either OSX_x64 or Linux_x64 jobParameters: testGroup: outerloop - template: /eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/common/templates/runtimes/build-test-job.yml buildConfig: checked platforms: - CoreClrTestBuildHost # Either OSX_x64 or Linux_x64 jobParameters: testGroup: outerloop - template: /eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/common/templates/runtimes/run-test-job.yml buildConfig: checked platforms: - Linux_x64 - windows_x64 - windows_x86 helixQueueGroup: ci helixQueuesTemplate: /eng/pipelines/coreclr/templates/helix-queues-setup.yml jobParameters: testGroup: outerloop runInUnloadableContext: true displayNameArgs: RunInContext liveLibrariesBuildConfig: Release
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./src/libraries/System.ComponentModel.TypeConverter/src/System/ComponentModel/INestedContainer.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. namespace System.ComponentModel { /// <summary> /// A "nested container" is an object that logically contains zero or more child /// components and is controlled (owned) by some parent component. /// /// In this context, "containment" refers to logical containment, not visual /// containment. Components and containers can be used in a variety of /// scenarios, including both visual and non-visual scenarios. /// </summary> public interface INestedContainer : IContainer { /// <summary> /// The component that owns this nested container. /// </summary> IComponent Owner { get; } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. namespace System.ComponentModel { /// <summary> /// A "nested container" is an object that logically contains zero or more child /// components and is controlled (owned) by some parent component. /// /// In this context, "containment" refers to logical containment, not visual /// containment. Components and containers can be used in a variety of /// scenarios, including both visual and non-visual scenarios. /// </summary> public interface INestedContainer : IContainer { /// <summary> /// The component that owns this nested container. /// </summary> IComponent Owner { get; } } }
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./src/tests/Loader/classloader/TypeGeneratorTests/TypeGeneratorTest483/Generated483.ilproj
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <ItemGroup> <Compile Include="Generated483.il" /> </ItemGroup> <ItemGroup> <ProjectReference Include="..\TestFramework\TestFramework.csproj" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <ItemGroup> <Compile Include="Generated483.il" /> </ItemGroup> <ItemGroup> <ProjectReference Include="..\TestFramework\TestFramework.csproj" /> </ItemGroup> </Project>
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./src/tests/JIT/HardwareIntrinsics/General/Vector64/Abs.Int32.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; namespace JIT.HardwareIntrinsics.General { public static partial class Program { private static void AbsInt32() { var test = new VectorUnaryOpTest__AbsInt32(); // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); // Validates passing a static member works test.RunClsVarScenario(); // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); // Validates passing the field of a local class works test.RunClassLclFldScenario(); // Validates passing an instance member of a class works test.RunClassFldScenario(); // Validates passing the field of a local struct works test.RunStructLclFldScenario(); // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class VectorUnaryOpTest__AbsInt32 { private struct DataTable { private byte[] inArray1; private byte[] outArray; private GCHandle inHandle1; private GCHandle outHandle; private ulong alignment; public DataTable(Int32[] inArray1, Int32[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * 
Unsafe.SizeOf<Int32>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int32>(); if ((alignment != 32 && alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int32, byte>(ref inArray1[0]), (uint)sizeOfinArray1); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector64<Int32> _fld1; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref testStruct._fld1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>()); return testStruct; } public void RunStructFldScenario(VectorUnaryOpTest__AbsInt32 testClass) { var result = Vector64.Abs(_fld1); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, testClass._dataTable.outArrayPtr); } } private static readonly int LargestVectorSize = 8; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector64<Int32>>() / sizeof(Int32); private static readonly int RetElementCount = Unsafe.SizeOf<Vector64<Int32>>() / sizeof(Int32); private static 
Int32[] _data1 = new Int32[Op1ElementCount]; private static Vector64<Int32> _clsVar1; private Vector64<Int32> _fld1; private DataTable _dataTable; static VectorUnaryOpTest__AbsInt32() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref _clsVar1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>()); } public VectorUnaryOpTest__AbsInt32() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref _fld1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); } _dataTable = new DataTable(_data1, new Int32[RetElementCount], LargestVectorSize); } public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = Vector64.Abs( Unsafe.Read<Vector64<Int32>>(_dataTable.inArray1Ptr) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var method = typeof(Vector64).GetMethod(nameof(Vector64.Abs), new Type[] { typeof(Vector64<Int32>) }); if (method is null) { method = typeof(Vector64).GetMethod(nameof(Vector64.Abs), 1, new Type[] { typeof(Vector64<>).MakeGenericType(Type.MakeGenericMethodParameter(0)) }); } if (method.IsGenericMethodDefinition) { method = method.MakeGenericMethod(typeof(Int32)); } var result = method.Invoke(null, new object[] { Unsafe.Read<Vector64<Int32>>(_dataTable.inArray1Ptr) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector64<Int32>)(result)); ValidateResult(_dataTable.inArray1Ptr, 
_dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = Vector64.Abs( _clsVar1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _dataTable.outArrayPtr); } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector64<Int32>>(_dataTable.inArray1Ptr); var result = Vector64.Abs(op1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new VectorUnaryOpTest__AbsInt32(); var result = Vector64.Abs(test._fld1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, _dataTable.outArrayPtr); } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = Vector64.Abs(_fld1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _dataTable.outArrayPtr); } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = Vector64.Abs(test._fld1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } private void ValidateResult(Vector64<Int32> op1, void* result, [CallerMemberName] string method = "") { Int32[] inArray1 = new Int32[Op1ElementCount]; Int32[] outArray = new Int32[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray1[0]), op1); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Int32>>()); 
ValidateResult(inArray1, outArray, method); } private void ValidateResult(void* op1, void* result, [CallerMemberName] string method = "") { Int32[] inArray1 = new Int32[Op1ElementCount]; Int32[] outArray = new Int32[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector64<Int32>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Int32>>()); ValidateResult(inArray1, outArray, method); } private void ValidateResult(Int32[] firstOp, Int32[] result, [CallerMemberName] string method = "") { bool succeeded = true; if (result[0] != Math.Abs(firstOp[0])) { succeeded = false; } else { for (var i = 1; i < RetElementCount; i++) { if (result[i] != Math.Abs(firstOp[i])) { succeeded = false; break; } } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(Vector64)}.{nameof(Vector64.Abs)}<Int32>(Vector64<Int32>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; namespace JIT.HardwareIntrinsics.General { public static partial class Program { private static void AbsInt32() { var test = new VectorUnaryOpTest__AbsInt32(); // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); // Validates passing a static member works test.RunClsVarScenario(); // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); // Validates passing the field of a local class works test.RunClassLclFldScenario(); // Validates passing an instance member of a class works test.RunClassFldScenario(); // Validates passing the field of a local struct works test.RunStructLclFldScenario(); // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class VectorUnaryOpTest__AbsInt32 { private struct DataTable { private byte[] inArray1; private byte[] outArray; private GCHandle inHandle1; private GCHandle outHandle; private ulong alignment; public DataTable(Int32[] inArray1, Int32[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * 
Unsafe.SizeOf<Int32>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int32>(); if ((alignment != 32 && alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int32, byte>(ref inArray1[0]), (uint)sizeOfinArray1); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector64<Int32> _fld1; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref testStruct._fld1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>()); return testStruct; } public void RunStructFldScenario(VectorUnaryOpTest__AbsInt32 testClass) { var result = Vector64.Abs(_fld1); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, testClass._dataTable.outArrayPtr); } } private static readonly int LargestVectorSize = 8; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector64<Int32>>() / sizeof(Int32); private static readonly int RetElementCount = Unsafe.SizeOf<Vector64<Int32>>() / sizeof(Int32); private static 
Int32[] _data1 = new Int32[Op1ElementCount]; private static Vector64<Int32> _clsVar1; private Vector64<Int32> _fld1; private DataTable _dataTable; static VectorUnaryOpTest__AbsInt32() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref _clsVar1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>()); } public VectorUnaryOpTest__AbsInt32() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref _fld1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); } _dataTable = new DataTable(_data1, new Int32[RetElementCount], LargestVectorSize); } public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = Vector64.Abs( Unsafe.Read<Vector64<Int32>>(_dataTable.inArray1Ptr) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var method = typeof(Vector64).GetMethod(nameof(Vector64.Abs), new Type[] { typeof(Vector64<Int32>) }); if (method is null) { method = typeof(Vector64).GetMethod(nameof(Vector64.Abs), 1, new Type[] { typeof(Vector64<>).MakeGenericType(Type.MakeGenericMethodParameter(0)) }); } if (method.IsGenericMethodDefinition) { method = method.MakeGenericMethod(typeof(Int32)); } var result = method.Invoke(null, new object[] { Unsafe.Read<Vector64<Int32>>(_dataTable.inArray1Ptr) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector64<Int32>)(result)); ValidateResult(_dataTable.inArray1Ptr, 
_dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = Vector64.Abs( _clsVar1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _dataTable.outArrayPtr); } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector64<Int32>>(_dataTable.inArray1Ptr); var result = Vector64.Abs(op1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new VectorUnaryOpTest__AbsInt32(); var result = Vector64.Abs(test._fld1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, _dataTable.outArrayPtr); } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = Vector64.Abs(_fld1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _dataTable.outArrayPtr); } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = Vector64.Abs(test._fld1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } private void ValidateResult(Vector64<Int32> op1, void* result, [CallerMemberName] string method = "") { Int32[] inArray1 = new Int32[Op1ElementCount]; Int32[] outArray = new Int32[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray1[0]), op1); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Int32>>()); 
ValidateResult(inArray1, outArray, method); } private void ValidateResult(void* op1, void* result, [CallerMemberName] string method = "") { Int32[] inArray1 = new Int32[Op1ElementCount]; Int32[] outArray = new Int32[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector64<Int32>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Int32>>()); ValidateResult(inArray1, outArray, method); } private void ValidateResult(Int32[] firstOp, Int32[] result, [CallerMemberName] string method = "") { bool succeeded = true; if (result[0] != Math.Abs(firstOp[0])) { succeeded = false; } else { for (var i = 1; i < RetElementCount; i++) { if (result[i] != Math.Abs(firstOp[i])) { succeeded = false; break; } } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(Vector64)}.{nameof(Vector64.Abs)}<Int32>(Vector64<Int32>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./src/tests/JIT/HardwareIntrinsics/X86/Avx2/InsertVector128.Int32.1.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.X86; using static System.Runtime.Intrinsics.X86.Sse; using static System.Runtime.Intrinsics.X86.Sse2; namespace JIT.HardwareIntrinsics.X86 { public static partial class Program { private static void InsertVector128Int321() { var test = new InsertVector128Test__InsertVector128Int321(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (Avx.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); // Validates basic functionality works, using LoadAligned test.RunBasicScenario_LoadAligned(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (Avx.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); // Validates calling via reflection works, using LoadAligned test.RunReflectionScenario_LoadAligned(); } // Validates passing a static member works test.RunClsVarScenario(); // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (Avx.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); // Validates passing a local works, using LoadAligned test.RunLclVarScenario_LoadAligned(); } 
// Validates passing the field of a local class works test.RunClassLclFldScenario(); // Validates passing an instance member of a class works test.RunClassFldScenario(); // Validates passing the field of a local struct works test.RunStructLclFldScenario(); // Validates passing an instance member of a struct works test.RunStructFldScenario(); } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class InsertVector128Test__InsertVector128Int321 { private struct TestStruct { public Vector256<Int32> _fld1; public Vector128<Int32> _fld2; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int32>, byte>(ref testStruct._fld1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<Int32>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref testStruct._fld2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>()); return testStruct; } public void RunStructFldScenario(InsertVector128Test__InsertVector128Int321 testClass) { var result = Avx2.InsertVector128(_fld1, _fld2, 1); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } } private static readonly int LargestVectorSize = 32; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector256<Int32>>() / sizeof(Int32); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector128<Int32>>() / sizeof(Int32); private static readonly int RetElementCount = Unsafe.SizeOf<Vector256<Int32>>() / sizeof(Int32); private static Int32[] _data1 = new Int32[Op1ElementCount]; 
private static Int32[] _data2 = new Int32[Op2ElementCount]; private static Vector256<Int32> _clsVar1; private static Vector128<Int32> _clsVar2; private Vector256<Int32> _fld1; private Vector128<Int32> _fld2; private SimpleBinaryOpTest__DataTable<Int32, Int32, Int32> _dataTable; static InsertVector128Test__InsertVector128Int321() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int32>, byte>(ref _clsVar1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<Int32>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _clsVar2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>()); } public InsertVector128Test__InsertVector128Int321() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int32>, byte>(ref _fld1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<Int32>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _fld2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); } _dataTable = new SimpleBinaryOpTest__DataTable<Int32, Int32, Int32>(_data1, _data2, new Int32[RetElementCount], LargestVectorSize); } public bool IsSupported => Avx2.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = Avx2.InsertVector128( 
Unsafe.Read<Vector256<Int32>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<Int32>>(_dataTable.inArray2Ptr), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = Avx2.InsertVector128( Avx.LoadVector256((Int32*)(_dataTable.inArray1Ptr)), LoadVector128((Int32*)(_dataTable.inArray2Ptr)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_LoadAligned() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_LoadAligned)); var result = Avx2.InsertVector128( Avx.LoadAlignedVector256((Int32*)(_dataTable.inArray1Ptr)), LoadAlignedVector128((Int32*)(_dataTable.inArray2Ptr)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(Avx2).GetMethod(nameof(Avx2.InsertVector128), new Type[] { typeof(Vector256<Int32>), typeof(Vector128<Int32>), typeof(byte) }) .Invoke(null, new object[] { Unsafe.Read<Vector256<Int32>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<Int32>>(_dataTable.inArray2Ptr), (byte)1 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector256<Int32>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(Avx2).GetMethod(nameof(Avx2.InsertVector128), new Type[] { typeof(Vector256<Int32>), typeof(Vector128<Int32>), typeof(byte) }) .Invoke(null, new object[] { Avx.LoadVector256((Int32*)(_dataTable.inArray1Ptr)), 
LoadVector128((Int32*)(_dataTable.inArray2Ptr)), (byte)1 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector256<Int32>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_LoadAligned() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_LoadAligned)); var result = typeof(Avx2).GetMethod(nameof(Avx2.InsertVector128), new Type[] { typeof(Vector256<Int32>), typeof(Vector128<Int32>), typeof(byte) }) .Invoke(null, new object[] { Avx.LoadAlignedVector256((Int32*)(_dataTable.inArray1Ptr)), LoadAlignedVector128((Int32*)(_dataTable.inArray2Ptr)), (byte)1 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector256<Int32>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = Avx2.InsertVector128( _clsVar1, _clsVar2, 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var left = Unsafe.Read<Vector256<Int32>>(_dataTable.inArray1Ptr); var right = Unsafe.Read<Vector128<Int32>>(_dataTable.inArray2Ptr); var result = Avx2.InsertVector128(left, right, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(left, right, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var left = Avx.LoadVector256((Int32*)(_dataTable.inArray1Ptr)); var right = LoadVector128((Int32*)(_dataTable.inArray2Ptr)); var result = Avx2.InsertVector128(left, right, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(left, right, _dataTable.outArrayPtr); } public void RunLclVarScenario_LoadAligned() { 
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_LoadAligned)); var left = Avx.LoadAlignedVector256((Int32*)(_dataTable.inArray1Ptr)); var right = LoadAlignedVector128((Int32*)(_dataTable.inArray2Ptr)); var result = Avx2.InsertVector128(left, right, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(left, right, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new InsertVector128Test__InsertVector128Int321(); var result = Avx2.InsertVector128(test._fld1, test._fld2, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = Avx2.InsertVector128(_fld1, _fld2, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = Avx2.InsertVector128(test._fld1, test._fld2, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector256<Int32> left, Vector128<Int32> right, void* result, [CallerMemberName] string method = "") { Int32[] inArray1 = new Int32[Op1ElementCount]; Int32[] inArray2 = new Int32[Op2ElementCount]; Int32[] outArray = new 
Int32[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray1[0]), left); Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray2[0]), right); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector256<Int32>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(void* left, void* right, void* result, [CallerMemberName] string method = "") { Int32[] inArray1 = new Int32[Op1ElementCount]; Int32[] inArray2 = new Int32[Op2ElementCount]; Int32[] outArray = new Int32[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(left), (uint)Unsafe.SizeOf<Vector256<Int32>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(right), (uint)Unsafe.SizeOf<Vector128<Int32>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector256<Int32>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(Int32[] left, Int32[] right, Int32[] result, [CallerMemberName] string method = "") { bool succeeded = true; if (result[0] != left[0]) { succeeded = false; } else { for (var i = 1; i < RetElementCount; i++) { if ((i > 3 ? result[i] != right[i - 4] : result[i] != left[i])) { succeeded = false; break; } } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(Avx2)}.{nameof(Avx2.InsertVector128)}<Int32>(Vector256<Int32>, Vector128<Int32>.1): {method} failed:"); TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})"); TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

/******************************************************************************
 * This file is auto-generated from a template file by the GenerateTests.csx  *
 * script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make    *
 * changes, please update the corresponding template and run according to the *
 * directions listed in the file.                                             *
 ******************************************************************************/

using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;
using static System.Runtime.Intrinsics.X86.Sse;
using static System.Runtime.Intrinsics.X86.Sse2;

namespace JIT.HardwareIntrinsics.X86
{
    public static partial class Program
    {
        // Entry point for the Avx2.InsertVector128(Vector256<Int32>, Vector128<Int32>, 1) test.
        // Runs every supported scenario and throws if any of them reported failure.
        private static void InsertVector128Int321()
        {
            var test = new InsertVector128Test__InsertVector128Int321();

            if (test.IsSupported)
            {
                // Validates basic functionality works, using Unsafe.Read
                test.RunBasicScenario_UnsafeRead();

                if (Avx.IsSupported)
                {
                    // Validates basic functionality works, using Load
                    test.RunBasicScenario_Load();

                    // Validates basic functionality works, using LoadAligned
                    test.RunBasicScenario_LoadAligned();
                }

                // Validates calling via reflection works, using Unsafe.Read
                test.RunReflectionScenario_UnsafeRead();

                if (Avx.IsSupported)
                {
                    // Validates calling via reflection works, using Load
                    test.RunReflectionScenario_Load();

                    // Validates calling via reflection works, using LoadAligned
                    test.RunReflectionScenario_LoadAligned();
                }

                // Validates passing a static member works
                test.RunClsVarScenario();

                // Validates passing a local works, using Unsafe.Read
                test.RunLclVarScenario_UnsafeRead();

                if (Avx.IsSupported)
                {
                    // Validates passing a local works, using Load
                    test.RunLclVarScenario_Load();

                    // Validates passing a local works, using LoadAligned
                    test.RunLclVarScenario_LoadAligned();
                }

                // Validates passing the field of a local class works
                test.RunClassLclFldScenario();

                // Validates passing an instance member of a class works
                test.RunClassFldScenario();

                // Validates passing the field of a local struct works
                test.RunStructLclFldScenario();

                // Validates passing an instance member of a struct works
                test.RunStructFldScenario();
            }
            else
            {
                // Validates we throw on unsupported hardware
                test.RunUnsupportedScenario();
            }

            if (!test.Succeeded)
            {
                throw new Exception("One or more scenarios did not complete as expected.");
            }
        }
    }

    // Exercises Avx2.InsertVector128 with Int32 operands and the immediate 1 (replace the
    // upper 128-bit lane of the 256-bit operand) across many argument-passing shapes:
    // unsafe pointer reads, Avx loads, reflection invocation, static/instance/struct fields.
    // Every scenario writes the intrinsic's result into the shared data table and validates
    // it element-by-element via ValidateResult.
    public sealed unsafe class InsertVector128Test__InsertVector128Int321
    {
        private struct TestStruct
        {
            public Vector256<Int32> _fld1;
            public Vector128<Int32> _fld2;

            // Builds a TestStruct whose fields are filled from freshly randomized
            // copies of the shared static _data1/_data2 arrays.
            public static TestStruct Create()
            {
                var testStruct = new TestStruct();

                for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); }
                Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int32>, byte>(ref testStruct._fld1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<Int32>>());
                for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); }
                Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref testStruct._fld2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());

                return testStruct;
            }

            // Runs the intrinsic on this struct's fields and validates through the
            // owning test class's data table.
            public void RunStructFldScenario(InsertVector128Test__InsertVector128Int321 testClass)
            {
                var result = Avx2.InsertVector128(_fld1, _fld2, 1);

                Unsafe.Write(testClass._dataTable.outArrayPtr, result);
                testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr);
            }
        }

        // Byte size of the largest vector involved (Vector256 = 32 bytes); used for
        // the data table's alignment.
        private static readonly int LargestVectorSize = 32;

        // Element counts for operand 1 (Vector256<Int32> = 8), operand 2
        // (Vector128<Int32> = 4), and the result (Vector256<Int32> = 8).
        private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector256<Int32>>() / sizeof(Int32);
        private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector128<Int32>>() / sizeof(Int32);
        private static readonly int RetElementCount = Unsafe.SizeOf<Vector256<Int32>>() / sizeof(Int32);

        // Shared scratch arrays; re-randomized before each use (static ctor, instance
        // ctor, and TestStruct.Create all refill them).
        private static Int32[] _data1 = new Int32[Op1ElementCount];
        private static Int32[] _data2 = new Int32[Op2ElementCount];

        // Static-field operands for RunClsVarScenario.
        private static Vector256<Int32> _clsVar1;
        private static Vector128<Int32> _clsVar2;

        // Instance-field operands for RunClassFldScenario / RunClassLclFldScenario.
        private Vector256<Int32> _fld1;
        private Vector128<Int32> _fld2;

        // Aligned in/out buffers shared by the pointer-based scenarios.
        private SimpleBinaryOpTest__DataTable<Int32, Int32, Int32> _dataTable;

        // Initializes the static-field operands with random data.
        static InsertVector128Test__InsertVector128Int321()
        {
            for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); }
            Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int32>, byte>(ref _clsVar1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<Int32>>());
            for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); }
            Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _clsVar2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
        }

        // Fills the instance-field operands, then re-randomizes the scratch arrays
        // once more before handing them to the data table, so the table's inputs are
        // independent of the field operands.
        public InsertVector128Test__InsertVector128Int321()
        {
            Succeeded = true;

            for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); }
            Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int32>, byte>(ref _fld1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<Int32>>());
            for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); }
            Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _fld2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());

            for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); }
            for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); }
            _dataTable = new SimpleBinaryOpTest__DataTable<Int32, Int32, Int32>(_data1, _data2, new Int32[RetElementCount], LargestVectorSize);
        }

        public bool IsSupported => Avx2.IsSupported;

        public bool Succeeded { get; set; }

        // Operands read from (potentially unaligned) memory via Unsafe.Read.
        public void RunBasicScenario_UnsafeRead()
        {
            TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead));

            var result = Avx2.InsertVector128(
                Unsafe.Read<Vector256<Int32>>(_dataTable.inArray1Ptr),
                Unsafe.Read<Vector128<Int32>>(_dataTable.inArray2Ptr),
                1
            );

            Unsafe.Write(_dataTable.outArrayPtr, result);
            ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
        }

        // Operands loaded via Avx.LoadVector256 / Sse2.LoadVector128 (static using).
        public void RunBasicScenario_Load()
        {
            TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load));

            var result = Avx2.InsertVector128(
                Avx.LoadVector256((Int32*)(_dataTable.inArray1Ptr)),
                LoadVector128((Int32*)(_dataTable.inArray2Ptr)),
                1
            );

            Unsafe.Write(_dataTable.outArrayPtr, result);
            ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
        }

        // Operands loaded via the aligned-load intrinsics; relies on the data table's
        // LargestVectorSize alignment.
        public void RunBasicScenario_LoadAligned()
        {
            TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_LoadAligned));

            var result = Avx2.InsertVector128(
                Avx.LoadAlignedVector256((Int32*)(_dataTable.inArray1Ptr)),
                LoadAlignedVector128((Int32*)(_dataTable.inArray2Ptr)),
                1
            );

            Unsafe.Write(_dataTable.outArrayPtr, result);
            ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
        }

        // Invokes the intrinsic through reflection; the immediate is passed as (byte)1
        // to match the (Vector256, Vector128, byte) overload.
        public void RunReflectionScenario_UnsafeRead()
        {
            TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead));

            var result = typeof(Avx2).GetMethod(nameof(Avx2.InsertVector128), new Type[] { typeof(Vector256<Int32>), typeof(Vector128<Int32>), typeof(byte) })
                                     .Invoke(null, new object[] {
                                        Unsafe.Read<Vector256<Int32>>(_dataTable.inArray1Ptr),
                                        Unsafe.Read<Vector128<Int32>>(_dataTable.inArray2Ptr),
                                        (byte)1
                                     });

            Unsafe.Write(_dataTable.outArrayPtr, (Vector256<Int32>)(result));
            ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
        }

        // Reflection invocation with Load-based operands.
        public void RunReflectionScenario_Load()
        {
            TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load));

            var result = typeof(Avx2).GetMethod(nameof(Avx2.InsertVector128), new Type[] { typeof(Vector256<Int32>), typeof(Vector128<Int32>), typeof(byte) })
                                     .Invoke(null, new object[] {
                                        Avx.LoadVector256((Int32*)(_dataTable.inArray1Ptr)),
                                        LoadVector128((Int32*)(_dataTable.inArray2Ptr)),
                                        (byte)1
                                     });

            Unsafe.Write(_dataTable.outArrayPtr, (Vector256<Int32>)(result));
            ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
        }

        // Reflection invocation with LoadAligned-based operands.
        public void RunReflectionScenario_LoadAligned()
        {
            TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_LoadAligned));

            var result = typeof(Avx2).GetMethod(nameof(Avx2.InsertVector128), new Type[] { typeof(Vector256<Int32>), typeof(Vector128<Int32>), typeof(byte) })
                                     .Invoke(null, new object[] {
                                        Avx.LoadAlignedVector256((Int32*)(_dataTable.inArray1Ptr)),
                                        LoadAlignedVector128((Int32*)(_dataTable.inArray2Ptr)),
                                        (byte)1
                                     });

            Unsafe.Write(_dataTable.outArrayPtr, (Vector256<Int32>)(result));
            ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
        }

        // Operands passed from static fields.
        public void RunClsVarScenario()
        {
            TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario));

            var result = Avx2.InsertVector128(
                _clsVar1,
                _clsVar2,
                1
            );

            Unsafe.Write(_dataTable.outArrayPtr, result);
            ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr);
        }

        // Operands passed from locals populated via Unsafe.Read.
        public void RunLclVarScenario_UnsafeRead()
        {
            TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead));

            var left = Unsafe.Read<Vector256<Int32>>(_dataTable.inArray1Ptr);
            var right = Unsafe.Read<Vector128<Int32>>(_dataTable.inArray2Ptr);
            var result = Avx2.InsertVector128(left, right, 1);

            Unsafe.Write(_dataTable.outArrayPtr, result);
            ValidateResult(left, right, _dataTable.outArrayPtr);
        }

        // Operands passed from locals populated via Load intrinsics.
        public void RunLclVarScenario_Load()
        {
            TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load));

            var left = Avx.LoadVector256((Int32*)(_dataTable.inArray1Ptr));
            var right = LoadVector128((Int32*)(_dataTable.inArray2Ptr));
            var result = Avx2.InsertVector128(left, right, 1);

            Unsafe.Write(_dataTable.outArrayPtr, result);
            ValidateResult(left, right, _dataTable.outArrayPtr);
        }

        // Operands passed from locals populated via LoadAligned intrinsics.
        public void RunLclVarScenario_LoadAligned()
        {
            TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_LoadAligned));

            var left = Avx.LoadAlignedVector256((Int32*)(_dataTable.inArray1Ptr));
            var right = LoadAlignedVector128((Int32*)(_dataTable.inArray2Ptr));
            var result = Avx2.InsertVector128(left, right, 1);

            Unsafe.Write(_dataTable.outArrayPtr, result);
            ValidateResult(left, right, _dataTable.outArrayPtr);
        }

        // Operands read from instance fields of a locally constructed test object.
        public void RunClassLclFldScenario()
        {
            TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario));

            var test = new InsertVector128Test__InsertVector128Int321();
            var result = Avx2.InsertVector128(test._fld1, test._fld2, 1);

            Unsafe.Write(_dataTable.outArrayPtr, result);
            ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
        }

        // Operands read from this instance's own fields.
        public void RunClassFldScenario()
        {
            TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario));

            var result = Avx2.InsertVector128(_fld1, _fld2, 1);

            Unsafe.Write(_dataTable.outArrayPtr, result);
            ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr);
        }

        // Operands read from the fields of a local struct.
        public void RunStructLclFldScenario()
        {
            TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario));

            var test = TestStruct.Create();
            var result = Avx2.InsertVector128(test._fld1, test._fld2, 1);

            Unsafe.Write(_dataTable.outArrayPtr, result);
            ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
        }

        // Delegates to the struct's own scenario method (operands as struct fields,
        // call made from inside the struct).
        public void RunStructFldScenario()
        {
            TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario));

            var test = TestStruct.Create();
            test.RunStructFldScenario(this);
        }

        // On hardware without AVX2, the intrinsic must throw
        // PlatformNotSupportedException; anything else is a failure.
        public void RunUnsupportedScenario()
        {
            TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario));

            bool succeeded = false;

            try
            {
                RunBasicScenario_UnsafeRead();
            }
            catch (PlatformNotSupportedException)
            {
                succeeded = true;
            }

            if (!succeeded)
            {
                Succeeded = false;
            }
        }

        // Validates using vector operands: copies them into managed arrays and defers
        // to the array-based overload.
        private void ValidateResult(Vector256<Int32> left, Vector128<Int32> right, void* result, [CallerMemberName] string method = "")
        {
            Int32[] inArray1 = new Int32[Op1ElementCount];
            Int32[] inArray2 = new Int32[Op2ElementCount];
            Int32[] outArray = new Int32[RetElementCount];

            Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray1[0]), left);
            Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray2[0]), right);
            Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector256<Int32>>());

            ValidateResult(inArray1, inArray2, outArray, method);
        }

        // Validates using raw pointers: copies operand and result memory into managed
        // arrays and defers to the array-based overload.
        private void ValidateResult(void* left, void* right, void* result, [CallerMemberName] string method = "")
        {
            Int32[] inArray1 = new Int32[Op1ElementCount];
            Int32[] inArray2 = new Int32[Op2ElementCount];
            Int32[] outArray = new Int32[RetElementCount];

            Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(left), (uint)Unsafe.SizeOf<Vector256<Int32>>());
            Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(right), (uint)Unsafe.SizeOf<Vector128<Int32>>());
            Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector256<Int32>>());

            ValidateResult(inArray1, inArray2, outArray, method);
        }

        // Element-wise check for InsertVector128 with imm8 = 1: result elements 0-3
        // must equal the lower lane of `left`, and elements 4-7 must equal `right`
        // (hence the right[i - 4] indexing). Logs all operands on mismatch.
        private void ValidateResult(Int32[] left, Int32[] right, Int32[] result, [CallerMemberName] string method = "")
        {
            bool succeeded = true;

            if (result[0] != left[0])
            {
                succeeded = false;
            }
            else
            {
                for (var i = 1; i < RetElementCount; i++)
                {
                    if ((i > 3 ? result[i] != right[i - 4] : result[i] != left[i]))
                    {
                        succeeded = false;
                        break;
                    }
                }
            }

            if (!succeeded)
            {
                TestLibrary.TestFramework.LogInformation($"{nameof(Avx2)}.{nameof(Avx2.InsertVector128)}<Int32>(Vector256<Int32>, Vector128<Int32>.1): {method} failed:");
                TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})");
                TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})");
                TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})");
                TestLibrary.TestFramework.LogInformation(string.Empty);

                Succeeded = false;
            }
        }
    }
}
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./src/tests/Loader/classloader/TypeGeneratorTests/TypeGeneratorTest1399/Generated1399.il
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. .assembly extern mscorlib { .publickeytoken = (B7 7A 5C 56 19 34 E0 89 ) .ver 4:0:0:0 } .assembly extern TestFramework { .publickeytoken = ( B0 3F 5F 7F 11 D5 0A 3A ) } //TYPES IN FORWARDER ASSEMBLIES: //TEST ASSEMBLY: .assembly Generated1399 { .hash algorithm 0x00008004 } .assembly extern xunit.core {} .class public BaseClass0 { .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { ldarg.0 call instance void [mscorlib]System.Object::.ctor() ret } } .class public BaseClass1 extends BaseClass0 { .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { ldarg.0 call instance void BaseClass0::.ctor() ret } } .class public G3_C1871`1<T0> extends class G2_C808`2<class BaseClass1,class BaseClass1> implements class IBase2`2<!T0,class BaseClass1> { .method public hidebysig newslot virtual instance string Method7<M0>() cil managed noinlining { ldstr "G3_C1871::Method7.18455<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string ClassMethod5134() cil managed noinlining { ldstr "G3_C1871::ClassMethod5134.18456()" ret } .method public hidebysig newslot virtual instance string ClassMethod5135<M0>() cil managed noinlining { ldstr "G3_C1871::ClassMethod5135.18457<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string ClassMethod5136<M0>() cil 
managed noinlining { ldstr "G3_C1871::ClassMethod5136.18458<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { ldarg.0 call instance void class G2_C808`2<class BaseClass1,class BaseClass1>::.ctor() ret } } .class public G2_C808`2<T0, T1> extends class G1_C15`2<class BaseClass0,class BaseClass1> { .method public hidebysig newslot virtual instance string ClassMethod2999<M0>() cil managed noinlining { ldstr "G2_C808::ClassMethod2999.12469<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { ldarg.0 call instance void class G1_C15`2<class BaseClass0,class BaseClass1>::.ctor() ret } } .class interface public abstract IBase2`2<+T0, -T1> { .method public hidebysig newslot abstract virtual instance string Method7<M0>() cil managed { } } .class public abstract G1_C15`2<T0, T1> implements class IBase2`2<!T1,!T1>, class IBase1`1<class BaseClass0> { .method public hidebysig newslot virtual instance string Method7<M0>() cil managed noinlining { ldstr "G1_C15::Method7.4885<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string 'IBase2<T1,T1>.Method7'<M0>() cil managed noinlining { .override method instance 
string class IBase2`2<!T1,!T1>::Method7<[1]>() ldstr "G1_C15::Method7.MI.4886<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig virtual instance string Method4() cil managed noinlining { ldstr "G1_C15::Method4.4887()" ret } .method public hidebysig newslot virtual instance string 'IBase1<class BaseClass0>.Method4'() cil managed noinlining { .override method instance string class IBase1`1<class BaseClass0>::Method4() ldstr "G1_C15::Method4.MI.4888()" ret } .method public hidebysig virtual instance string Method5() cil managed noinlining { ldstr "G1_C15::Method5.4889()" ret } .method public hidebysig newslot virtual instance string 'IBase1<class BaseClass0>.Method5'() cil managed noinlining { .override method instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G1_C15::Method5.MI.4890()" ret } .method public hidebysig virtual instance string Method6<M0>() cil managed noinlining { ldstr "G1_C15::Method6.4891<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string 'IBase1<class BaseClass0>.Method6'<M0>() cil managed noinlining { .override method instance string class IBase1`1<class BaseClass0>::Method6<[1]>() ldstr "G1_C15::Method6.MI.4892<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig specialname 
rtspecialname instance void .ctor() cil managed { ldarg.0 call instance void [mscorlib]System.Object::.ctor() ret } } .class interface public abstract IBase1`1<+T0> { .method public hidebysig newslot abstract virtual instance string Method4() cil managed { } .method public hidebysig newslot abstract virtual instance string Method5() cil managed { } .method public hidebysig newslot abstract virtual instance string Method6<M0>() cil managed { } } .class public auto ansi beforefieldinit Generated1399 { .method static void M.BaseClass0<(BaseClass0)W>(!!W inst, string exp) cil managed { .maxstack 5 .locals init (string[] actualResults) ldc.i4.s 0 newarr string stloc.s actualResults ldarg.1 ldstr "M.BaseClass0<(BaseClass0)W>(!!W inst, string exp)" ldc.i4.s 0 ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.BaseClass1<(BaseClass1)W>(!!W inst, string exp) cil managed { .maxstack 5 .locals init (string[] actualResults) ldc.i4.s 0 newarr string stloc.s actualResults ldarg.1 ldstr "M.BaseClass1<(BaseClass1)W>(!!W inst, string exp)" ldc.i4.s 0 ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G3_C1871.T<T0,(class G3_C1871`1<!!T0>)W>(!!W 'inst', string exp) cil managed { .maxstack 13 .locals init (string[] actualResults) ldc.i4.s 8 newarr string stloc.s actualResults ldarg.1 ldstr "M.G3_C1871.T<T0,(class G3_C1871`1<!!T0>)W>(!!W 'inst', string exp)" ldc.i4.s 8 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1871`1<!!T0>::ClassMethod2999<object>() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1871`1<!!T0>::ClassMethod5134() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. 
!!W callvirt instance string class G3_C1871`1<!!T0>::ClassMethod5135<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1871`1<!!T0>::ClassMethod5136<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1871`1<!!T0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1871`1<!!T0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1871`1<!!T0>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1871`1<!!T0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G3_C1871.A<(class G3_C1871`1<class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 13 .locals init (string[] actualResults) ldc.i4.s 8 newarr string stloc.s actualResults ldarg.1 ldstr "M.G3_C1871.A<(class G3_C1871`1<class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 8 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1871`1<class BaseClass0>::ClassMethod2999<object>() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1871`1<class BaseClass0>::ClassMethod5134() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1871`1<class BaseClass0>::ClassMethod5135<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1871`1<class BaseClass0>::ClassMethod5136<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. 
!!W callvirt instance string class G3_C1871`1<class BaseClass0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1871`1<class BaseClass0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1871`1<class BaseClass0>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1871`1<class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G3_C1871.B<(class G3_C1871`1<class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 13 .locals init (string[] actualResults) ldc.i4.s 8 newarr string stloc.s actualResults ldarg.1 ldstr "M.G3_C1871.B<(class G3_C1871`1<class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 8 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1871`1<class BaseClass1>::ClassMethod2999<object>() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1871`1<class BaseClass1>::ClassMethod5134() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1871`1<class BaseClass1>::ClassMethod5135<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1871`1<class BaseClass1>::ClassMethod5136<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1871`1<class BaseClass1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1871`1<class BaseClass1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. 
!!W callvirt instance string class G3_C1871`1<class BaseClass1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1871`1<class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C808.T.T<T0,T1,(class G2_C808`2<!!T0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 10 .locals init (string[] actualResults) ldc.i4.s 5 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C808.T.T<T0,T1,(class G2_C808`2<!!T0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 5 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C808`2<!!T0,!!T1>::ClassMethod2999<object>() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C808`2<!!T0,!!T1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C808`2<!!T0,!!T1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C808`2<!!T0,!!T1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C808`2<!!T0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C808.A.T<T1,(class G2_C808`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 10 .locals init (string[] actualResults) ldc.i4.s 5 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C808.A.T<T1,(class G2_C808`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 5 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. 
!!W callvirt instance string class G2_C808`2<class BaseClass0,!!T1>::ClassMethod2999<object>() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C808`2<class BaseClass0,!!T1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C808`2<class BaseClass0,!!T1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C808`2<class BaseClass0,!!T1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C808`2<class BaseClass0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C808.A.A<(class G2_C808`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 10 .locals init (string[] actualResults) ldc.i4.s 5 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C808.A.A<(class G2_C808`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 5 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C808`2<class BaseClass0,class BaseClass0>::ClassMethod2999<object>() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C808`2<class BaseClass0,class BaseClass0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C808`2<class BaseClass0,class BaseClass0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C808`2<class BaseClass0,class BaseClass0>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. 
!!W callvirt instance string class G2_C808`2<class BaseClass0,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C808.A.B<(class G2_C808`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 10 .locals init (string[] actualResults) ldc.i4.s 5 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C808.A.B<(class G2_C808`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 5 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C808`2<class BaseClass0,class BaseClass1>::ClassMethod2999<object>() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C808`2<class BaseClass0,class BaseClass1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C808`2<class BaseClass0,class BaseClass1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C808`2<class BaseClass0,class BaseClass1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C808`2<class BaseClass0,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C808.B.T<T1,(class G2_C808`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 10 .locals init (string[] actualResults) ldc.i4.s 5 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C808.B.T<T1,(class G2_C808`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 5 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. 
!!W callvirt instance string class G2_C808`2<class BaseClass1,!!T1>::ClassMethod2999<object>() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C808`2<class BaseClass1,!!T1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C808`2<class BaseClass1,!!T1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C808`2<class BaseClass1,!!T1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C808`2<class BaseClass1,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C808.B.A<(class G2_C808`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 10 .locals init (string[] actualResults) ldc.i4.s 5 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C808.B.A<(class G2_C808`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 5 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C808`2<class BaseClass1,class BaseClass0>::ClassMethod2999<object>() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C808`2<class BaseClass1,class BaseClass0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C808`2<class BaseClass1,class BaseClass0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C808`2<class BaseClass1,class BaseClass0>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. 
!!W callvirt instance string class G2_C808`2<class BaseClass1,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C808.B.B<(class G2_C808`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 10 .locals init (string[] actualResults) ldc.i4.s 5 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C808.B.B<(class G2_C808`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 5 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C808`2<class BaseClass1,class BaseClass1>::ClassMethod2999<object>() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C808`2<class BaseClass1,class BaseClass1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C808`2<class BaseClass1,class BaseClass1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C808`2<class BaseClass1,class BaseClass1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C808`2<class BaseClass1,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.T.T<T0,T1,(class IBase2`2<!!T0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.T.T<T0,T1,(class IBase2`2<!!T0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. 
!!W callvirt instance string class IBase2`2<!!T0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.A.T<T1,(class IBase2`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.A.T<T1,(class IBase2`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.A.A<(class IBase2`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.A.A<(class IBase2`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.A.B<(class IBase2`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.A.B<(class IBase2`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. 
!!W callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.B.T<T1,(class IBase2`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.B.T<T1,(class IBase2`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass1,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.B.A<(class IBase2`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.B.A<(class IBase2`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.B.B<(class IBase2`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.B.B<(class IBase2`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. 
!!W callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C15.T.T<T0,T1,(class G1_C15`2<!!T0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 9 .locals init (string[] actualResults) ldc.i4.s 4 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C15.T.T<T0,T1,(class G1_C15`2<!!T0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 4 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<!!T0,!!T1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<!!T0,!!T1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<!!T0,!!T1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<!!T0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C15.A.T<T1,(class G1_C15`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 9 .locals init (string[] actualResults) ldc.i4.s 4 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C15.A.T<T1,(class G1_C15`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 4 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass0,!!T1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass0,!!T1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. 
!!W callvirt instance string class G1_C15`2<class BaseClass0,!!T1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C15.A.A<(class G1_C15`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 9 .locals init (string[] actualResults) ldc.i4.s 4 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C15.A.A<(class G1_C15`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 4 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C15.A.B<(class G1_C15`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 9 .locals init (string[] actualResults) ldc.i4.s 4 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C15.A.B<(class G1_C15`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 4 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. 
!!W callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C15.B.T<T1,(class G1_C15`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 9 .locals init (string[] actualResults) ldc.i4.s 4 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C15.B.T<T1,(class G1_C15`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 4 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass1,!!T1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass1,!!T1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass1,!!T1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. 
!!W callvirt instance string class G1_C15`2<class BaseClass1,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C15.B.A<(class G1_C15`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 9 .locals init (string[] actualResults) ldc.i4.s 4 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C15.B.A<(class G1_C15`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 4 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass0>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C15.B.B<(class G1_C15`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 9 .locals init (string[] actualResults) ldc.i4.s 4 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C15.B.B<(class G1_C15`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 4 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. 
!!W callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase1.T<T0,(class IBase1`1<!!T0>)W>(!!W 'inst', string exp) cil managed { .maxstack 8 .locals init (string[] actualResults) ldc.i4.s 3 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase1.T<T0,(class IBase1`1<!!T0>)W>(!!W 'inst', string exp)" ldc.i4.s 3 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<!!T0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<!!T0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<!!T0>::Method6<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase1.A<(class IBase1`1<class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 8 .locals init (string[] actualResults) ldc.i4.s 3 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase1.A<(class IBase1`1<class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 3 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<class BaseClass0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<class BaseClass0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. 
!!W callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase1.B<(class IBase1`1<class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 8 .locals init (string[] actualResults) ldc.i4.s 3 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase1.B<(class IBase1`1<class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 3 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<class BaseClass1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<class BaseClass1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method public hidebysig static void MethodCallingTest() cil managed { .maxstack 10 .locals init (object V_0) ldstr "========================== Method Calling Test ==========================" call void [mscorlib]System.Console::WriteLine(string) newobj instance void class G3_C1871`1<class BaseClass0>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method6<object>() ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method5() ldstr "G1_C15::Method5.4889()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> 
on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method4() ldstr "G1_C15::Method4.4887()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G1_C15::Method7.4885<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G1_C15::Method7.MI.4886<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass0>::Method4() ldstr "G1_C15::Method4.MI.4888()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G1_C15::Method5.MI.4890()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() ldstr "G1_C15::Method6.MI.4892<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1871`1<class BaseClass0>" call void 
[TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G3_C1871::Method7.18455<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G2_C808`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C808`2<class BaseClass1,class BaseClass1>::ClassMethod2999<object>() ldstr "G2_C808::ClassMethod2999.12469<System.Object>()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass1> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C808`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C808`2<class BaseClass1,class BaseClass1>::Method6<object>() ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass1> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C808`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C808`2<class BaseClass1,class BaseClass1>::Method5() ldstr "G1_C15::Method5.4889()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass1> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C808`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C808`2<class BaseClass1,class BaseClass1>::Method4() ldstr "G1_C15::Method4.4887()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass1> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C808`2<class BaseClass1,class BaseClass1> 
callvirt instance string class G2_C808`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G1_C15::Method7.4885<System.Object>()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass1> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G3_C1871`1<class BaseClass0> callvirt instance string class G3_C1871`1<class BaseClass0>::ClassMethod5136<object>() ldstr "G3_C1871::ClassMethod5136.18458<System.Object>()" ldstr "class G3_C1871`1<class BaseClass0> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1871`1<class BaseClass0> callvirt instance string class G3_C1871`1<class BaseClass0>::ClassMethod5135<object>() ldstr "G3_C1871::ClassMethod5135.18457<System.Object>()" ldstr "class G3_C1871`1<class BaseClass0> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1871`1<class BaseClass0> callvirt instance string class G3_C1871`1<class BaseClass0>::ClassMethod5134() ldstr "G3_C1871::ClassMethod5134.18456()" ldstr "class G3_C1871`1<class BaseClass0> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1871`1<class BaseClass0> callvirt instance string class G3_C1871`1<class BaseClass0>::Method7<object>() ldstr "G3_C1871::Method7.18455<System.Object>()" ldstr "class G3_C1871`1<class BaseClass0> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1871`1<class BaseClass0> callvirt instance string class G3_C1871`1<class BaseClass0>::ClassMethod2999<object>() ldstr "G2_C808::ClassMethod2999.12469<System.Object>()" ldstr "class G3_C1871`1<class BaseClass0> on type class G3_C1871`1<class BaseClass0>" call 
void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1871`1<class BaseClass0> callvirt instance string class G3_C1871`1<class BaseClass0>::Method6<object>() ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G3_C1871`1<class BaseClass0> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1871`1<class BaseClass0> callvirt instance string class G3_C1871`1<class BaseClass0>::Method5() ldstr "G1_C15::Method5.4889()" ldstr "class G3_C1871`1<class BaseClass0> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1871`1<class BaseClass0> callvirt instance string class G3_C1871`1<class BaseClass0>::Method4() ldstr "G1_C15::Method4.4887()" ldstr "class G3_C1871`1<class BaseClass0> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop newobj instance void class G3_C1871`1<class BaseClass1>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method6<object>() ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method5() ldstr "G1_C15::Method5.4889()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class 
BaseClass1>::Method4() ldstr "G1_C15::Method4.4887()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G1_C15::Method7.4885<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G3_C1871::Method7.18455<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass0>::Method4() ldstr "G1_C15::Method4.MI.4888()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G1_C15::Method5.MI.4890()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() ldstr "G1_C15::Method6.MI.4892<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G3_C1871::Method7.18455<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class 
BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G2_C808`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C808`2<class BaseClass1,class BaseClass1>::ClassMethod2999<object>() ldstr "G2_C808::ClassMethod2999.12469<System.Object>()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C808`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C808`2<class BaseClass1,class BaseClass1>::Method6<object>() ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C808`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C808`2<class BaseClass1,class BaseClass1>::Method5() ldstr "G1_C15::Method5.4889()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C808`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C808`2<class BaseClass1,class BaseClass1>::Method4() ldstr "G1_C15::Method4.4887()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C808`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C808`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G1_C15::Method7.4885<System.Object>()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void 
[TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G3_C1871`1<class BaseClass1> callvirt instance string class G3_C1871`1<class BaseClass1>::ClassMethod5136<object>() ldstr "G3_C1871::ClassMethod5136.18458<System.Object>()" ldstr "class G3_C1871`1<class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1871`1<class BaseClass1> callvirt instance string class G3_C1871`1<class BaseClass1>::ClassMethod5135<object>() ldstr "G3_C1871::ClassMethod5135.18457<System.Object>()" ldstr "class G3_C1871`1<class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1871`1<class BaseClass1> callvirt instance string class G3_C1871`1<class BaseClass1>::ClassMethod5134() ldstr "G3_C1871::ClassMethod5134.18456()" ldstr "class G3_C1871`1<class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1871`1<class BaseClass1> callvirt instance string class G3_C1871`1<class BaseClass1>::Method7<object>() ldstr "G3_C1871::Method7.18455<System.Object>()" ldstr "class G3_C1871`1<class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1871`1<class BaseClass1> callvirt instance string class G3_C1871`1<class BaseClass1>::ClassMethod2999<object>() ldstr "G2_C808::ClassMethod2999.12469<System.Object>()" ldstr "class G3_C1871`1<class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1871`1<class BaseClass1> callvirt instance string class G3_C1871`1<class BaseClass1>::Method6<object>() ldstr "G1_C15::Method6.4891<System.Object>()" ldstr 
"class G3_C1871`1<class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1871`1<class BaseClass1> callvirt instance string class G3_C1871`1<class BaseClass1>::Method5() ldstr "G1_C15::Method5.4889()" ldstr "class G3_C1871`1<class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1871`1<class BaseClass1> callvirt instance string class G3_C1871`1<class BaseClass1>::Method4() ldstr "G1_C15::Method4.4887()" ldstr "class G3_C1871`1<class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop newobj instance void class G2_C808`2<class BaseClass0,class BaseClass0>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method6<object>() ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method5() ldstr "G1_C15::Method5.4889()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method4() ldstr "G1_C15::Method4.4887()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass0>" call void 
[TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G1_C15::Method7.4885<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G1_C15::Method7.MI.4886<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass0>::Method4() ldstr "G1_C15::Method4.MI.4888()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C808`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G1_C15::Method5.MI.4890()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C808`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() ldstr "G1_C15::Method6.MI.4892<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C808`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G1_C15::Method7.MI.4886<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass0>" call void 
[TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G2_C808`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C808`2<class BaseClass0,class BaseClass0>::ClassMethod2999<object>() ldstr "G2_C808::ClassMethod2999.12469<System.Object>()" ldstr "class G2_C808`2<class BaseClass0,class BaseClass0> on type class G2_C808`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C808`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C808`2<class BaseClass0,class BaseClass0>::Method6<object>() ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G2_C808`2<class BaseClass0,class BaseClass0> on type class G2_C808`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C808`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C808`2<class BaseClass0,class BaseClass0>::Method5() ldstr "G1_C15::Method5.4889()" ldstr "class G2_C808`2<class BaseClass0,class BaseClass0> on type class G2_C808`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C808`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C808`2<class BaseClass0,class BaseClass0>::Method4() ldstr "G1_C15::Method4.4887()" ldstr "class G2_C808`2<class BaseClass0,class BaseClass0> on type class G2_C808`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C808`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C808`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G1_C15::Method7.4885<System.Object>()" ldstr "class G2_C808`2<class BaseClass0,class BaseClass0> on type class G2_C808`2<class BaseClass0,class BaseClass0>" call void 
[TestFramework]TestFramework::MethodCallTest(string,string,string) pop newobj instance void class G2_C808`2<class BaseClass0,class BaseClass1>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method6<object>() ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method5() ldstr "G1_C15::Method5.4889()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method4() ldstr "G1_C15::Method4.4887()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G1_C15::Method7.4885<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G1_C15::Method7.MI.4886<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass1>" call void 
[TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass0>::Method4() ldstr "G1_C15::Method4.MI.4888()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C808`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G1_C15::Method5.MI.4890()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C808`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() ldstr "G1_C15::Method6.MI.4892<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C808`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G1_C15::Method7.MI.4886<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G2_C808`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C808`2<class BaseClass0,class BaseClass1>::ClassMethod2999<object>() ldstr "G2_C808::ClassMethod2999.12469<System.Object>()" ldstr "class G2_C808`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C808`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C808`2<class BaseClass0,class BaseClass1>::Method6<object>() ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G2_C808`2<class BaseClass0,class 
BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C808`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C808`2<class BaseClass0,class BaseClass1>::Method5() ldstr "G1_C15::Method5.4889()" ldstr "class G2_C808`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C808`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C808`2<class BaseClass0,class BaseClass1>::Method4() ldstr "G1_C15::Method4.4887()" ldstr "class G2_C808`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C808`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C808`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G1_C15::Method7.4885<System.Object>()" ldstr "class G2_C808`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop newobj instance void class G2_C808`2<class BaseClass1,class BaseClass0>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method6<object>() ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method5() ldstr "G1_C15::Method5.4889()" ldstr "class 
G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method4() ldstr "G1_C15::Method4.4887()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G1_C15::Method7.4885<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G1_C15::Method7.MI.4886<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C808`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass0>::Method4() ldstr "G1_C15::Method4.MI.4888()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C808`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G1_C15::Method5.MI.4890()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C808`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() ldstr 
"G1_C15::Method6.MI.4892<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C808`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G1_C15::Method7.MI.4886<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G2_C808`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C808`2<class BaseClass1,class BaseClass0>::ClassMethod2999<object>() ldstr "G2_C808::ClassMethod2999.12469<System.Object>()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass0> on type class G2_C808`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C808`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C808`2<class BaseClass1,class BaseClass0>::Method6<object>() ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass0> on type class G2_C808`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C808`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C808`2<class BaseClass1,class BaseClass0>::Method5() ldstr "G1_C15::Method5.4889()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass0> on type class G2_C808`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C808`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C808`2<class BaseClass1,class BaseClass0>::Method4() ldstr "G1_C15::Method4.4887()" ldstr "class G2_C808`2<class 
BaseClass1,class BaseClass0> on type class G2_C808`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C808`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C808`2<class BaseClass1,class BaseClass0>::Method7<object>() ldstr "G1_C15::Method7.4885<System.Object>()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass0> on type class G2_C808`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop newobj instance void class G2_C808`2<class BaseClass1,class BaseClass1>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method6<object>() ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method5() ldstr "G1_C15::Method5.4889()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method4() ldstr "G1_C15::Method4.4887()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr 
"G1_C15::Method7.4885<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G1_C15::Method7.MI.4886<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C808`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass0>::Method4() ldstr "G1_C15::Method4.MI.4888()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C808`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G1_C15::Method5.MI.4890()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C808`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() ldstr "G1_C15::Method6.MI.4892<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C808`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G1_C15::Method7.MI.4886<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G2_C808`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C808`2<class BaseClass1,class 
BaseClass1>::ClassMethod2999<object>() ldstr "G2_C808::ClassMethod2999.12469<System.Object>()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass1> on type class G2_C808`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C808`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C808`2<class BaseClass1,class BaseClass1>::Method6<object>() ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass1> on type class G2_C808`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C808`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C808`2<class BaseClass1,class BaseClass1>::Method5() ldstr "G1_C15::Method5.4889()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass1> on type class G2_C808`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C808`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C808`2<class BaseClass1,class BaseClass1>::Method4() ldstr "G1_C15::Method4.4887()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass1> on type class G2_C808`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C808`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C808`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G1_C15::Method7.4885<System.Object>()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass1> on type class G2_C808`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldstr "========================================================================\n\n" call void [mscorlib]System.Console::WriteLine(string) ret } 
.method public hidebysig static void ConstrainedCallsTest() cil managed { .maxstack 10 .locals init (object V_0) ldstr "========================== Constrained Calls Test ==========================" call void [mscorlib]System.Console::WriteLine(string) newobj instance void class G3_C1871`1<class BaseClass0>::.ctor() stloc.0 ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G1_C15.T.T<class BaseClass0,class BaseClass1,class G3_C1871`1<class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G1_C15.A.T<class BaseClass1,class G3_C1871`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G1_C15.A.B<class G3_C1871`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C15::Method7.MI.4886<System.Object>()#" call void Generated1399::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G3_C1871`1<class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C15::Method7.MI.4886<System.Object>()#" call void Generated1399::M.IBase2.B.T<class BaseClass1,class G3_C1871`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C15::Method7.MI.4886<System.Object>()#" call void Generated1399::M.IBase2.B.B<class G3_C1871`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C15::Method4.MI.4888()#G1_C15::Method5.MI.4890()#G1_C15::Method6.MI.4892<System.Object>()#" call void Generated1399::M.IBase1.T<class BaseClass0,class G3_C1871`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C15::Method4.MI.4888()#G1_C15::Method5.MI.4890()#G1_C15::Method6.MI.4892<System.Object>()#" call void Generated1399::M.IBase1.A<class G3_C1871`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G3_C1871::Method7.18455<System.Object>()#" 
call void Generated1399::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G3_C1871`1<class BaseClass0>>(!!2,string) ldloc.0 ldstr "G3_C1871::Method7.18455<System.Object>()#" call void Generated1399::M.IBase2.A.T<class BaseClass1,class G3_C1871`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G3_C1871::Method7.18455<System.Object>()#" call void Generated1399::M.IBase2.A.B<class G3_C1871`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C808::ClassMethod2999.12469<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G2_C808.T.T<class BaseClass1,class BaseClass1,class G3_C1871`1<class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C808::ClassMethod2999.12469<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G2_C808.B.T<class BaseClass1,class G3_C1871`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C808::ClassMethod2999.12469<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G2_C808.B.B<class G3_C1871`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C808::ClassMethod2999.12469<System.Object>()#G3_C1871::ClassMethod5134.18456()#G3_C1871::ClassMethod5135.18457<System.Object>()#G3_C1871::ClassMethod5136.18458<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G3_C1871::Method7.18455<System.Object>()#" call void Generated1399::M.G3_C1871.T<class BaseClass0,class G3_C1871`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr 
"G2_C808::ClassMethod2999.12469<System.Object>()#G3_C1871::ClassMethod5134.18456()#G3_C1871::ClassMethod5135.18457<System.Object>()#G3_C1871::ClassMethod5136.18458<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G3_C1871::Method7.18455<System.Object>()#" call void Generated1399::M.G3_C1871.A<class G3_C1871`1<class BaseClass0>>(!!0,string) newobj instance void class G3_C1871`1<class BaseClass1>::.ctor() stloc.0 ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G1_C15.T.T<class BaseClass0,class BaseClass1,class G3_C1871`1<class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G1_C15.A.T<class BaseClass1,class G3_C1871`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G1_C15.A.B<class G3_C1871`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G3_C1871::Method7.18455<System.Object>()#" call void Generated1399::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G3_C1871`1<class BaseClass1>>(!!2,string) ldloc.0 ldstr "G3_C1871::Method7.18455<System.Object>()#" call void Generated1399::M.IBase2.B.T<class BaseClass1,class G3_C1871`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G3_C1871::Method7.18455<System.Object>()#" call void Generated1399::M.IBase2.B.B<class G3_C1871`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C15::Method4.MI.4888()#G1_C15::Method5.MI.4890()#G1_C15::Method6.MI.4892<System.Object>()#" call void Generated1399::M.IBase1.T<class BaseClass0,class G3_C1871`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C15::Method4.MI.4888()#G1_C15::Method5.MI.4890()#G1_C15::Method6.MI.4892<System.Object>()#" 
call void Generated1399::M.IBase1.A<class G3_C1871`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G3_C1871::Method7.18455<System.Object>()#" call void Generated1399::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G3_C1871`1<class BaseClass1>>(!!2,string) ldloc.0 ldstr "G3_C1871::Method7.18455<System.Object>()#" call void Generated1399::M.IBase2.A.T<class BaseClass1,class G3_C1871`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G3_C1871::Method7.18455<System.Object>()#" call void Generated1399::M.IBase2.A.B<class G3_C1871`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C808::ClassMethod2999.12469<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G2_C808.T.T<class BaseClass1,class BaseClass1,class G3_C1871`1<class BaseClass1>>(!!2,string) ldloc.0 ldstr "G2_C808::ClassMethod2999.12469<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G2_C808.B.T<class BaseClass1,class G3_C1871`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C808::ClassMethod2999.12469<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G2_C808.B.B<class G3_C1871`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C808::ClassMethod2999.12469<System.Object>()#G3_C1871::ClassMethod5134.18456()#G3_C1871::ClassMethod5135.18457<System.Object>()#G3_C1871::ClassMethod5136.18458<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G3_C1871::Method7.18455<System.Object>()#" call void Generated1399::M.G3_C1871.T<class BaseClass1,class G3_C1871`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr 
"G2_C808::ClassMethod2999.12469<System.Object>()#G3_C1871::ClassMethod5134.18456()#G3_C1871::ClassMethod5135.18457<System.Object>()#G3_C1871::ClassMethod5136.18458<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G3_C1871::Method7.18455<System.Object>()#" call void Generated1399::M.G3_C1871.B<class G3_C1871`1<class BaseClass1>>(!!0,string) newobj instance void class G2_C808`2<class BaseClass0,class BaseClass0>::.ctor() stloc.0 ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G1_C15.T.T<class BaseClass0,class BaseClass1,class G2_C808`2<class BaseClass0,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G1_C15.A.T<class BaseClass1,class G2_C808`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G1_C15.A.B<class G2_C808`2<class BaseClass0,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C15::Method7.MI.4886<System.Object>()#" call void Generated1399::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G2_C808`2<class BaseClass0,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C15::Method7.MI.4886<System.Object>()#" call void Generated1399::M.IBase2.B.T<class BaseClass1,class G2_C808`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C15::Method7.MI.4886<System.Object>()#" call void Generated1399::M.IBase2.B.B<class G2_C808`2<class BaseClass0,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C15::Method4.MI.4888()#G1_C15::Method5.MI.4890()#G1_C15::Method6.MI.4892<System.Object>()#" call void Generated1399::M.IBase1.T<class BaseClass0,class G2_C808`2<class BaseClass0,class 
BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C15::Method4.MI.4888()#G1_C15::Method5.MI.4890()#G1_C15::Method6.MI.4892<System.Object>()#" call void Generated1399::M.IBase1.A<class G2_C808`2<class BaseClass0,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C15::Method7.MI.4886<System.Object>()#" call void Generated1399::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G2_C808`2<class BaseClass0,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C15::Method7.MI.4886<System.Object>()#" call void Generated1399::M.IBase2.A.T<class BaseClass1,class G2_C808`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C15::Method7.MI.4886<System.Object>()#" call void Generated1399::M.IBase2.A.B<class G2_C808`2<class BaseClass0,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C808::ClassMethod2999.12469<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G2_C808.T.T<class BaseClass0,class BaseClass0,class G2_C808`2<class BaseClass0,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C808::ClassMethod2999.12469<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G2_C808.A.T<class BaseClass0,class G2_C808`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C808::ClassMethod2999.12469<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G2_C808.A.A<class G2_C808`2<class BaseClass0,class BaseClass0>>(!!0,string) newobj instance void class G2_C808`2<class BaseClass0,class BaseClass1>::.ctor() stloc.0 ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G1_C15.T.T<class BaseClass0,class BaseClass1,class 
G2_C808`2<class BaseClass0,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G1_C15.A.T<class BaseClass1,class G2_C808`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G1_C15.A.B<class G2_C808`2<class BaseClass0,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C15::Method7.MI.4886<System.Object>()#" call void Generated1399::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G2_C808`2<class BaseClass0,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C15::Method7.MI.4886<System.Object>()#" call void Generated1399::M.IBase2.B.T<class BaseClass1,class G2_C808`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C15::Method7.MI.4886<System.Object>()#" call void Generated1399::M.IBase2.B.B<class G2_C808`2<class BaseClass0,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C15::Method4.MI.4888()#G1_C15::Method5.MI.4890()#G1_C15::Method6.MI.4892<System.Object>()#" call void Generated1399::M.IBase1.T<class BaseClass0,class G2_C808`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C15::Method4.MI.4888()#G1_C15::Method5.MI.4890()#G1_C15::Method6.MI.4892<System.Object>()#" call void Generated1399::M.IBase1.A<class G2_C808`2<class BaseClass0,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C15::Method7.MI.4886<System.Object>()#" call void Generated1399::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G2_C808`2<class BaseClass0,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C15::Method7.MI.4886<System.Object>()#" call void Generated1399::M.IBase2.A.T<class BaseClass1,class G2_C808`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C15::Method7.MI.4886<System.Object>()#" call void Generated1399::M.IBase2.A.B<class 
G2_C808`2<class BaseClass0,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C808::ClassMethod2999.12469<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G2_C808.T.T<class BaseClass0,class BaseClass1,class G2_C808`2<class BaseClass0,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G2_C808::ClassMethod2999.12469<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G2_C808.A.T<class BaseClass1,class G2_C808`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C808::ClassMethod2999.12469<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G2_C808.A.B<class G2_C808`2<class BaseClass0,class BaseClass1>>(!!0,string) newobj instance void class G2_C808`2<class BaseClass1,class BaseClass0>::.ctor() stloc.0 ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G1_C15.T.T<class BaseClass0,class BaseClass1,class G2_C808`2<class BaseClass1,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G1_C15.A.T<class BaseClass1,class G2_C808`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G1_C15.A.B<class G2_C808`2<class BaseClass1,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C15::Method7.MI.4886<System.Object>()#" call void Generated1399::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G2_C808`2<class 
BaseClass1,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C15::Method7.MI.4886<System.Object>()#" call void Generated1399::M.IBase2.B.T<class BaseClass1,class G2_C808`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C15::Method7.MI.4886<System.Object>()#" call void Generated1399::M.IBase2.B.B<class G2_C808`2<class BaseClass1,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C15::Method4.MI.4888()#G1_C15::Method5.MI.4890()#G1_C15::Method6.MI.4892<System.Object>()#" call void Generated1399::M.IBase1.T<class BaseClass0,class G2_C808`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C15::Method4.MI.4888()#G1_C15::Method5.MI.4890()#G1_C15::Method6.MI.4892<System.Object>()#" call void Generated1399::M.IBase1.A<class G2_C808`2<class BaseClass1,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C15::Method7.MI.4886<System.Object>()#" call void Generated1399::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G2_C808`2<class BaseClass1,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C15::Method7.MI.4886<System.Object>()#" call void Generated1399::M.IBase2.A.T<class BaseClass1,class G2_C808`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C15::Method7.MI.4886<System.Object>()#" call void Generated1399::M.IBase2.A.B<class G2_C808`2<class BaseClass1,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C808::ClassMethod2999.12469<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G2_C808.T.T<class BaseClass1,class BaseClass0,class G2_C808`2<class BaseClass1,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C808::ClassMethod2999.12469<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G2_C808.B.T<class BaseClass0,class G2_C808`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr 
"G2_C808::ClassMethod2999.12469<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G2_C808.B.A<class G2_C808`2<class BaseClass1,class BaseClass0>>(!!0,string) newobj instance void class G2_C808`2<class BaseClass1,class BaseClass1>::.ctor() stloc.0 ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G1_C15.T.T<class BaseClass0,class BaseClass1,class G2_C808`2<class BaseClass1,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G1_C15.A.T<class BaseClass1,class G2_C808`2<class BaseClass1,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G1_C15.A.B<class G2_C808`2<class BaseClass1,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C15::Method7.MI.4886<System.Object>()#" call void Generated1399::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G2_C808`2<class BaseClass1,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C15::Method7.MI.4886<System.Object>()#" call void Generated1399::M.IBase2.B.T<class BaseClass1,class G2_C808`2<class BaseClass1,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C15::Method7.MI.4886<System.Object>()#" call void Generated1399::M.IBase2.B.B<class G2_C808`2<class BaseClass1,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C15::Method4.MI.4888()#G1_C15::Method5.MI.4890()#G1_C15::Method6.MI.4892<System.Object>()#" call void Generated1399::M.IBase1.T<class BaseClass0,class G2_C808`2<class BaseClass1,class BaseClass1>>(!!1,string) ldloc.0 ldstr 
"G1_C15::Method4.MI.4888()#G1_C15::Method5.MI.4890()#G1_C15::Method6.MI.4892<System.Object>()#"
call void Generated1399::M.IBase1.A<class G2_C808`2<class BaseClass1,class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G1_C15::Method7.MI.4886<System.Object>()#"
call void Generated1399::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G2_C808`2<class BaseClass1,class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G1_C15::Method7.MI.4886<System.Object>()#"
call void Generated1399::M.IBase2.A.T<class BaseClass1,class G2_C808`2<class BaseClass1,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G1_C15::Method7.MI.4886<System.Object>()#"
call void Generated1399::M.IBase2.A.B<class G2_C808`2<class BaseClass1,class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G2_C808::ClassMethod2999.12469<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#"
call void Generated1399::M.G2_C808.T.T<class BaseClass1,class BaseClass1,class G2_C808`2<class BaseClass1,class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G2_C808::ClassMethod2999.12469<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#"
call void Generated1399::M.G2_C808.B.T<class BaseClass1,class G2_C808`2<class BaseClass1,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G2_C808::ClassMethod2999.12469<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#"
call void Generated1399::M.G2_C808.B.B<class G2_C808`2<class BaseClass1,class BaseClass1>>(!!0,string)
ldstr "========================================================================\n\n"
call void [mscorlib]System.Console::WriteLine(string)
ret
}

// Struct-constrained-interface section of this generated test suite.
// The body only writes the opening and closing banners and returns: no
// constrained calls are emitted in this generated instance (presumably
// because the scenario under test involves no value-type implementers —
// generator decision; not confirmable from this chunk alone).
.method public hidebysig static void StructConstrainedInterfaceCallsTest() cil managed
{
  .maxstack 10
  ldstr "===================== Struct Constrained Interface Calls Test ====================="
  call void [mscorlib]System.Console::WriteLine(string)
  ldstr "========================================================================\n\n"
  call void [mscorlib]System.Console::WriteLine(string)
  ret
}

// CalliTest: for each test type, loads a raw function pointer with
// ldvirtftn and invokes it through calli, then passes the returned
// identification string plus the expected string to
// TestFramework::MethodCallTest for verification.  (Method continues
// beyond this point in the file.)
.method public hidebysig static void CalliTest() cil managed
{
  .maxstack 10
  .locals init (object V_0)
  ldstr "========================== Method Calli Test =========================="
  call void [mscorlib]System.Console::WriteLine(string)
  // Exercise G3_C1871<BaseClass0> through every class/interface view.
  newobj instance void class G3_C1871`1<class BaseClass0>::.ctor()
  stloc.0
  ldloc.0
  castclass class G1_C15`2<class BaseClass0,class BaseClass1>
  ldloc.0
  ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method6<object>()
  calli default string(class G3_C1871`1<class BaseClass0>)
  ldstr "G1_C15::Method6.4891<System.Object>()"
  ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G3_C1871`1<class BaseClass0>"
  call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
  ldloc.0
  castclass class G1_C15`2<class BaseClass0,class BaseClass1>
  ldloc.0
  ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method5()
  calli default string(class G3_C1871`1<class BaseClass0>)
  ldstr "G1_C15::Method5.4889()"
  ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G3_C1871`1<class BaseClass0>"
  call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
  ldloc.0
  castclass class G1_C15`2<class BaseClass0,class BaseClass1>
  ldloc.0
  ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method4()
  calli default string(class G3_C1871`1<class BaseClass0>)
  ldstr "G1_C15::Method4.4887()"
  ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G3_C1871`1<class BaseClass0>"
  call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
  ldloc.0
  castclass class G1_C15`2<class BaseClass0,class BaseClass1>
  ldloc.0
  ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method7<object>()
  calli default
string(class G3_C1871`1<class BaseClass0>) ldstr "G1_C15::Method7.4885<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G3_C1871`1<class BaseClass0>) ldstr "G1_C15::Method7.MI.4886<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G3_C1871`1<class BaseClass0>) ldstr "G1_C15::Method4.MI.4888()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G3_C1871`1<class BaseClass0>) ldstr "G1_C15::Method5.MI.4890()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G3_C1871`1<class BaseClass0>) ldstr "G1_C15::Method6.MI.4892<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G3_C1871`1<class BaseClass0>) ldstr "G3_C1871::Method7.18455<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class 
G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C808`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C808`2<class BaseClass1,class BaseClass1>::ClassMethod2999<object>() calli default string(class G3_C1871`1<class BaseClass0>) ldstr "G2_C808::ClassMethod2999.12469<System.Object>()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass1> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C808`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C808`2<class BaseClass1,class BaseClass1>::Method6<object>() calli default string(class G3_C1871`1<class BaseClass0>) ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass1> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C808`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C808`2<class BaseClass1,class BaseClass1>::Method5() calli default string(class G3_C1871`1<class BaseClass0>) ldstr "G1_C15::Method5.4889()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass1> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C808`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C808`2<class BaseClass1,class BaseClass1>::Method4() calli default string(class G3_C1871`1<class BaseClass0>) ldstr "G1_C15::Method4.4887()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass1> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C808`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance 
string class G2_C808`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G3_C1871`1<class BaseClass0>) ldstr "G1_C15::Method7.4885<System.Object>()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass1> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1871`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1871`1<class BaseClass0>::ClassMethod5136<object>() calli default string(class G3_C1871`1<class BaseClass0>) ldstr "G3_C1871::ClassMethod5136.18458<System.Object>()" ldstr "class G3_C1871`1<class BaseClass0> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1871`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1871`1<class BaseClass0>::ClassMethod5135<object>() calli default string(class G3_C1871`1<class BaseClass0>) ldstr "G3_C1871::ClassMethod5135.18457<System.Object>()" ldstr "class G3_C1871`1<class BaseClass0> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1871`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1871`1<class BaseClass0>::ClassMethod5134() calli default string(class G3_C1871`1<class BaseClass0>) ldstr "G3_C1871::ClassMethod5134.18456()" ldstr "class G3_C1871`1<class BaseClass0> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1871`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1871`1<class BaseClass0>::Method7<object>() calli default string(class G3_C1871`1<class BaseClass0>) ldstr "G3_C1871::Method7.18455<System.Object>()" ldstr "class G3_C1871`1<class BaseClass0> on type class G3_C1871`1<class BaseClass0>" call void 
[TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1871`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1871`1<class BaseClass0>::ClassMethod2999<object>() calli default string(class G3_C1871`1<class BaseClass0>) ldstr "G2_C808::ClassMethod2999.12469<System.Object>()" ldstr "class G3_C1871`1<class BaseClass0> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1871`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1871`1<class BaseClass0>::Method6<object>() calli default string(class G3_C1871`1<class BaseClass0>) ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G3_C1871`1<class BaseClass0> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1871`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1871`1<class BaseClass0>::Method5() calli default string(class G3_C1871`1<class BaseClass0>) ldstr "G1_C15::Method5.4889()" ldstr "class G3_C1871`1<class BaseClass0> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1871`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1871`1<class BaseClass0>::Method4() calli default string(class G3_C1871`1<class BaseClass0>) ldstr "G1_C15::Method4.4887()" ldstr "class G3_C1871`1<class BaseClass0> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) newobj instance void class G3_C1871`1<class BaseClass1>::.ctor() stloc.0 ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method6<object>() calli default string(class G3_C1871`1<class BaseClass1>) ldstr 
"G1_C15::Method6.4891<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method5() calli default string(class G3_C1871`1<class BaseClass1>) ldstr "G1_C15::Method5.4889()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method4() calli default string(class G3_C1871`1<class BaseClass1>) ldstr "G1_C15::Method4.4887()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G3_C1871`1<class BaseClass1>) ldstr "G1_C15::Method7.4885<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G3_C1871`1<class BaseClass1>) ldstr "G3_C1871::Method7.18455<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class 
BaseClass0>::Method4() calli default string(class G3_C1871`1<class BaseClass1>) ldstr "G1_C15::Method4.MI.4888()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G3_C1871`1<class BaseClass1>) ldstr "G1_C15::Method5.MI.4890()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G3_C1871`1<class BaseClass1>) ldstr "G1_C15::Method6.MI.4892<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G3_C1871`1<class BaseClass1>) ldstr "G3_C1871::Method7.18455<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C808`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C808`2<class BaseClass1,class BaseClass1>::ClassMethod2999<object>() calli default string(class G3_C1871`1<class BaseClass1>) ldstr "G2_C808::ClassMethod2999.12469<System.Object>()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C808`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C808`2<class BaseClass1,class 
BaseClass1>::Method6<object>() calli default string(class G3_C1871`1<class BaseClass1>) ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C808`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C808`2<class BaseClass1,class BaseClass1>::Method5() calli default string(class G3_C1871`1<class BaseClass1>) ldstr "G1_C15::Method5.4889()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C808`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C808`2<class BaseClass1,class BaseClass1>::Method4() calli default string(class G3_C1871`1<class BaseClass1>) ldstr "G1_C15::Method4.4887()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C808`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C808`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G3_C1871`1<class BaseClass1>) ldstr "G1_C15::Method7.4885<System.Object>()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1871`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1871`1<class BaseClass1>::ClassMethod5136<object>() calli default string(class G3_C1871`1<class BaseClass1>) ldstr "G3_C1871::ClassMethod5136.18458<System.Object>()" ldstr "class G3_C1871`1<class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void 
[TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1871`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1871`1<class BaseClass1>::ClassMethod5135<object>() calli default string(class G3_C1871`1<class BaseClass1>) ldstr "G3_C1871::ClassMethod5135.18457<System.Object>()" ldstr "class G3_C1871`1<class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1871`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1871`1<class BaseClass1>::ClassMethod5134() calli default string(class G3_C1871`1<class BaseClass1>) ldstr "G3_C1871::ClassMethod5134.18456()" ldstr "class G3_C1871`1<class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1871`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1871`1<class BaseClass1>::Method7<object>() calli default string(class G3_C1871`1<class BaseClass1>) ldstr "G3_C1871::Method7.18455<System.Object>()" ldstr "class G3_C1871`1<class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1871`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1871`1<class BaseClass1>::ClassMethod2999<object>() calli default string(class G3_C1871`1<class BaseClass1>) ldstr "G2_C808::ClassMethod2999.12469<System.Object>()" ldstr "class G3_C1871`1<class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1871`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1871`1<class BaseClass1>::Method6<object>() calli default string(class G3_C1871`1<class BaseClass1>) ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class 
G3_C1871`1<class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1871`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1871`1<class BaseClass1>::Method5() calli default string(class G3_C1871`1<class BaseClass1>) ldstr "G1_C15::Method5.4889()" ldstr "class G3_C1871`1<class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1871`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1871`1<class BaseClass1>::Method4() calli default string(class G3_C1871`1<class BaseClass1>) ldstr "G1_C15::Method4.4887()" ldstr "class G3_C1871`1<class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) newobj instance void class G2_C808`2<class BaseClass0,class BaseClass0>::.ctor() stloc.0 ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method6<object>() calli default string(class G2_C808`2<class BaseClass0,class BaseClass0>) ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method5() calli default string(class G2_C808`2<class BaseClass0,class BaseClass0>) ldstr "G1_C15::Method5.4889()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class 
BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method4() calli default string(class G2_C808`2<class BaseClass0,class BaseClass0>) ldstr "G1_C15::Method4.4887()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C808`2<class BaseClass0,class BaseClass0>) ldstr "G1_C15::Method7.4885<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G2_C808`2<class BaseClass0,class BaseClass0>) ldstr "G1_C15::Method7.MI.4886<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G2_C808`2<class BaseClass0,class BaseClass0>) ldstr "G1_C15::Method4.MI.4888()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C808`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G2_C808`2<class BaseClass0,class BaseClass0>) ldstr "G1_C15::Method5.MI.4890()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C808`2<class BaseClass0,class BaseClass0>" 
call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G2_C808`2<class BaseClass0,class BaseClass0>) ldstr "G1_C15::Method6.MI.4892<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C808`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C808`2<class BaseClass0,class BaseClass0>) ldstr "G1_C15::Method7.MI.4886<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C808`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C808`2<class BaseClass0,class BaseClass0>::ClassMethod2999<object>() calli default string(class G2_C808`2<class BaseClass0,class BaseClass0>) ldstr "G2_C808::ClassMethod2999.12469<System.Object>()" ldstr "class G2_C808`2<class BaseClass0,class BaseClass0> on type class G2_C808`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C808`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C808`2<class BaseClass0,class BaseClass0>::Method6<object>() calli default string(class G2_C808`2<class BaseClass0,class BaseClass0>) ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G2_C808`2<class BaseClass0,class BaseClass0> on type class G2_C808`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C808`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class 
G2_C808`2<class BaseClass0,class BaseClass0>::Method5() calli default string(class G2_C808`2<class BaseClass0,class BaseClass0>) ldstr "G1_C15::Method5.4889()" ldstr "class G2_C808`2<class BaseClass0,class BaseClass0> on type class G2_C808`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C808`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C808`2<class BaseClass0,class BaseClass0>::Method4() calli default string(class G2_C808`2<class BaseClass0,class BaseClass0>) ldstr "G1_C15::Method4.4887()" ldstr "class G2_C808`2<class BaseClass0,class BaseClass0> on type class G2_C808`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C808`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C808`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G2_C808`2<class BaseClass0,class BaseClass0>) ldstr "G1_C15::Method7.4885<System.Object>()" ldstr "class G2_C808`2<class BaseClass0,class BaseClass0> on type class G2_C808`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) newobj instance void class G2_C808`2<class BaseClass0,class BaseClass1>::.ctor() stloc.0 ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method6<object>() calli default string(class G2_C808`2<class BaseClass0,class BaseClass1>) ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class 
G1_C15`2<class BaseClass0,class BaseClass1>::Method5() calli default string(class G2_C808`2<class BaseClass0,class BaseClass1>) ldstr "G1_C15::Method5.4889()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method4() calli default string(class G2_C808`2<class BaseClass0,class BaseClass1>) ldstr "G1_C15::Method4.4887()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C808`2<class BaseClass0,class BaseClass1>) ldstr "G1_C15::Method7.4885<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G2_C808`2<class BaseClass0,class BaseClass1>) ldstr "G1_C15::Method7.MI.4886<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G2_C808`2<class BaseClass0,class BaseClass1>) ldstr "G1_C15::Method4.MI.4888()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C808`2<class 
BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G2_C808`2<class BaseClass0,class BaseClass1>) ldstr "G1_C15::Method5.MI.4890()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C808`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G2_C808`2<class BaseClass0,class BaseClass1>) ldstr "G1_C15::Method6.MI.4892<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C808`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C808`2<class BaseClass0,class BaseClass1>) ldstr "G1_C15::Method7.MI.4886<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C808`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C808`2<class BaseClass0,class BaseClass1>::ClassMethod2999<object>() calli default string(class G2_C808`2<class BaseClass0,class BaseClass1>) ldstr "G2_C808::ClassMethod2999.12469<System.Object>()" ldstr "class G2_C808`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C808`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C808`2<class BaseClass0,class BaseClass1>::Method6<object>() calli default 
string(class G2_C808`2<class BaseClass0,class BaseClass1>) ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G2_C808`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C808`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C808`2<class BaseClass0,class BaseClass1>::Method5() calli default string(class G2_C808`2<class BaseClass0,class BaseClass1>) ldstr "G1_C15::Method5.4889()" ldstr "class G2_C808`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C808`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C808`2<class BaseClass0,class BaseClass1>::Method4() calli default string(class G2_C808`2<class BaseClass0,class BaseClass1>) ldstr "G1_C15::Method4.4887()" ldstr "class G2_C808`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C808`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C808`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C808`2<class BaseClass0,class BaseClass1>) ldstr "G1_C15::Method7.4885<System.Object>()" ldstr "class G2_C808`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) newobj instance void class G2_C808`2<class BaseClass1,class BaseClass0>::.ctor() stloc.0 ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method6<object>() calli default 
string(class G2_C808`2<class BaseClass1,class BaseClass0>) ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method5() calli default string(class G2_C808`2<class BaseClass1,class BaseClass0>) ldstr "G1_C15::Method5.4889()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method4() calli default string(class G2_C808`2<class BaseClass1,class BaseClass0>) ldstr "G1_C15::Method4.4887()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C808`2<class BaseClass1,class BaseClass0>) ldstr "G1_C15::Method7.4885<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G2_C808`2<class BaseClass1,class BaseClass0>) ldstr "G1_C15::Method7.MI.4886<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on 
type class G2_C808`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G2_C808`2<class BaseClass1,class BaseClass0>) ldstr "G1_C15::Method4.MI.4888()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C808`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G2_C808`2<class BaseClass1,class BaseClass0>) ldstr "G1_C15::Method5.MI.4890()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C808`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G2_C808`2<class BaseClass1,class BaseClass0>) ldstr "G1_C15::Method6.MI.4892<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C808`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C808`2<class BaseClass1,class BaseClass0>) ldstr "G1_C15::Method7.MI.4886<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C808`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C808`2<class BaseClass1,class BaseClass0>::ClassMethod2999<object>() calli default string(class G2_C808`2<class BaseClass1,class BaseClass0>) ldstr 
"G2_C808::ClassMethod2999.12469<System.Object>()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass0> on type class G2_C808`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C808`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C808`2<class BaseClass1,class BaseClass0>::Method6<object>() calli default string(class G2_C808`2<class BaseClass1,class BaseClass0>) ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass0> on type class G2_C808`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C808`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C808`2<class BaseClass1,class BaseClass0>::Method5() calli default string(class G2_C808`2<class BaseClass1,class BaseClass0>) ldstr "G1_C15::Method5.4889()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass0> on type class G2_C808`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C808`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C808`2<class BaseClass1,class BaseClass0>::Method4() calli default string(class G2_C808`2<class BaseClass1,class BaseClass0>) ldstr "G1_C15::Method4.4887()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass0> on type class G2_C808`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C808`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C808`2<class BaseClass1,class BaseClass0>::Method7<object>() calli default string(class G2_C808`2<class BaseClass1,class BaseClass0>) ldstr "G1_C15::Method7.4885<System.Object>()" ldstr "class G2_C808`2<class BaseClass1,class 
BaseClass0> on type class G2_C808`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) newobj instance void class G2_C808`2<class BaseClass1,class BaseClass1>::.ctor() stloc.0 ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method6<object>() calli default string(class G2_C808`2<class BaseClass1,class BaseClass1>) ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method5() calli default string(class G2_C808`2<class BaseClass1,class BaseClass1>) ldstr "G1_C15::Method5.4889()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method4() calli default string(class G2_C808`2<class BaseClass1,class BaseClass1>) ldstr "G1_C15::Method4.4887()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C808`2<class BaseClass1,class BaseClass1>) ldstr "G1_C15::Method7.4885<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type 
class G2_C808`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G2_C808`2<class BaseClass1,class BaseClass1>) ldstr "G1_C15::Method7.MI.4886<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C808`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G2_C808`2<class BaseClass1,class BaseClass1>) ldstr "G1_C15::Method4.MI.4888()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C808`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G2_C808`2<class BaseClass1,class BaseClass1>) ldstr "G1_C15::Method5.MI.4890()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C808`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G2_C808`2<class BaseClass1,class BaseClass1>) ldstr "G1_C15::Method6.MI.4892<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C808`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C808`2<class BaseClass1,class BaseClass1>) ldstr "G1_C15::Method7.MI.4886<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class 
G2_C808`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C808`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C808`2<class BaseClass1,class BaseClass1>::ClassMethod2999<object>() calli default string(class G2_C808`2<class BaseClass1,class BaseClass1>) ldstr "G2_C808::ClassMethod2999.12469<System.Object>()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass1> on type class G2_C808`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C808`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C808`2<class BaseClass1,class BaseClass1>::Method6<object>() calli default string(class G2_C808`2<class BaseClass1,class BaseClass1>) ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass1> on type class G2_C808`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C808`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C808`2<class BaseClass1,class BaseClass1>::Method5() calli default string(class G2_C808`2<class BaseClass1,class BaseClass1>) ldstr "G1_C15::Method5.4889()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass1> on type class G2_C808`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C808`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C808`2<class BaseClass1,class BaseClass1>::Method4() calli default string(class G2_C808`2<class BaseClass1,class BaseClass1>) ldstr "G1_C15::Method4.4887()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass1> on type class G2_C808`2<class BaseClass1,class BaseClass1>" call void 
[TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C808`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C808`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G2_C808`2<class BaseClass1,class BaseClass1>) ldstr "G1_C15::Method7.4885<System.Object>()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass1> on type class G2_C808`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldstr "========================================================================\n\n" call void [mscorlib]System.Console::WriteLine(string) ret } .method public hidebysig static int32 Main() cil managed { .custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = ( 01 00 00 00 ) .entrypoint .maxstack 10 call void Generated1399::MethodCallingTest() call void Generated1399::ConstrainedCallsTest() call void Generated1399::StructConstrainedInterfaceCallsTest() call void Generated1399::CalliTest() ldc.i4 100 ret } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. .assembly extern mscorlib { .publickeytoken = (B7 7A 5C 56 19 34 E0 89 ) .ver 4:0:0:0 } .assembly extern TestFramework { .publickeytoken = ( B0 3F 5F 7F 11 D5 0A 3A ) } //TYPES IN FORWARDER ASSEMBLIES: //TEST ASSEMBLY: .assembly Generated1399 { .hash algorithm 0x00008004 } .assembly extern xunit.core {} .class public BaseClass0 { .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { ldarg.0 call instance void [mscorlib]System.Object::.ctor() ret } } .class public BaseClass1 extends BaseClass0 { .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { ldarg.0 call instance void BaseClass0::.ctor() ret } } .class public G3_C1871`1<T0> extends class G2_C808`2<class BaseClass1,class BaseClass1> implements class IBase2`2<!T0,class BaseClass1> { .method public hidebysig newslot virtual instance string Method7<M0>() cil managed noinlining { ldstr "G3_C1871::Method7.18455<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string ClassMethod5134() cil managed noinlining { ldstr "G3_C1871::ClassMethod5134.18456()" ret } .method public hidebysig newslot virtual instance string ClassMethod5135<M0>() cil managed noinlining { ldstr "G3_C1871::ClassMethod5135.18457<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string ClassMethod5136<M0>() cil 
managed noinlining { ldstr "G3_C1871::ClassMethod5136.18458<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { ldarg.0 call instance void class G2_C808`2<class BaseClass1,class BaseClass1>::.ctor() ret } } .class public G2_C808`2<T0, T1> extends class G1_C15`2<class BaseClass0,class BaseClass1> { .method public hidebysig newslot virtual instance string ClassMethod2999<M0>() cil managed noinlining { ldstr "G2_C808::ClassMethod2999.12469<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { ldarg.0 call instance void class G1_C15`2<class BaseClass0,class BaseClass1>::.ctor() ret } } .class interface public abstract IBase2`2<+T0, -T1> { .method public hidebysig newslot abstract virtual instance string Method7<M0>() cil managed { } } .class public abstract G1_C15`2<T0, T1> implements class IBase2`2<!T1,!T1>, class IBase1`1<class BaseClass0> { .method public hidebysig newslot virtual instance string Method7<M0>() cil managed noinlining { ldstr "G1_C15::Method7.4885<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string 'IBase2<T1,T1>.Method7'<M0>() cil managed noinlining { .override method instance 
string class IBase2`2<!T1,!T1>::Method7<[1]>() ldstr "G1_C15::Method7.MI.4886<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig virtual instance string Method4() cil managed noinlining { ldstr "G1_C15::Method4.4887()" ret } .method public hidebysig newslot virtual instance string 'IBase1<class BaseClass0>.Method4'() cil managed noinlining { .override method instance string class IBase1`1<class BaseClass0>::Method4() ldstr "G1_C15::Method4.MI.4888()" ret } .method public hidebysig virtual instance string Method5() cil managed noinlining { ldstr "G1_C15::Method5.4889()" ret } .method public hidebysig newslot virtual instance string 'IBase1<class BaseClass0>.Method5'() cil managed noinlining { .override method instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G1_C15::Method5.MI.4890()" ret } .method public hidebysig virtual instance string Method6<M0>() cil managed noinlining { ldstr "G1_C15::Method6.4891<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string 'IBase1<class BaseClass0>.Method6'<M0>() cil managed noinlining { .override method instance string class IBase1`1<class BaseClass0>::Method6<[1]>() ldstr "G1_C15::Method6.MI.4892<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig specialname 
rtspecialname instance void .ctor() cil managed { ldarg.0 call instance void [mscorlib]System.Object::.ctor() ret } } .class interface public abstract IBase1`1<+T0> { .method public hidebysig newslot abstract virtual instance string Method4() cil managed { } .method public hidebysig newslot abstract virtual instance string Method5() cil managed { } .method public hidebysig newslot abstract virtual instance string Method6<M0>() cil managed { } } .class public auto ansi beforefieldinit Generated1399 { .method static void M.BaseClass0<(BaseClass0)W>(!!W inst, string exp) cil managed { .maxstack 5 .locals init (string[] actualResults) ldc.i4.s 0 newarr string stloc.s actualResults ldarg.1 ldstr "M.BaseClass0<(BaseClass0)W>(!!W inst, string exp)" ldc.i4.s 0 ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.BaseClass1<(BaseClass1)W>(!!W inst, string exp) cil managed { .maxstack 5 .locals init (string[] actualResults) ldc.i4.s 0 newarr string stloc.s actualResults ldarg.1 ldstr "M.BaseClass1<(BaseClass1)W>(!!W inst, string exp)" ldc.i4.s 0 ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G3_C1871.T<T0,(class G3_C1871`1<!!T0>)W>(!!W 'inst', string exp) cil managed { .maxstack 13 .locals init (string[] actualResults) ldc.i4.s 8 newarr string stloc.s actualResults ldarg.1 ldstr "M.G3_C1871.T<T0,(class G3_C1871`1<!!T0>)W>(!!W 'inst', string exp)" ldc.i4.s 8 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1871`1<!!T0>::ClassMethod2999<object>() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1871`1<!!T0>::ClassMethod5134() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. 
!!W callvirt instance string class G3_C1871`1<!!T0>::ClassMethod5135<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1871`1<!!T0>::ClassMethod5136<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1871`1<!!T0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1871`1<!!T0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1871`1<!!T0>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1871`1<!!T0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G3_C1871.A<(class G3_C1871`1<class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 13 .locals init (string[] actualResults) ldc.i4.s 8 newarr string stloc.s actualResults ldarg.1 ldstr "M.G3_C1871.A<(class G3_C1871`1<class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 8 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1871`1<class BaseClass0>::ClassMethod2999<object>() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1871`1<class BaseClass0>::ClassMethod5134() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1871`1<class BaseClass0>::ClassMethod5135<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1871`1<class BaseClass0>::ClassMethod5136<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. 
!!W callvirt instance string class G3_C1871`1<class BaseClass0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1871`1<class BaseClass0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1871`1<class BaseClass0>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1871`1<class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G3_C1871.B<(class G3_C1871`1<class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 13 .locals init (string[] actualResults) ldc.i4.s 8 newarr string stloc.s actualResults ldarg.1 ldstr "M.G3_C1871.B<(class G3_C1871`1<class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 8 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1871`1<class BaseClass1>::ClassMethod2999<object>() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1871`1<class BaseClass1>::ClassMethod5134() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1871`1<class BaseClass1>::ClassMethod5135<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1871`1<class BaseClass1>::ClassMethod5136<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1871`1<class BaseClass1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1871`1<class BaseClass1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. 
!!W callvirt instance string class G3_C1871`1<class BaseClass1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1871`1<class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C808.T.T<T0,T1,(class G2_C808`2<!!T0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 10 .locals init (string[] actualResults) ldc.i4.s 5 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C808.T.T<T0,T1,(class G2_C808`2<!!T0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 5 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C808`2<!!T0,!!T1>::ClassMethod2999<object>() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C808`2<!!T0,!!T1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C808`2<!!T0,!!T1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C808`2<!!T0,!!T1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C808`2<!!T0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C808.A.T<T1,(class G2_C808`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 10 .locals init (string[] actualResults) ldc.i4.s 5 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C808.A.T<T1,(class G2_C808`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 5 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. 
!!W callvirt instance string class G2_C808`2<class BaseClass0,!!T1>::ClassMethod2999<object>() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C808`2<class BaseClass0,!!T1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C808`2<class BaseClass0,!!T1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C808`2<class BaseClass0,!!T1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C808`2<class BaseClass0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C808.A.A<(class G2_C808`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 10 .locals init (string[] actualResults) ldc.i4.s 5 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C808.A.A<(class G2_C808`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 5 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C808`2<class BaseClass0,class BaseClass0>::ClassMethod2999<object>() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C808`2<class BaseClass0,class BaseClass0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C808`2<class BaseClass0,class BaseClass0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C808`2<class BaseClass0,class BaseClass0>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained.
// NOTE(review): auto-generated type-versioning test IL - do not hand-edit the
// bodies; the ldstr literals must stay byte-identical to the generated method
// signatures they describe.
// Each M.G2_C808.*.* helper below constrains the generic parameter !!W to one
// G2_C808`2 instantiation, invokes ClassMethod2999<object>, Method4, Method5,
// Method6<object> and Method7<object> via 'constrained. !!W callvirt', stores
// the five result strings into actualResults, and passes them together with
// the caller-supplied expected string 'exp' to
// [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]).
!!W callvirt instance string class G2_C808`2<class BaseClass0,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C808.A.B<(class G2_C808`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 10 .locals init (string[] actualResults) ldc.i4.s 5 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C808.A.B<(class G2_C808`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 5 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C808`2<class BaseClass0,class BaseClass1>::ClassMethod2999<object>() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C808`2<class BaseClass0,class BaseClass1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C808`2<class BaseClass0,class BaseClass1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C808`2<class BaseClass0,class BaseClass1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C808`2<class BaseClass0,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C808.B.T<T1,(class G2_C808`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 10 .locals init (string[] actualResults) ldc.i4.s 5 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C808.B.T<T1,(class G2_C808`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 5 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained.
!!W callvirt instance string class G2_C808`2<class BaseClass1,!!T1>::ClassMethod2999<object>() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C808`2<class BaseClass1,!!T1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C808`2<class BaseClass1,!!T1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C808`2<class BaseClass1,!!T1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C808`2<class BaseClass1,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C808.B.A<(class G2_C808`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 10 .locals init (string[] actualResults) ldc.i4.s 5 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C808.B.A<(class G2_C808`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 5 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C808`2<class BaseClass1,class BaseClass0>::ClassMethod2999<object>() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C808`2<class BaseClass1,class BaseClass0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C808`2<class BaseClass1,class BaseClass0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C808`2<class BaseClass1,class BaseClass0>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained.
!!W callvirt instance string class G2_C808`2<class BaseClass1,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C808.B.B<(class G2_C808`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 10 .locals init (string[] actualResults) ldc.i4.s 5 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C808.B.B<(class G2_C808`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 5 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C808`2<class BaseClass1,class BaseClass1>::ClassMethod2999<object>() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C808`2<class BaseClass1,class BaseClass1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C808`2<class BaseClass1,class BaseClass1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C808`2<class BaseClass1,class BaseClass1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C808`2<class BaseClass1,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.T.T<T0,T1,(class IBase2`2<!!T0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.T.T<T0,T1,(class IBase2`2<!!T0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained.
!!W callvirt instance string class IBase2`2<!!T0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.A.T<T1,(class IBase2`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.A.T<T1,(class IBase2`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.A.A<(class IBase2`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.A.A<(class IBase2`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.A.B<(class IBase2`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.A.B<(class IBase2`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained.
// NOTE(review): generated interface-dispatch helpers - do not hand-edit.
// Each M.IBase2.*.* helper makes a single 'constrained. !!W callvirt' to
// IBase2`2<...>::Method7<object>() for one interface instantiation, stores the
// one result string into actualResults, and verifies it against 'exp' via
// [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]).
!!W callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.B.T<T1,(class IBase2`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.B.T<T1,(class IBase2`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass1,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.B.A<(class IBase2`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.B.A<(class IBase2`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.B.B<(class IBase2`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.B.B<(class IBase2`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained.
!!W callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C15.T.T<T0,T1,(class G1_C15`2<!!T0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 9 .locals init (string[] actualResults) ldc.i4.s 4 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C15.T.T<T0,T1,(class G1_C15`2<!!T0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 4 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<!!T0,!!T1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<!!T0,!!T1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<!!T0,!!T1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<!!T0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C15.A.T<T1,(class G1_C15`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 9 .locals init (string[] actualResults) ldc.i4.s 4 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C15.A.T<T1,(class G1_C15`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 4 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass0,!!T1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass0,!!T1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained.
!!W callvirt instance string class G1_C15`2<class BaseClass0,!!T1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C15.A.A<(class G1_C15`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 9 .locals init (string[] actualResults) ldc.i4.s 4 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C15.A.A<(class G1_C15`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 4 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C15.A.B<(class G1_C15`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 9 .locals init (string[] actualResults) ldc.i4.s 4 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C15.A.B<(class G1_C15`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 4 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained.
// NOTE(review): generated dispatch helpers - do not hand-edit.
// The M.G1_C15.*.* helpers collect four results (Method4, Method5,
// Method6<object>, Method7<object>) and the M.IBase1.* helpers collect three
// (Method4, Method5, Method6<object>), each via 'constrained. !!W callvirt'
// on the stated instantiation, then report through
// [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]).
!!W callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C15.B.T<T1,(class G1_C15`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 9 .locals init (string[] actualResults) ldc.i4.s 4 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C15.B.T<T1,(class G1_C15`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 4 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass1,!!T1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass1,!!T1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass1,!!T1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained.
!!W callvirt instance string class G1_C15`2<class BaseClass1,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C15.B.A<(class G1_C15`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 9 .locals init (string[] actualResults) ldc.i4.s 4 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C15.B.A<(class G1_C15`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 4 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass0>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C15.B.B<(class G1_C15`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 9 .locals init (string[] actualResults) ldc.i4.s 4 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C15.B.B<(class G1_C15`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 4 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained.
!!W callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase1.T<T0,(class IBase1`1<!!T0>)W>(!!W 'inst', string exp) cil managed { .maxstack 8 .locals init (string[] actualResults) ldc.i4.s 3 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase1.T<T0,(class IBase1`1<!!T0>)W>(!!W 'inst', string exp)" ldc.i4.s 3 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<!!T0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<!!T0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<!!T0>::Method6<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase1.A<(class IBase1`1<class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 8 .locals init (string[] actualResults) ldc.i4.s 3 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase1.A<(class IBase1`1<class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 3 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<class BaseClass0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<class BaseClass0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained.
// NOTE(review): generated test IL - do not hand-edit. Below: the tail of the
// M.IBase1.A helper, the complete M.IBase1.B helper (three 'constrained.
// !!W callvirt' calls on IBase1`1<class BaseClass1>, results checked via
// TestFramework::MethodCallTest), and the start of MethodCallingTest, which
// news up G3_C1871`1 instantiations and compares each virtual slot's result
// string against the generated expected literal. The body continues on the
// following (generated) lines; the trailing ldstr literal is split across
// physical lines and must not be reflowed.
!!W callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase1.B<(class IBase1`1<class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 8 .locals init (string[] actualResults) ldc.i4.s 3 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase1.B<(class IBase1`1<class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 3 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<class BaseClass1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<class BaseClass1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method public hidebysig static void MethodCallingTest() cil managed { .maxstack 10 .locals init (object V_0) ldstr "========================== Method Calling Test ==========================" call void [mscorlib]System.Console::WriteLine(string) newobj instance void class G3_C1871`1<class BaseClass0>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method6<object>() ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method5() ldstr "G1_C15::Method5.4889()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1>
on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method4() ldstr "G1_C15::Method4.4887()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G1_C15::Method7.4885<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G1_C15::Method7.MI.4886<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass0>::Method4() ldstr "G1_C15::Method4.MI.4888()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G1_C15::Method5.MI.4890()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() ldstr "G1_C15::Method6.MI.4892<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1871`1<class BaseClass0>" call void 
[TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G3_C1871::Method7.18455<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G2_C808`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C808`2<class BaseClass1,class BaseClass1>::ClassMethod2999<object>() ldstr "G2_C808::ClassMethod2999.12469<System.Object>()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass1> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C808`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C808`2<class BaseClass1,class BaseClass1>::Method6<object>() ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass1> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C808`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C808`2<class BaseClass1,class BaseClass1>::Method5() ldstr "G1_C15::Method5.4889()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass1> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C808`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C808`2<class BaseClass1,class BaseClass1>::Method4() ldstr "G1_C15::Method4.4887()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass1> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C808`2<class BaseClass1,class BaseClass1> 
callvirt instance string class G2_C808`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G1_C15::Method7.4885<System.Object>()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass1> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G3_C1871`1<class BaseClass0> callvirt instance string class G3_C1871`1<class BaseClass0>::ClassMethod5136<object>() ldstr "G3_C1871::ClassMethod5136.18458<System.Object>()" ldstr "class G3_C1871`1<class BaseClass0> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1871`1<class BaseClass0> callvirt instance string class G3_C1871`1<class BaseClass0>::ClassMethod5135<object>() ldstr "G3_C1871::ClassMethod5135.18457<System.Object>()" ldstr "class G3_C1871`1<class BaseClass0> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1871`1<class BaseClass0> callvirt instance string class G3_C1871`1<class BaseClass0>::ClassMethod5134() ldstr "G3_C1871::ClassMethod5134.18456()" ldstr "class G3_C1871`1<class BaseClass0> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1871`1<class BaseClass0> callvirt instance string class G3_C1871`1<class BaseClass0>::Method7<object>() ldstr "G3_C1871::Method7.18455<System.Object>()" ldstr "class G3_C1871`1<class BaseClass0> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1871`1<class BaseClass0> callvirt instance string class G3_C1871`1<class BaseClass0>::ClassMethod2999<object>() ldstr "G2_C808::ClassMethod2999.12469<System.Object>()" ldstr "class G3_C1871`1<class BaseClass0> on type class G3_C1871`1<class BaseClass0>" call 
void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1871`1<class BaseClass0> callvirt instance string class G3_C1871`1<class BaseClass0>::Method6<object>() ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G3_C1871`1<class BaseClass0> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1871`1<class BaseClass0> callvirt instance string class G3_C1871`1<class BaseClass0>::Method5() ldstr "G1_C15::Method5.4889()" ldstr "class G3_C1871`1<class BaseClass0> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1871`1<class BaseClass0> callvirt instance string class G3_C1871`1<class BaseClass0>::Method4() ldstr "G1_C15::Method4.4887()" ldstr "class G3_C1871`1<class BaseClass0> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop newobj instance void class G3_C1871`1<class BaseClass1>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method6<object>() ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method5() ldstr "G1_C15::Method5.4889()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class 
BaseClass1>::Method4() ldstr "G1_C15::Method4.4887()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G1_C15::Method7.4885<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G3_C1871::Method7.18455<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass0>::Method4() ldstr "G1_C15::Method4.MI.4888()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G1_C15::Method5.MI.4890()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() ldstr "G1_C15::Method6.MI.4892<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G3_C1871::Method7.18455<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class 
BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G2_C808`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C808`2<class BaseClass1,class BaseClass1>::ClassMethod2999<object>() ldstr "G2_C808::ClassMethod2999.12469<System.Object>()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C808`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C808`2<class BaseClass1,class BaseClass1>::Method6<object>() ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C808`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C808`2<class BaseClass1,class BaseClass1>::Method5() ldstr "G1_C15::Method5.4889()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C808`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C808`2<class BaseClass1,class BaseClass1>::Method4() ldstr "G1_C15::Method4.4887()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C808`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C808`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G1_C15::Method7.4885<System.Object>()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void 
[TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G3_C1871`1<class BaseClass1> callvirt instance string class G3_C1871`1<class BaseClass1>::ClassMethod5136<object>() ldstr "G3_C1871::ClassMethod5136.18458<System.Object>()" ldstr "class G3_C1871`1<class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1871`1<class BaseClass1> callvirt instance string class G3_C1871`1<class BaseClass1>::ClassMethod5135<object>() ldstr "G3_C1871::ClassMethod5135.18457<System.Object>()" ldstr "class G3_C1871`1<class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1871`1<class BaseClass1> callvirt instance string class G3_C1871`1<class BaseClass1>::ClassMethod5134() ldstr "G3_C1871::ClassMethod5134.18456()" ldstr "class G3_C1871`1<class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1871`1<class BaseClass1> callvirt instance string class G3_C1871`1<class BaseClass1>::Method7<object>() ldstr "G3_C1871::Method7.18455<System.Object>()" ldstr "class G3_C1871`1<class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1871`1<class BaseClass1> callvirt instance string class G3_C1871`1<class BaseClass1>::ClassMethod2999<object>() ldstr "G2_C808::ClassMethod2999.12469<System.Object>()" ldstr "class G3_C1871`1<class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1871`1<class BaseClass1> callvirt instance string class G3_C1871`1<class BaseClass1>::Method6<object>() ldstr "G1_C15::Method6.4891<System.Object>()" ldstr 
"class G3_C1871`1<class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1871`1<class BaseClass1> callvirt instance string class G3_C1871`1<class BaseClass1>::Method5() ldstr "G1_C15::Method5.4889()" ldstr "class G3_C1871`1<class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1871`1<class BaseClass1> callvirt instance string class G3_C1871`1<class BaseClass1>::Method4() ldstr "G1_C15::Method4.4887()" ldstr "class G3_C1871`1<class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop newobj instance void class G2_C808`2<class BaseClass0,class BaseClass0>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method6<object>() ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method5() ldstr "G1_C15::Method5.4889()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method4() ldstr "G1_C15::Method4.4887()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass0>" call void 
[TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G1_C15::Method7.4885<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G1_C15::Method7.MI.4886<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass0>::Method4() ldstr "G1_C15::Method4.MI.4888()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C808`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G1_C15::Method5.MI.4890()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C808`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() ldstr "G1_C15::Method6.MI.4892<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C808`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G1_C15::Method7.MI.4886<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass0>" call void 
[TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G2_C808`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C808`2<class BaseClass0,class BaseClass0>::ClassMethod2999<object>() ldstr "G2_C808::ClassMethod2999.12469<System.Object>()" ldstr "class G2_C808`2<class BaseClass0,class BaseClass0> on type class G2_C808`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C808`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C808`2<class BaseClass0,class BaseClass0>::Method6<object>() ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G2_C808`2<class BaseClass0,class BaseClass0> on type class G2_C808`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C808`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C808`2<class BaseClass0,class BaseClass0>::Method5() ldstr "G1_C15::Method5.4889()" ldstr "class G2_C808`2<class BaseClass0,class BaseClass0> on type class G2_C808`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C808`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C808`2<class BaseClass0,class BaseClass0>::Method4() ldstr "G1_C15::Method4.4887()" ldstr "class G2_C808`2<class BaseClass0,class BaseClass0> on type class G2_C808`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C808`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C808`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G1_C15::Method7.4885<System.Object>()" ldstr "class G2_C808`2<class BaseClass0,class BaseClass0> on type class G2_C808`2<class BaseClass0,class BaseClass0>" call void 
[TestFramework]TestFramework::MethodCallTest(string,string,string) pop newobj instance void class G2_C808`2<class BaseClass0,class BaseClass1>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method6<object>() ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method5() ldstr "G1_C15::Method5.4889()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method4() ldstr "G1_C15::Method4.4887()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G1_C15::Method7.4885<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G1_C15::Method7.MI.4886<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass1>" call void 
[TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass0>::Method4() ldstr "G1_C15::Method4.MI.4888()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C808`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G1_C15::Method5.MI.4890()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C808`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() ldstr "G1_C15::Method6.MI.4892<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C808`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G1_C15::Method7.MI.4886<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G2_C808`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C808`2<class BaseClass0,class BaseClass1>::ClassMethod2999<object>() ldstr "G2_C808::ClassMethod2999.12469<System.Object>()" ldstr "class G2_C808`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C808`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C808`2<class BaseClass0,class BaseClass1>::Method6<object>() ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G2_C808`2<class BaseClass0,class 
BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C808`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C808`2<class BaseClass0,class BaseClass1>::Method5() ldstr "G1_C15::Method5.4889()" ldstr "class G2_C808`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C808`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C808`2<class BaseClass0,class BaseClass1>::Method4() ldstr "G1_C15::Method4.4887()" ldstr "class G2_C808`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C808`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C808`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G1_C15::Method7.4885<System.Object>()" ldstr "class G2_C808`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop newobj instance void class G2_C808`2<class BaseClass1,class BaseClass0>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method6<object>() ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method5() ldstr "G1_C15::Method5.4889()" ldstr "class 
G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method4() ldstr "G1_C15::Method4.4887()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G1_C15::Method7.4885<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G1_C15::Method7.MI.4886<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C808`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass0>::Method4() ldstr "G1_C15::Method4.MI.4888()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C808`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G1_C15::Method5.MI.4890()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C808`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() ldstr 
"G1_C15::Method6.MI.4892<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C808`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G1_C15::Method7.MI.4886<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G2_C808`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C808`2<class BaseClass1,class BaseClass0>::ClassMethod2999<object>() ldstr "G2_C808::ClassMethod2999.12469<System.Object>()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass0> on type class G2_C808`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C808`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C808`2<class BaseClass1,class BaseClass0>::Method6<object>() ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass0> on type class G2_C808`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C808`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C808`2<class BaseClass1,class BaseClass0>::Method5() ldstr "G1_C15::Method5.4889()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass0> on type class G2_C808`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C808`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C808`2<class BaseClass1,class BaseClass0>::Method4() ldstr "G1_C15::Method4.4887()" ldstr "class G2_C808`2<class 
BaseClass1,class BaseClass0> on type class G2_C808`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C808`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C808`2<class BaseClass1,class BaseClass0>::Method7<object>() ldstr "G1_C15::Method7.4885<System.Object>()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass0> on type class G2_C808`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop newobj instance void class G2_C808`2<class BaseClass1,class BaseClass1>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method6<object>() ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method5() ldstr "G1_C15::Method5.4889()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method4() ldstr "G1_C15::Method4.4887()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C15`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr 
"G1_C15::Method7.4885<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G1_C15::Method7.MI.4886<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C808`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass0>::Method4() ldstr "G1_C15::Method4.MI.4888()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C808`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G1_C15::Method5.MI.4890()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C808`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() ldstr "G1_C15::Method6.MI.4892<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C808`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G1_C15::Method7.MI.4886<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G2_C808`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C808`2<class BaseClass1,class 
BaseClass1>::ClassMethod2999<object>() ldstr "G2_C808::ClassMethod2999.12469<System.Object>()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass1> on type class G2_C808`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C808`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C808`2<class BaseClass1,class BaseClass1>::Method6<object>() ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass1> on type class G2_C808`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C808`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C808`2<class BaseClass1,class BaseClass1>::Method5() ldstr "G1_C15::Method5.4889()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass1> on type class G2_C808`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C808`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C808`2<class BaseClass1,class BaseClass1>::Method4() ldstr "G1_C15::Method4.4887()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass1> on type class G2_C808`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C808`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C808`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G1_C15::Method7.4885<System.Object>()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass1> on type class G2_C808`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldstr "========================================================================\n\n" call void [mscorlib]System.Console::WriteLine(string) ret } 
// NOTE(review): machine-generated ILAsm conformance test — do not hand-edit; regenerate from the test generator instead.
// ConstrainedCallsTest: for each closed instantiation of G3_C1871`1 / G2_C808`2 it news up an
// instance and invokes the Generated1399::M.* harness helpers, passing a '#'-separated list of
// "DeclaringType::Method.id" strings alongside the receiver. Presumably each helper performs the
// corresponding (constrained) virtual/interface calls and compares the results against that
// expectation string — TODO confirm against the M.* helper bodies, which are outside this chunk.
.method public hidebysig static void ConstrainedCallsTest() cil managed { .maxstack 10 .locals init (object V_0) ldstr "========================== Constrained Calls Test ==========================" call void [mscorlib]System.Console::WriteLine(string) newobj instance void class G3_C1871`1<class BaseClass0>::.ctor() stloc.0 ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G1_C15.T.T<class BaseClass0,class BaseClass1,class G3_C1871`1<class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G1_C15.A.T<class BaseClass1,class G3_C1871`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G1_C15.A.B<class G3_C1871`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C15::Method7.MI.4886<System.Object>()#" call void Generated1399::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G3_C1871`1<class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C15::Method7.MI.4886<System.Object>()#" call void Generated1399::M.IBase2.B.T<class BaseClass1,class G3_C1871`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C15::Method7.MI.4886<System.Object>()#" call void Generated1399::M.IBase2.B.B<class G3_C1871`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C15::Method4.MI.4888()#G1_C15::Method5.MI.4890()#G1_C15::Method6.MI.4892<System.Object>()#" call void Generated1399::M.IBase1.T<class BaseClass0,class G3_C1871`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C15::Method4.MI.4888()#G1_C15::Method5.MI.4890()#G1_C15::Method6.MI.4892<System.Object>()#" call void Generated1399::M.IBase1.A<class G3_C1871`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G3_C1871::Method7.18455<System.Object>()#"
// (same pattern repeats below for G3_C1871`1<BaseClass1> and the four G2_C808`2 instantiations;
//  each receiver's section starts at the next 'newobj ... ::.ctor() stloc.0')
call void Generated1399::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G3_C1871`1<class BaseClass0>>(!!2,string) ldloc.0 ldstr "G3_C1871::Method7.18455<System.Object>()#" call void Generated1399::M.IBase2.A.T<class BaseClass1,class G3_C1871`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G3_C1871::Method7.18455<System.Object>()#" call void Generated1399::M.IBase2.A.B<class G3_C1871`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C808::ClassMethod2999.12469<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G2_C808.T.T<class BaseClass1,class BaseClass1,class G3_C1871`1<class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C808::ClassMethod2999.12469<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G2_C808.B.T<class BaseClass1,class G3_C1871`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C808::ClassMethod2999.12469<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G2_C808.B.B<class G3_C1871`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C808::ClassMethod2999.12469<System.Object>()#G3_C1871::ClassMethod5134.18456()#G3_C1871::ClassMethod5135.18457<System.Object>()#G3_C1871::ClassMethod5136.18458<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G3_C1871::Method7.18455<System.Object>()#" call void Generated1399::M.G3_C1871.T<class BaseClass0,class G3_C1871`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr
"G2_C808::ClassMethod2999.12469<System.Object>()#G3_C1871::ClassMethod5134.18456()#G3_C1871::ClassMethod5135.18457<System.Object>()#G3_C1871::ClassMethod5136.18458<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G3_C1871::Method7.18455<System.Object>()#" call void Generated1399::M.G3_C1871.A<class G3_C1871`1<class BaseClass0>>(!!0,string) newobj instance void class G3_C1871`1<class BaseClass1>::.ctor() stloc.0 ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G1_C15.T.T<class BaseClass0,class BaseClass1,class G3_C1871`1<class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G1_C15.A.T<class BaseClass1,class G3_C1871`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G1_C15.A.B<class G3_C1871`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G3_C1871::Method7.18455<System.Object>()#" call void Generated1399::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G3_C1871`1<class BaseClass1>>(!!2,string) ldloc.0 ldstr "G3_C1871::Method7.18455<System.Object>()#" call void Generated1399::M.IBase2.B.T<class BaseClass1,class G3_C1871`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G3_C1871::Method7.18455<System.Object>()#" call void Generated1399::M.IBase2.B.B<class G3_C1871`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C15::Method4.MI.4888()#G1_C15::Method5.MI.4890()#G1_C15::Method6.MI.4892<System.Object>()#" call void Generated1399::M.IBase1.T<class BaseClass0,class G3_C1871`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C15::Method4.MI.4888()#G1_C15::Method5.MI.4890()#G1_C15::Method6.MI.4892<System.Object>()#"
call void Generated1399::M.IBase1.A<class G3_C1871`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G3_C1871::Method7.18455<System.Object>()#" call void Generated1399::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G3_C1871`1<class BaseClass1>>(!!2,string) ldloc.0 ldstr "G3_C1871::Method7.18455<System.Object>()#" call void Generated1399::M.IBase2.A.T<class BaseClass1,class G3_C1871`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G3_C1871::Method7.18455<System.Object>()#" call void Generated1399::M.IBase2.A.B<class G3_C1871`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C808::ClassMethod2999.12469<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G2_C808.T.T<class BaseClass1,class BaseClass1,class G3_C1871`1<class BaseClass1>>(!!2,string) ldloc.0 ldstr "G2_C808::ClassMethod2999.12469<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G2_C808.B.T<class BaseClass1,class G3_C1871`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C808::ClassMethod2999.12469<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G2_C808.B.B<class G3_C1871`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C808::ClassMethod2999.12469<System.Object>()#G3_C1871::ClassMethod5134.18456()#G3_C1871::ClassMethod5135.18457<System.Object>()#G3_C1871::ClassMethod5136.18458<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G3_C1871::Method7.18455<System.Object>()#" call void Generated1399::M.G3_C1871.T<class BaseClass1,class G3_C1871`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr
"G2_C808::ClassMethod2999.12469<System.Object>()#G3_C1871::ClassMethod5134.18456()#G3_C1871::ClassMethod5135.18457<System.Object>()#G3_C1871::ClassMethod5136.18458<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G3_C1871::Method7.18455<System.Object>()#" call void Generated1399::M.G3_C1871.B<class G3_C1871`1<class BaseClass1>>(!!0,string) newobj instance void class G2_C808`2<class BaseClass0,class BaseClass0>::.ctor() stloc.0 ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G1_C15.T.T<class BaseClass0,class BaseClass1,class G2_C808`2<class BaseClass0,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G1_C15.A.T<class BaseClass1,class G2_C808`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G1_C15.A.B<class G2_C808`2<class BaseClass0,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C15::Method7.MI.4886<System.Object>()#" call void Generated1399::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G2_C808`2<class BaseClass0,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C15::Method7.MI.4886<System.Object>()#" call void Generated1399::M.IBase2.B.T<class BaseClass1,class G2_C808`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C15::Method7.MI.4886<System.Object>()#" call void Generated1399::M.IBase2.B.B<class G2_C808`2<class BaseClass0,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C15::Method4.MI.4888()#G1_C15::Method5.MI.4890()#G1_C15::Method6.MI.4892<System.Object>()#" call void Generated1399::M.IBase1.T<class BaseClass0,class G2_C808`2<class BaseClass0,class
BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C15::Method4.MI.4888()#G1_C15::Method5.MI.4890()#G1_C15::Method6.MI.4892<System.Object>()#" call void Generated1399::M.IBase1.A<class G2_C808`2<class BaseClass0,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C15::Method7.MI.4886<System.Object>()#" call void Generated1399::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G2_C808`2<class BaseClass0,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C15::Method7.MI.4886<System.Object>()#" call void Generated1399::M.IBase2.A.T<class BaseClass1,class G2_C808`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C15::Method7.MI.4886<System.Object>()#" call void Generated1399::M.IBase2.A.B<class G2_C808`2<class BaseClass0,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C808::ClassMethod2999.12469<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G2_C808.T.T<class BaseClass0,class BaseClass0,class G2_C808`2<class BaseClass0,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C808::ClassMethod2999.12469<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G2_C808.A.T<class BaseClass0,class G2_C808`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C808::ClassMethod2999.12469<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G2_C808.A.A<class G2_C808`2<class BaseClass0,class BaseClass0>>(!!0,string) newobj instance void class G2_C808`2<class BaseClass0,class BaseClass1>::.ctor() stloc.0 ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G1_C15.T.T<class BaseClass0,class BaseClass1,class
G2_C808`2<class BaseClass0,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G1_C15.A.T<class BaseClass1,class G2_C808`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G1_C15.A.B<class G2_C808`2<class BaseClass0,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C15::Method7.MI.4886<System.Object>()#" call void Generated1399::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G2_C808`2<class BaseClass0,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C15::Method7.MI.4886<System.Object>()#" call void Generated1399::M.IBase2.B.T<class BaseClass1,class G2_C808`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C15::Method7.MI.4886<System.Object>()#" call void Generated1399::M.IBase2.B.B<class G2_C808`2<class BaseClass0,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C15::Method4.MI.4888()#G1_C15::Method5.MI.4890()#G1_C15::Method6.MI.4892<System.Object>()#" call void Generated1399::M.IBase1.T<class BaseClass0,class G2_C808`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C15::Method4.MI.4888()#G1_C15::Method5.MI.4890()#G1_C15::Method6.MI.4892<System.Object>()#" call void Generated1399::M.IBase1.A<class G2_C808`2<class BaseClass0,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C15::Method7.MI.4886<System.Object>()#" call void Generated1399::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G2_C808`2<class BaseClass0,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C15::Method7.MI.4886<System.Object>()#" call void Generated1399::M.IBase2.A.T<class BaseClass1,class G2_C808`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C15::Method7.MI.4886<System.Object>()#" call void Generated1399::M.IBase2.A.B<class
G2_C808`2<class BaseClass0,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C808::ClassMethod2999.12469<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G2_C808.T.T<class BaseClass0,class BaseClass1,class G2_C808`2<class BaseClass0,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G2_C808::ClassMethod2999.12469<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G2_C808.A.T<class BaseClass1,class G2_C808`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C808::ClassMethod2999.12469<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G2_C808.A.B<class G2_C808`2<class BaseClass0,class BaseClass1>>(!!0,string) newobj instance void class G2_C808`2<class BaseClass1,class BaseClass0>::.ctor() stloc.0 ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G1_C15.T.T<class BaseClass0,class BaseClass1,class G2_C808`2<class BaseClass1,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G1_C15.A.T<class BaseClass1,class G2_C808`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G1_C15.A.B<class G2_C808`2<class BaseClass1,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C15::Method7.MI.4886<System.Object>()#" call void Generated1399::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G2_C808`2<class
BaseClass1,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C15::Method7.MI.4886<System.Object>()#" call void Generated1399::M.IBase2.B.T<class BaseClass1,class G2_C808`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C15::Method7.MI.4886<System.Object>()#" call void Generated1399::M.IBase2.B.B<class G2_C808`2<class BaseClass1,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C15::Method4.MI.4888()#G1_C15::Method5.MI.4890()#G1_C15::Method6.MI.4892<System.Object>()#" call void Generated1399::M.IBase1.T<class BaseClass0,class G2_C808`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C15::Method4.MI.4888()#G1_C15::Method5.MI.4890()#G1_C15::Method6.MI.4892<System.Object>()#" call void Generated1399::M.IBase1.A<class G2_C808`2<class BaseClass1,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C15::Method7.MI.4886<System.Object>()#" call void Generated1399::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G2_C808`2<class BaseClass1,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C15::Method7.MI.4886<System.Object>()#" call void Generated1399::M.IBase2.A.T<class BaseClass1,class G2_C808`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C15::Method7.MI.4886<System.Object>()#" call void Generated1399::M.IBase2.A.B<class G2_C808`2<class BaseClass1,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C808::ClassMethod2999.12469<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G2_C808.T.T<class BaseClass1,class BaseClass0,class G2_C808`2<class BaseClass1,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C808::ClassMethod2999.12469<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G2_C808.B.T<class BaseClass0,class G2_C808`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr
"G2_C808::ClassMethod2999.12469<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G2_C808.B.A<class G2_C808`2<class BaseClass1,class BaseClass0>>(!!0,string) newobj instance void class G2_C808`2<class BaseClass1,class BaseClass1>::.ctor() stloc.0 ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G1_C15.T.T<class BaseClass0,class BaseClass1,class G2_C808`2<class BaseClass1,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G1_C15.A.T<class BaseClass1,class G2_C808`2<class BaseClass1,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G1_C15.A.B<class G2_C808`2<class BaseClass1,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C15::Method7.MI.4886<System.Object>()#" call void Generated1399::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G2_C808`2<class BaseClass1,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C15::Method7.MI.4886<System.Object>()#" call void Generated1399::M.IBase2.B.T<class BaseClass1,class G2_C808`2<class BaseClass1,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C15::Method7.MI.4886<System.Object>()#" call void Generated1399::M.IBase2.B.B<class G2_C808`2<class BaseClass1,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C15::Method4.MI.4888()#G1_C15::Method5.MI.4890()#G1_C15::Method6.MI.4892<System.Object>()#" call void Generated1399::M.IBase1.T<class BaseClass0,class G2_C808`2<class BaseClass1,class BaseClass1>>(!!1,string) ldloc.0 ldstr
"G1_C15::Method4.MI.4888()#G1_C15::Method5.MI.4890()#G1_C15::Method6.MI.4892<System.Object>()#" call void Generated1399::M.IBase1.A<class G2_C808`2<class BaseClass1,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C15::Method7.MI.4886<System.Object>()#" call void Generated1399::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G2_C808`2<class BaseClass1,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C15::Method7.MI.4886<System.Object>()#" call void Generated1399::M.IBase2.A.T<class BaseClass1,class G2_C808`2<class BaseClass1,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C15::Method7.MI.4886<System.Object>()#" call void Generated1399::M.IBase2.A.B<class G2_C808`2<class BaseClass1,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C808::ClassMethod2999.12469<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G2_C808.T.T<class BaseClass1,class BaseClass1,class G2_C808`2<class BaseClass1,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G2_C808::ClassMethod2999.12469<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G2_C808.B.T<class BaseClass1,class G2_C808`2<class BaseClass1,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C808::ClassMethod2999.12469<System.Object>()#G1_C15::Method4.4887()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G1_C15::Method7.4885<System.Object>()#" call void Generated1399::M.G2_C808.B.B<class G2_C808`2<class BaseClass1,class BaseClass1>>(!!0,string) ldstr "========================================================================\n\n" call void [mscorlib]System.Console::WriteLine(string) ret } .method public hidebysig static void StructConstrainedInterfaceCallsTest() cil managed { .maxstack 10 ldstr "===================== Struct Constrained Interface Calls Test =====================" call void
[mscorlib]System.Console::WriteLine(string) ldstr "========================================================================\n\n" call void [mscorlib]System.Console::WriteLine(string) ret } .method public hidebysig static void CalliTest() cil managed { .maxstack 10 .locals init (object V_0) ldstr "========================== Method Calli Test ==========================" call void [mscorlib]System.Console::WriteLine(string) newobj instance void class G3_C1871`1<class BaseClass0>::.ctor() stloc.0 ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method6<object>() calli default string(class G3_C1871`1<class BaseClass0>) ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method5() calli default string(class G3_C1871`1<class BaseClass0>) ldstr "G1_C15::Method5.4889()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method4() calli default string(class G3_C1871`1<class BaseClass0>) ldstr "G1_C15::Method4.4887()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default 
string(class G3_C1871`1<class BaseClass0>) ldstr "G1_C15::Method7.4885<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G3_C1871`1<class BaseClass0>) ldstr "G1_C15::Method7.MI.4886<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G3_C1871`1<class BaseClass0>) ldstr "G1_C15::Method4.MI.4888()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G3_C1871`1<class BaseClass0>) ldstr "G1_C15::Method5.MI.4890()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G3_C1871`1<class BaseClass0>) ldstr "G1_C15::Method6.MI.4892<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G3_C1871`1<class BaseClass0>) ldstr "G3_C1871::Method7.18455<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class 
G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C808`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C808`2<class BaseClass1,class BaseClass1>::ClassMethod2999<object>() calli default string(class G3_C1871`1<class BaseClass0>) ldstr "G2_C808::ClassMethod2999.12469<System.Object>()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass1> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C808`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C808`2<class BaseClass1,class BaseClass1>::Method6<object>() calli default string(class G3_C1871`1<class BaseClass0>) ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass1> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C808`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C808`2<class BaseClass1,class BaseClass1>::Method5() calli default string(class G3_C1871`1<class BaseClass0>) ldstr "G1_C15::Method5.4889()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass1> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C808`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C808`2<class BaseClass1,class BaseClass1>::Method4() calli default string(class G3_C1871`1<class BaseClass0>) ldstr "G1_C15::Method4.4887()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass1> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C808`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance 
string class G2_C808`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G3_C1871`1<class BaseClass0>) ldstr "G1_C15::Method7.4885<System.Object>()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass1> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1871`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1871`1<class BaseClass0>::ClassMethod5136<object>() calli default string(class G3_C1871`1<class BaseClass0>) ldstr "G3_C1871::ClassMethod5136.18458<System.Object>()" ldstr "class G3_C1871`1<class BaseClass0> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1871`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1871`1<class BaseClass0>::ClassMethod5135<object>() calli default string(class G3_C1871`1<class BaseClass0>) ldstr "G3_C1871::ClassMethod5135.18457<System.Object>()" ldstr "class G3_C1871`1<class BaseClass0> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1871`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1871`1<class BaseClass0>::ClassMethod5134() calli default string(class G3_C1871`1<class BaseClass0>) ldstr "G3_C1871::ClassMethod5134.18456()" ldstr "class G3_C1871`1<class BaseClass0> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1871`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1871`1<class BaseClass0>::Method7<object>() calli default string(class G3_C1871`1<class BaseClass0>) ldstr "G3_C1871::Method7.18455<System.Object>()" ldstr "class G3_C1871`1<class BaseClass0> on type class G3_C1871`1<class BaseClass0>" call void 
[TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1871`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1871`1<class BaseClass0>::ClassMethod2999<object>() calli default string(class G3_C1871`1<class BaseClass0>) ldstr "G2_C808::ClassMethod2999.12469<System.Object>()" ldstr "class G3_C1871`1<class BaseClass0> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1871`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1871`1<class BaseClass0>::Method6<object>() calli default string(class G3_C1871`1<class BaseClass0>) ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G3_C1871`1<class BaseClass0> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1871`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1871`1<class BaseClass0>::Method5() calli default string(class G3_C1871`1<class BaseClass0>) ldstr "G1_C15::Method5.4889()" ldstr "class G3_C1871`1<class BaseClass0> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1871`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1871`1<class BaseClass0>::Method4() calli default string(class G3_C1871`1<class BaseClass0>) ldstr "G1_C15::Method4.4887()" ldstr "class G3_C1871`1<class BaseClass0> on type class G3_C1871`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) newobj instance void class G3_C1871`1<class BaseClass1>::.ctor() stloc.0 ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method6<object>() calli default string(class G3_C1871`1<class BaseClass1>) ldstr 
"G1_C15::Method6.4891<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method5() calli default string(class G3_C1871`1<class BaseClass1>) ldstr "G1_C15::Method5.4889()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method4() calli default string(class G3_C1871`1<class BaseClass1>) ldstr "G1_C15::Method4.4887()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G3_C1871`1<class BaseClass1>) ldstr "G1_C15::Method7.4885<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G3_C1871`1<class BaseClass1>) ldstr "G3_C1871::Method7.18455<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class 
BaseClass0>::Method4() calli default string(class G3_C1871`1<class BaseClass1>) ldstr "G1_C15::Method4.MI.4888()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G3_C1871`1<class BaseClass1>) ldstr "G1_C15::Method5.MI.4890()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G3_C1871`1<class BaseClass1>) ldstr "G1_C15::Method6.MI.4892<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G3_C1871`1<class BaseClass1>) ldstr "G3_C1871::Method7.18455<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C808`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C808`2<class BaseClass1,class BaseClass1>::ClassMethod2999<object>() calli default string(class G3_C1871`1<class BaseClass1>) ldstr "G2_C808::ClassMethod2999.12469<System.Object>()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C808`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C808`2<class BaseClass1,class 
BaseClass1>::Method6<object>() calli default string(class G3_C1871`1<class BaseClass1>) ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C808`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C808`2<class BaseClass1,class BaseClass1>::Method5() calli default string(class G3_C1871`1<class BaseClass1>) ldstr "G1_C15::Method5.4889()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C808`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C808`2<class BaseClass1,class BaseClass1>::Method4() calli default string(class G3_C1871`1<class BaseClass1>) ldstr "G1_C15::Method4.4887()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C808`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C808`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G3_C1871`1<class BaseClass1>) ldstr "G1_C15::Method7.4885<System.Object>()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1871`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1871`1<class BaseClass1>::ClassMethod5136<object>() calli default string(class G3_C1871`1<class BaseClass1>) ldstr "G3_C1871::ClassMethod5136.18458<System.Object>()" ldstr "class G3_C1871`1<class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void 
[TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1871`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1871`1<class BaseClass1>::ClassMethod5135<object>() calli default string(class G3_C1871`1<class BaseClass1>) ldstr "G3_C1871::ClassMethod5135.18457<System.Object>()" ldstr "class G3_C1871`1<class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1871`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1871`1<class BaseClass1>::ClassMethod5134() calli default string(class G3_C1871`1<class BaseClass1>) ldstr "G3_C1871::ClassMethod5134.18456()" ldstr "class G3_C1871`1<class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1871`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1871`1<class BaseClass1>::Method7<object>() calli default string(class G3_C1871`1<class BaseClass1>) ldstr "G3_C1871::Method7.18455<System.Object>()" ldstr "class G3_C1871`1<class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1871`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1871`1<class BaseClass1>::ClassMethod2999<object>() calli default string(class G3_C1871`1<class BaseClass1>) ldstr "G2_C808::ClassMethod2999.12469<System.Object>()" ldstr "class G3_C1871`1<class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1871`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1871`1<class BaseClass1>::Method6<object>() calli default string(class G3_C1871`1<class BaseClass1>) ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class 
G3_C1871`1<class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1871`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1871`1<class BaseClass1>::Method5() calli default string(class G3_C1871`1<class BaseClass1>) ldstr "G1_C15::Method5.4889()" ldstr "class G3_C1871`1<class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1871`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1871`1<class BaseClass1>::Method4() calli default string(class G3_C1871`1<class BaseClass1>) ldstr "G1_C15::Method4.4887()" ldstr "class G3_C1871`1<class BaseClass1> on type class G3_C1871`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) newobj instance void class G2_C808`2<class BaseClass0,class BaseClass0>::.ctor() stloc.0 ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method6<object>() calli default string(class G2_C808`2<class BaseClass0,class BaseClass0>) ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method5() calli default string(class G2_C808`2<class BaseClass0,class BaseClass0>) ldstr "G1_C15::Method5.4889()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class 
BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method4() calli default string(class G2_C808`2<class BaseClass0,class BaseClass0>) ldstr "G1_C15::Method4.4887()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C808`2<class BaseClass0,class BaseClass0>) ldstr "G1_C15::Method7.4885<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G2_C808`2<class BaseClass0,class BaseClass0>) ldstr "G1_C15::Method7.MI.4886<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G2_C808`2<class BaseClass0,class BaseClass0>) ldstr "G1_C15::Method4.MI.4888()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C808`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G2_C808`2<class BaseClass0,class BaseClass0>) ldstr "G1_C15::Method5.MI.4890()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C808`2<class BaseClass0,class BaseClass0>" 
call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G2_C808`2<class BaseClass0,class BaseClass0>) ldstr "G1_C15::Method6.MI.4892<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C808`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C808`2<class BaseClass0,class BaseClass0>) ldstr "G1_C15::Method7.MI.4886<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C808`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C808`2<class BaseClass0,class BaseClass0>::ClassMethod2999<object>() calli default string(class G2_C808`2<class BaseClass0,class BaseClass0>) ldstr "G2_C808::ClassMethod2999.12469<System.Object>()" ldstr "class G2_C808`2<class BaseClass0,class BaseClass0> on type class G2_C808`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C808`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C808`2<class BaseClass0,class BaseClass0>::Method6<object>() calli default string(class G2_C808`2<class BaseClass0,class BaseClass0>) ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G2_C808`2<class BaseClass0,class BaseClass0> on type class G2_C808`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C808`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class 
G2_C808`2<class BaseClass0,class BaseClass0>::Method5() calli default string(class G2_C808`2<class BaseClass0,class BaseClass0>) ldstr "G1_C15::Method5.4889()" ldstr "class G2_C808`2<class BaseClass0,class BaseClass0> on type class G2_C808`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C808`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C808`2<class BaseClass0,class BaseClass0>::Method4() calli default string(class G2_C808`2<class BaseClass0,class BaseClass0>) ldstr "G1_C15::Method4.4887()" ldstr "class G2_C808`2<class BaseClass0,class BaseClass0> on type class G2_C808`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C808`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C808`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G2_C808`2<class BaseClass0,class BaseClass0>) ldstr "G1_C15::Method7.4885<System.Object>()" ldstr "class G2_C808`2<class BaseClass0,class BaseClass0> on type class G2_C808`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) newobj instance void class G2_C808`2<class BaseClass0,class BaseClass1>::.ctor() stloc.0 ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method6<object>() calli default string(class G2_C808`2<class BaseClass0,class BaseClass1>) ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class 
G1_C15`2<class BaseClass0,class BaseClass1>::Method5() calli default string(class G2_C808`2<class BaseClass0,class BaseClass1>) ldstr "G1_C15::Method5.4889()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method4() calli default string(class G2_C808`2<class BaseClass0,class BaseClass1>) ldstr "G1_C15::Method4.4887()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C808`2<class BaseClass0,class BaseClass1>) ldstr "G1_C15::Method7.4885<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G2_C808`2<class BaseClass0,class BaseClass1>) ldstr "G1_C15::Method7.MI.4886<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G2_C808`2<class BaseClass0,class BaseClass1>) ldstr "G1_C15::Method4.MI.4888()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C808`2<class 
BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G2_C808`2<class BaseClass0,class BaseClass1>) ldstr "G1_C15::Method5.MI.4890()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C808`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G2_C808`2<class BaseClass0,class BaseClass1>) ldstr "G1_C15::Method6.MI.4892<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C808`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C808`2<class BaseClass0,class BaseClass1>) ldstr "G1_C15::Method7.MI.4886<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C808`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C808`2<class BaseClass0,class BaseClass1>::ClassMethod2999<object>() calli default string(class G2_C808`2<class BaseClass0,class BaseClass1>) ldstr "G2_C808::ClassMethod2999.12469<System.Object>()" ldstr "class G2_C808`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C808`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C808`2<class BaseClass0,class BaseClass1>::Method6<object>() calli default 
string(class G2_C808`2<class BaseClass0,class BaseClass1>) ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G2_C808`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C808`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C808`2<class BaseClass0,class BaseClass1>::Method5() calli default string(class G2_C808`2<class BaseClass0,class BaseClass1>) ldstr "G1_C15::Method5.4889()" ldstr "class G2_C808`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C808`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C808`2<class BaseClass0,class BaseClass1>::Method4() calli default string(class G2_C808`2<class BaseClass0,class BaseClass1>) ldstr "G1_C15::Method4.4887()" ldstr "class G2_C808`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C808`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C808`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C808`2<class BaseClass0,class BaseClass1>) ldstr "G1_C15::Method7.4885<System.Object>()" ldstr "class G2_C808`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) newobj instance void class G2_C808`2<class BaseClass1,class BaseClass0>::.ctor() stloc.0 ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method6<object>() calli default 
string(class G2_C808`2<class BaseClass1,class BaseClass0>) ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method5() calli default string(class G2_C808`2<class BaseClass1,class BaseClass0>) ldstr "G1_C15::Method5.4889()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method4() calli default string(class G2_C808`2<class BaseClass1,class BaseClass0>) ldstr "G1_C15::Method4.4887()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C808`2<class BaseClass1,class BaseClass0>) ldstr "G1_C15::Method7.4885<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G2_C808`2<class BaseClass1,class BaseClass0>) ldstr "G1_C15::Method7.MI.4886<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on 
type class G2_C808`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G2_C808`2<class BaseClass1,class BaseClass0>) ldstr "G1_C15::Method4.MI.4888()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C808`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G2_C808`2<class BaseClass1,class BaseClass0>) ldstr "G1_C15::Method5.MI.4890()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C808`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G2_C808`2<class BaseClass1,class BaseClass0>) ldstr "G1_C15::Method6.MI.4892<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C808`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C808`2<class BaseClass1,class BaseClass0>) ldstr "G1_C15::Method7.MI.4886<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C808`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C808`2<class BaseClass1,class BaseClass0>::ClassMethod2999<object>() calli default string(class G2_C808`2<class BaseClass1,class BaseClass0>) ldstr 
"G2_C808::ClassMethod2999.12469<System.Object>()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass0> on type class G2_C808`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C808`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C808`2<class BaseClass1,class BaseClass0>::Method6<object>() calli default string(class G2_C808`2<class BaseClass1,class BaseClass0>) ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass0> on type class G2_C808`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C808`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C808`2<class BaseClass1,class BaseClass0>::Method5() calli default string(class G2_C808`2<class BaseClass1,class BaseClass0>) ldstr "G1_C15::Method5.4889()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass0> on type class G2_C808`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C808`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C808`2<class BaseClass1,class BaseClass0>::Method4() calli default string(class G2_C808`2<class BaseClass1,class BaseClass0>) ldstr "G1_C15::Method4.4887()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass0> on type class G2_C808`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C808`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C808`2<class BaseClass1,class BaseClass0>::Method7<object>() calli default string(class G2_C808`2<class BaseClass1,class BaseClass0>) ldstr "G1_C15::Method7.4885<System.Object>()" ldstr "class G2_C808`2<class BaseClass1,class 
BaseClass0> on type class G2_C808`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) newobj instance void class G2_C808`2<class BaseClass1,class BaseClass1>::.ctor() stloc.0 ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method6<object>() calli default string(class G2_C808`2<class BaseClass1,class BaseClass1>) ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method5() calli default string(class G2_C808`2<class BaseClass1,class BaseClass1>) ldstr "G1_C15::Method5.4889()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method4() calli default string(class G2_C808`2<class BaseClass1,class BaseClass1>) ldstr "G1_C15::Method4.4887()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type class G2_C808`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C15`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C808`2<class BaseClass1,class BaseClass1>) ldstr "G1_C15::Method7.4885<System.Object>()" ldstr "class G1_C15`2<class BaseClass0,class BaseClass1> on type 
class G2_C808`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G2_C808`2<class BaseClass1,class BaseClass1>) ldstr "G1_C15::Method7.MI.4886<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C808`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G2_C808`2<class BaseClass1,class BaseClass1>) ldstr "G1_C15::Method4.MI.4888()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C808`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G2_C808`2<class BaseClass1,class BaseClass1>) ldstr "G1_C15::Method5.MI.4890()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C808`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G2_C808`2<class BaseClass1,class BaseClass1>) ldstr "G1_C15::Method6.MI.4892<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C808`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C808`2<class BaseClass1,class BaseClass1>) ldstr "G1_C15::Method7.MI.4886<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class 
G2_C808`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C808`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C808`2<class BaseClass1,class BaseClass1>::ClassMethod2999<object>() calli default string(class G2_C808`2<class BaseClass1,class BaseClass1>) ldstr "G2_C808::ClassMethod2999.12469<System.Object>()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass1> on type class G2_C808`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C808`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C808`2<class BaseClass1,class BaseClass1>::Method6<object>() calli default string(class G2_C808`2<class BaseClass1,class BaseClass1>) ldstr "G1_C15::Method6.4891<System.Object>()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass1> on type class G2_C808`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C808`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C808`2<class BaseClass1,class BaseClass1>::Method5() calli default string(class G2_C808`2<class BaseClass1,class BaseClass1>) ldstr "G1_C15::Method5.4889()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass1> on type class G2_C808`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C808`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C808`2<class BaseClass1,class BaseClass1>::Method4() calli default string(class G2_C808`2<class BaseClass1,class BaseClass1>) ldstr "G1_C15::Method4.4887()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass1> on type class G2_C808`2<class BaseClass1,class BaseClass1>" call void 
[TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C808`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C808`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G2_C808`2<class BaseClass1,class BaseClass1>) ldstr "G1_C15::Method7.4885<System.Object>()" ldstr "class G2_C808`2<class BaseClass1,class BaseClass1> on type class G2_C808`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldstr "========================================================================\n\n" call void [mscorlib]System.Console::WriteLine(string) ret } .method public hidebysig static int32 Main() cil managed { .custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = ( 01 00 00 00 ) .entrypoint .maxstack 10 call void Generated1399::MethodCallingTest() call void Generated1399::ConstrainedCallsTest() call void Generated1399::StructConstrainedInterfaceCallsTest() call void Generated1399::CalliTest() ldc.i4 100 ret } }
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./src/libraries/System.Private.Xml.Linq/tests/TreeManipulation/XNodeRemove.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Collections.Generic; using System.Linq; using System.Xml.Linq; using CoreXml.Test.XLinq; using Microsoft.Test.ModuleCore; namespace XLinqTests { public abstract class XNodeRemove : XLinqTestCase { #region Fields private EventsHelper _eHelper; private bool _runWithEvents; #endregion #region Public Methods and Operators public void NodeWithNoParent() { _runWithEvents = (bool)Params[0]; XNode[] nodes = { new XElement("a"), new XElement("b", new XAttribute("id", "a0")), new XElement("c", new XAttribute("id", "a0"), new XElement("cc")), new XComment("comm"), new XProcessingInstruction("PI", ""), new XText(""), new XText(" "), new XText("normal"), new XCData("cdata"), new XDocument(), new XDocument(new XDeclaration("1.0", "UTF8", "true"), new XElement("c", new XAttribute("id", "a0"), new XElement("cc"))) }; foreach (XNode n in nodes) { try { if (_runWithEvents) { _eHelper = new EventsHelper(n); } n.Remove(); if (_runWithEvents) { _eHelper.Verify(XObjectChange.Remove, n); } TestLog.Compare(false, "Exception expected [" + n.NodeType + "] " + n); } catch (InvalidOperationException) { } } } public void OnDocument() { _runWithEvents = (bool)Params[0]; var itemCount = (int)Variation.Params[0]; var addDecl = (bool)Variation.Params[1]; object[] data = { new XDocumentType("root", null, null, null), new XElement("A"), new XElement("B", new XElement("x"), "string", new XAttribute("at", "home")), new XProcessingInstruction("PI1", ""), new XProcessingInstruction("PI2", ""), new XText(" "), new XText(" "), new XText(" "), new XComment("comment1"), new XComment("comment2") }; foreach (var nodes in data.NonRecursiveVariations(itemCount)) { if (nodes.Count(x => x is XElement) > 1 || nodes.CheckDTDAfterElement()) { continue; // double root elem check and dtd after elem check } int length = (new 
XDocument(nodes)).Nodes().Count(); for (int i = 0; i < length; i++) { XDocument doc = addDecl ? new XDocument(new XDeclaration("1.0", "UTF8", "true"), nodes) : new XDocument(nodes); XNode o = doc.Nodes().ElementAt(i); if (_runWithEvents) { _eHelper = new EventsHelper(doc); } DoRemoveTest(doc, i); if (_runWithEvents) { _eHelper.Verify(XObjectChange.Remove, o); } } } } public void OnElement() { _runWithEvents = (bool)Params[0]; var useParentElement = (bool)Variation.Params[0]; var useDocument = (bool)Variation.Params[1]; var itemCount = (int)Variation.Params[2]; object[] data = { new XElement("A"), new XElement("B", new XElement("X")), new XProcessingInstruction("PI1", ""), new XProcessingInstruction("PI2", ""), new XAttribute("id", "a0"), new XText("text"), new XText(""), new XText("text2"), new XCData("cdata1"), new XCData("cdata2"), null, "string1", "string2", new XComment("comment1"), new XComment("comment2") }; foreach (var nodes in data.NonRecursiveVariations(itemCount)) { int length = (new XElement("dummy", nodes)).Nodes().Count(); for (int i = 0; i < length; i++) { var elem = new XElement("X", nodes); XElement parent = null; if (useParentElement) { parent = new XElement("Parent", new XAttribute("id", "x07"), "text", elem, "text2"); } if (useDocument) { var doc = new XDocument(useParentElement ? 
parent : elem); } var elemCopy = new XElement(elem); XNode o = elem.Nodes().ElementAt(i); if (_runWithEvents) { _eHelper = new EventsHelper(elem); } DoRemoveTest(elem, i); if (_runWithEvents) { _eHelper.Verify(XObjectChange.Remove, o); } } } } public void RemoveNodesFromMixedContent() { int count = 0; _runWithEvents = (bool)Params[0]; XElement a = XElement.Parse(@"<A>t1<B/>t2</A>"); if (_runWithEvents) { _eHelper = new EventsHelper(a); count = a.Nodes().Skip(1).Count(); } a.Nodes().Skip(1).Remove(); if (_runWithEvents) { _eHelper.Verify(XObjectChange.Remove, count); } TestLog.Compare(a.Nodes().Count(), 1, "Wrong node count ..."); TestLog.Compare(a.FirstNode is XText, "text node"); TestLog.Compare((a.FirstNode as XText).Value, "t1", "text node value"); } public void UsagePattern1() { _runWithEvents = (bool)Params[0]; var e = new XElement("root", new XElement("b", new XAttribute("id", "a0")), new XElement("c", new XAttribute("id", "a0"), new XElement("cc")), new XComment("comm"), new XProcessingInstruction("PI", ""), new XText(""), new XElement("a"), new XText(" "), new XText("normal"), new XCData("cdata")); // Need to do snapshot -> otherwise will stop on the first node when Removed. 
if (_runWithEvents) { _eHelper = new EventsHelper(e); } foreach (XNode n in e.Nodes().ToList()) { n.Remove(); if (_runWithEvents) { _eHelper.Verify(XObjectChange.Remove, n); } } TestLog.Compare(e.Nodes().IsEmpty(), "nodes Removed"); } #endregion #region Methods private void DoRemoveTest(XContainer elem, int position) { List<ExpectedValue> expectedData = elem.Nodes().Take(position).Concat(elem.Nodes().Skip(position + 1)).Select(n => new ExpectedValue(!(n is XText), n)).ProcessNodes().ToList(); XNode toRemove = elem.Nodes().ElementAt(position); toRemove.Remove(); TestLog.Compare(toRemove.Parent == null, "Parent of Removed"); TestLog.Compare(toRemove.Document == null, "Document of Removed"); TestLog.Compare(toRemove.NextNode == null, "NextNode"); TestLog.Compare(toRemove.PreviousNode == null, "PreviousNode"); if (toRemove is XContainer) { foreach (XNode child in (toRemove as XContainer).Nodes()) { TestLog.Compare(child.Document == null, "Document of child of Removed"); TestLog.Compare(child.Parent == toRemove, "Parent of child of Removed should be set"); } } // try Remove Removed node try { toRemove.Remove(); TestLog.Compare(false, "Exception expected [" + toRemove.NodeType + "] " + toRemove); } catch (InvalidOperationException) { } TestLog.Compare(expectedData.EqualAll(elem.Nodes(), XNode.EqualityComparer), "The rest of the tree - Nodes()"); } #endregion } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Collections.Generic; using System.Linq; using System.Xml.Linq; using CoreXml.Test.XLinq; using Microsoft.Test.ModuleCore; namespace XLinqTests { public abstract class XNodeRemove : XLinqTestCase { #region Fields private EventsHelper _eHelper; private bool _runWithEvents; #endregion #region Public Methods and Operators public void NodeWithNoParent() { _runWithEvents = (bool)Params[0]; XNode[] nodes = { new XElement("a"), new XElement("b", new XAttribute("id", "a0")), new XElement("c", new XAttribute("id", "a0"), new XElement("cc")), new XComment("comm"), new XProcessingInstruction("PI", ""), new XText(""), new XText(" "), new XText("normal"), new XCData("cdata"), new XDocument(), new XDocument(new XDeclaration("1.0", "UTF8", "true"), new XElement("c", new XAttribute("id", "a0"), new XElement("cc"))) }; foreach (XNode n in nodes) { try { if (_runWithEvents) { _eHelper = new EventsHelper(n); } n.Remove(); if (_runWithEvents) { _eHelper.Verify(XObjectChange.Remove, n); } TestLog.Compare(false, "Exception expected [" + n.NodeType + "] " + n); } catch (InvalidOperationException) { } } } public void OnDocument() { _runWithEvents = (bool)Params[0]; var itemCount = (int)Variation.Params[0]; var addDecl = (bool)Variation.Params[1]; object[] data = { new XDocumentType("root", null, null, null), new XElement("A"), new XElement("B", new XElement("x"), "string", new XAttribute("at", "home")), new XProcessingInstruction("PI1", ""), new XProcessingInstruction("PI2", ""), new XText(" "), new XText(" "), new XText(" "), new XComment("comment1"), new XComment("comment2") }; foreach (var nodes in data.NonRecursiveVariations(itemCount)) { if (nodes.Count(x => x is XElement) > 1 || nodes.CheckDTDAfterElement()) { continue; // double root elem check and dtd after elem check } int length = (new 
XDocument(nodes)).Nodes().Count(); for (int i = 0; i < length; i++) { XDocument doc = addDecl ? new XDocument(new XDeclaration("1.0", "UTF8", "true"), nodes) : new XDocument(nodes); XNode o = doc.Nodes().ElementAt(i); if (_runWithEvents) { _eHelper = new EventsHelper(doc); } DoRemoveTest(doc, i); if (_runWithEvents) { _eHelper.Verify(XObjectChange.Remove, o); } } } } public void OnElement() { _runWithEvents = (bool)Params[0]; var useParentElement = (bool)Variation.Params[0]; var useDocument = (bool)Variation.Params[1]; var itemCount = (int)Variation.Params[2]; object[] data = { new XElement("A"), new XElement("B", new XElement("X")), new XProcessingInstruction("PI1", ""), new XProcessingInstruction("PI2", ""), new XAttribute("id", "a0"), new XText("text"), new XText(""), new XText("text2"), new XCData("cdata1"), new XCData("cdata2"), null, "string1", "string2", new XComment("comment1"), new XComment("comment2") }; foreach (var nodes in data.NonRecursiveVariations(itemCount)) { int length = (new XElement("dummy", nodes)).Nodes().Count(); for (int i = 0; i < length; i++) { var elem = new XElement("X", nodes); XElement parent = null; if (useParentElement) { parent = new XElement("Parent", new XAttribute("id", "x07"), "text", elem, "text2"); } if (useDocument) { var doc = new XDocument(useParentElement ? 
parent : elem); } var elemCopy = new XElement(elem); XNode o = elem.Nodes().ElementAt(i); if (_runWithEvents) { _eHelper = new EventsHelper(elem); } DoRemoveTest(elem, i); if (_runWithEvents) { _eHelper.Verify(XObjectChange.Remove, o); } } } } public void RemoveNodesFromMixedContent() { int count = 0; _runWithEvents = (bool)Params[0]; XElement a = XElement.Parse(@"<A>t1<B/>t2</A>"); if (_runWithEvents) { _eHelper = new EventsHelper(a); count = a.Nodes().Skip(1).Count(); } a.Nodes().Skip(1).Remove(); if (_runWithEvents) { _eHelper.Verify(XObjectChange.Remove, count); } TestLog.Compare(a.Nodes().Count(), 1, "Wrong node count ..."); TestLog.Compare(a.FirstNode is XText, "text node"); TestLog.Compare((a.FirstNode as XText).Value, "t1", "text node value"); } public void UsagePattern1() { _runWithEvents = (bool)Params[0]; var e = new XElement("root", new XElement("b", new XAttribute("id", "a0")), new XElement("c", new XAttribute("id", "a0"), new XElement("cc")), new XComment("comm"), new XProcessingInstruction("PI", ""), new XText(""), new XElement("a"), new XText(" "), new XText("normal"), new XCData("cdata")); // Need to do snapshot -> otherwise will stop on the first node when Removed. 
if (_runWithEvents) { _eHelper = new EventsHelper(e); } foreach (XNode n in e.Nodes().ToList()) { n.Remove(); if (_runWithEvents) { _eHelper.Verify(XObjectChange.Remove, n); } } TestLog.Compare(e.Nodes().IsEmpty(), "nodes Removed"); } #endregion #region Methods private void DoRemoveTest(XContainer elem, int position) { List<ExpectedValue> expectedData = elem.Nodes().Take(position).Concat(elem.Nodes().Skip(position + 1)).Select(n => new ExpectedValue(!(n is XText), n)).ProcessNodes().ToList(); XNode toRemove = elem.Nodes().ElementAt(position); toRemove.Remove(); TestLog.Compare(toRemove.Parent == null, "Parent of Removed"); TestLog.Compare(toRemove.Document == null, "Document of Removed"); TestLog.Compare(toRemove.NextNode == null, "NextNode"); TestLog.Compare(toRemove.PreviousNode == null, "PreviousNode"); if (toRemove is XContainer) { foreach (XNode child in (toRemove as XContainer).Nodes()) { TestLog.Compare(child.Document == null, "Document of child of Removed"); TestLog.Compare(child.Parent == toRemove, "Parent of child of Removed should be set"); } } // try Remove Removed node try { toRemove.Remove(); TestLog.Compare(false, "Exception expected [" + toRemove.NodeType + "] " + toRemove); } catch (InvalidOperationException) { } TestLog.Compare(expectedData.EqualAll(elem.Nodes(), XNode.EqualityComparer), "The rest of the tree - Nodes()"); } #endregion } }
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./src/libraries/Microsoft.NETCore.Platforms/src/GenerateRuntimeGraph.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Collections.Generic; using System.IO; using System.Linq; using System.Text; using System.Xml.Linq; using Microsoft.Build.Framework; using Newtonsoft.Json; using NuGet.RuntimeModel; namespace Microsoft.NETCore.Platforms.BuildTasks { public class GenerateRuntimeGraph : BuildTask { /// <summary> /// A set of RuntimeGroups that can be used to generate a runtime graph /// Identity: the base string for the RID, without version architecture, or qualifiers. /// Parent: the base string for the parent of this RID. This RID will be imported by the baseRID, architecture-specific, /// and qualifier-specific RIDs (with the latter two appending appropriate architecture and qualifiers). /// Versions: A list of strings delimited by semi-colons that represent the versions for this RID. /// TreatVersionsAsCompatible: Default is true. When true, version-specific RIDs will import the previous /// version-specific RID in the Versions list, with the first version importing the version-less RID. /// When false all version-specific RIDs will import the version-less RID (bypassing previous version-specific RIDs) /// OmitVersionDelimiter: Default is false. When true no characters will separate the base RID and version (EG: win7). /// When false a '.' will separate the base RID and version (EG: osx.10.12). /// ApplyVersionsToParent: Default is false. When true, version-specific RIDs will import version-specific Parent RIDs /// similar to is done for architecture and qualifier (see Parent above). /// Architectures: A list of strings delimited by semi-colons that represent the architectures for this RID. /// AdditionalQualifiers: A list of strings delimited by semi-colons that represent the additional qualifiers for this RID. 
/// Additional qualifers do not stack, each only applies to the qualifier-less RIDs (so as not to cause combinatorial /// exponential growth of RIDs). /// /// The following options can be used under special circumstances but break the normal precedence rules we try to establish /// by generating the RID graph from common logic. These options make it possible to create a RID fallback chain that doesn't /// match the rest of the RIDs and therefore is hard for developers/package authors to reason about. /// Only use these options for cases where you know what you are doing and have carefully reviewed the resulting RID fallbacks /// using the CompatibliltyMap. /// OmitRIDs: A list of strings delimited by semi-colons that represent RIDs calculated from this RuntimeGroup that should /// be omitted from the RuntimeGraph. These RIDs will not be referenced nor defined. /// OmitRIDDefinitions: A list of strings delimited by semi-colons that represent RIDs calculated from this RuntimeGroup /// that should be omitted from the RuntimeGraph. These RIDs will not be defined by this RuntimeGroup, but will be /// referenced: useful in case some other RuntimeGroup (or runtime.json template) defines them. /// OmitRIDReferences: A list of strings delimited by semi-colons that represent RIDs calculated from this RuntimeGroup /// that should be omitted from the RuntimeGraph. These RIDs will be defined but not referenced by this RuntimeGroup. /// </summary> public ITaskItem[] RuntimeGroups { get; set; } /// <summary> /// Additional runtime identifiers to add to the graph. /// </summary> public string[] AdditionalRuntimeIdentifiers { get; set; } /// <summary> /// Parent RID to use for any unknown AdditionalRuntimeIdentifer. 
/// </summary> public string AdditionalRuntimeIdentifierParent { get; set; } /// <summary> /// Optional source Runtime.json to use as a starting point when merging additional RuntimeGroups /// </summary> public string SourceRuntimeJson { get; set; } /// <summary> /// Where to write the final runtime.json /// </summary> public string RuntimeJson { get; set; } /// <summary> /// Optionally, other runtime.jsons which may contain imported RIDs /// </summary> public string[] ExternalRuntimeJsons { get; set; } /// <summary> /// When defined, specifies the file to write compatibility precedence for each RID in the graph. /// </summary> public string CompatibilityMap { get; set; } /// <summary> /// True to write the generated runtime.json to RuntimeJson and compatibility map to CompatibilityMap, otherwise files are read and diffed /// with generated versions and an error is emitted if they differ. /// Setting UpdateRuntimeFiles will overwrite files even when the file is marked ReadOnly. /// </summary> public bool UpdateRuntimeFiles { get; set; } /// <summary> /// When defined, specifies the file to write a DGML representation of the runtime graph. 
/// </summary> public string RuntimeDirectedGraph { get; set; } public override bool Execute() { if (RuntimeGroups != null && RuntimeGroups.Any() && RuntimeJson == null) { Log.LogError($"{nameof(RuntimeJson)} argument must be specified when {nameof(RuntimeGroups)} is specified."); return false; } RuntimeGraph runtimeGraph; if (!string.IsNullOrEmpty(SourceRuntimeJson)) { if (!File.Exists(SourceRuntimeJson)) { Log.LogError($"{nameof(SourceRuntimeJson)} did not exist at {SourceRuntimeJson}."); return false; } runtimeGraph = JsonRuntimeFormat.ReadRuntimeGraph(SourceRuntimeJson); } else { runtimeGraph = new RuntimeGraph(); } List<RuntimeGroup> runtimeGroups = RuntimeGroups.NullAsEmpty().Select(i => new RuntimeGroup(i)).ToList(); AddRuntimeIdentifiers(runtimeGroups); foreach (var runtimeGroup in runtimeGroups) { runtimeGraph = SafeMerge(runtimeGraph, runtimeGroup); } Dictionary<string, string> externalRids = new Dictionary<string, string>(); if (ExternalRuntimeJsons != null) { foreach (var externalRuntimeJson in ExternalRuntimeJsons) { RuntimeGraph externalRuntimeGraph = JsonRuntimeFormat.ReadRuntimeGraph(externalRuntimeJson); foreach (var runtime in externalRuntimeGraph.Runtimes.Keys) { // don't check for duplicates, we merely care what is external externalRids.Add(runtime, externalRuntimeJson); } } } ValidateImports(runtimeGraph, externalRids); if (!string.IsNullOrEmpty(RuntimeJson)) { if (UpdateRuntimeFiles) { EnsureWritable(RuntimeJson); WriteRuntimeGraph(RuntimeJson, runtimeGraph); } else { // validate that existing file matches generated file if (!File.Exists(RuntimeJson)) { Log.LogError($"{nameof(RuntimeJson)} did not exist at {RuntimeJson} and {nameof(UpdateRuntimeFiles)} was not specified."); } else { var existingRuntimeGraph = JsonRuntimeFormat.ReadRuntimeGraph(RuntimeJson); if (!existingRuntimeGraph.Equals(runtimeGraph)) { Log.LogError($"The generated {nameof(RuntimeJson)} differs from {RuntimeJson} and {nameof(UpdateRuntimeFiles)} was not specified. 
Please specify {nameof(UpdateRuntimeFiles)}=true to commit the changes."); } } } } if (!string.IsNullOrEmpty(CompatibilityMap)) { var compatibilityMap = GetCompatibilityMap(runtimeGraph); if (UpdateRuntimeFiles) { EnsureWritable(CompatibilityMap); WriteCompatibilityMap(compatibilityMap, CompatibilityMap); } else { // validate that existing file matches generated file if (!File.Exists(CompatibilityMap)) { Log.LogError($"{nameof(CompatibilityMap)} did not exist at {CompatibilityMap} and {nameof(UpdateRuntimeFiles)} was not specified."); } else { var existingCompatibilityMap = ReadCompatibilityMap(CompatibilityMap); if (!CompatibilityMapEquals(existingCompatibilityMap, compatibilityMap)) { Log.LogError($"The generated {nameof(CompatibilityMap)} differs from {CompatibilityMap} and {nameof(UpdateRuntimeFiles)} was not specified. Please specify {nameof(UpdateRuntimeFiles)}=true to commit the changes."); } } } } if (!string.IsNullOrEmpty(RuntimeDirectedGraph)) { WriteRuntimeGraph(runtimeGraph, RuntimeDirectedGraph); } return !Log.HasLoggedErrors; } private void EnsureWritable(string file) { if (File.Exists(file)) { var existingAttributes = File.GetAttributes(file); if ((existingAttributes & FileAttributes.ReadOnly) != 0) { File.SetAttributes(file, existingAttributes &= ~FileAttributes.ReadOnly); } } } public static void WriteRuntimeGraph(string filePath, RuntimeGraph runtimeGraph) { using (var fileStream = new FileStream(filePath, FileMode.Create)) using (var textWriter = new StreamWriter(fileStream)) using (var jsonWriter = new JsonTextWriter(textWriter)) using (var writer = new JsonObjectWriter(jsonWriter)) { jsonWriter.Formatting = Formatting.Indented; // workaround https://github.com/NuGet/Home/issues/9532 writer.WriteObjectStart(); JsonRuntimeFormat.WriteRuntimeGraph(writer, runtimeGraph); writer.WriteObjectEnd(); } } private RuntimeGraph SafeMerge(RuntimeGraph existingGraph, RuntimeGroup runtimeGroup) { var runtimeGraph = runtimeGroup.GetRuntimeGraph(); foreach (var 
existingRuntimeDescription in existingGraph.Runtimes.Values) { RuntimeDescription newRuntimeDescription; if (runtimeGraph.Runtimes.TryGetValue(existingRuntimeDescription.RuntimeIdentifier, out newRuntimeDescription)) { // overlapping RID, ensure that the imports match (same ordering and content) if (!existingRuntimeDescription.InheritedRuntimes.SequenceEqual(newRuntimeDescription.InheritedRuntimes)) { Log.LogError($"RuntimeGroup {runtimeGroup.BaseRID} defines RID {newRuntimeDescription.RuntimeIdentifier} with imports {string.Join(";", newRuntimeDescription.InheritedRuntimes)} which differ from existing imports {string.Join(";", existingRuntimeDescription.InheritedRuntimes)}. You may avoid this by specifying {nameof(RuntimeGroup.OmitRIDDefinitions)} metadata with {newRuntimeDescription.RuntimeIdentifier}."); } } } return RuntimeGraph.Merge(existingGraph, runtimeGraph); } private void ValidateImports(RuntimeGraph runtimeGraph, IDictionary<string, string> externalRIDs) { foreach (var runtimeDescription in runtimeGraph.Runtimes.Values) { string externalRuntimeJson; if (externalRIDs.TryGetValue(runtimeDescription.RuntimeIdentifier, out externalRuntimeJson)) { Log.LogError($"Runtime {runtimeDescription.RuntimeIdentifier} is defined in both this RuntimeGraph and {externalRuntimeJson}."); } foreach (var import in runtimeDescription.InheritedRuntimes) { if (!runtimeGraph.Runtimes.ContainsKey(import) && !externalRIDs.ContainsKey(import)) { Log.LogError($"Runtime {runtimeDescription.RuntimeIdentifier} imports {import} which is not defined."); } } } } private void AddRuntimeIdentifiers(ICollection<RuntimeGroup> runtimeGroups) { if (AdditionalRuntimeIdentifiers == null || AdditionalRuntimeIdentifiers.Length == 0) { return; } RuntimeGroupCollection runtimeGroupCollection = new RuntimeGroupCollection(runtimeGroups); foreach (string additionalRuntimeIdentifier in AdditionalRuntimeIdentifiers) { runtimeGroupCollection.AddRuntimeIdentifier(additionalRuntimeIdentifier, 
AdditionalRuntimeIdentifierParent); } } private static IDictionary<string, IEnumerable<string>> GetCompatibilityMap(RuntimeGraph graph) { Dictionary<string, IEnumerable<string>> compatibilityMap = new Dictionary<string, IEnumerable<string>>(); foreach (var rid in graph.Runtimes.Keys.OrderBy(rid => rid, StringComparer.Ordinal)) { compatibilityMap.Add(rid, graph.ExpandRuntime(rid)); } return compatibilityMap; } private static IDictionary<string, IEnumerable<string>> ReadCompatibilityMap(string mapFile) { var serializer = new JsonSerializer(); using (var file = File.OpenText(mapFile)) using (var jsonTextReader = new JsonTextReader(file)) { return serializer.Deserialize<IDictionary<string, IEnumerable<string>>>(jsonTextReader); } } private static void WriteCompatibilityMap(IDictionary<string, IEnumerable<string>> compatibilityMap, string mapFile) { var serializer = new JsonSerializer() { Formatting = Formatting.Indented, StringEscapeHandling = StringEscapeHandling.EscapeNonAscii }; string directory = Path.GetDirectoryName(mapFile); if (!string.IsNullOrEmpty(directory) && !Directory.Exists(directory)) { Directory.CreateDirectory(directory); } using (var file = File.CreateText(mapFile)) { serializer.Serialize(file, compatibilityMap); } } private static bool CompatibilityMapEquals(IDictionary<string, IEnumerable<string>> left, IDictionary<string, IEnumerable<string>> right) { if (left.Count != right.Count) { return false; } foreach (var leftPair in left) { IEnumerable<string> rightValue; if (!right.TryGetValue(leftPair.Key, out rightValue)) { return false; } if (!rightValue.SequenceEqual(leftPair.Value)) { return false; } } return true; } private static XNamespace s_dgmlns = @"http://schemas.microsoft.com/vs/2009/dgml"; private static void WriteRuntimeGraph(RuntimeGraph graph, string dependencyGraphFilePath) { var doc = new XDocument(new XElement(s_dgmlns + "DirectedGraph")); var nodesElement = new XElement(s_dgmlns + "Nodes"); var linksElement = new XElement(s_dgmlns + 
"Links"); doc.Root.Add(nodesElement); doc.Root.Add(linksElement); foreach (var runtimeDescription in graph.Runtimes.Values) { nodesElement.Add(new XElement(s_dgmlns + "Node", new XAttribute("Id", runtimeDescription.RuntimeIdentifier))); foreach (var import in runtimeDescription.InheritedRuntimes) { linksElement.Add(new XElement(s_dgmlns + "Link", new XAttribute("Source", runtimeDescription.RuntimeIdentifier), new XAttribute("Target", import))); } } using (var file = File.Create(dependencyGraphFilePath)) { doc.Save(file); } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Collections.Generic; using System.IO; using System.Linq; using System.Text; using System.Xml.Linq; using Microsoft.Build.Framework; using Newtonsoft.Json; using NuGet.RuntimeModel; namespace Microsoft.NETCore.Platforms.BuildTasks { public class GenerateRuntimeGraph : BuildTask { /// <summary> /// A set of RuntimeGroups that can be used to generate a runtime graph /// Identity: the base string for the RID, without version architecture, or qualifiers. /// Parent: the base string for the parent of this RID. This RID will be imported by the baseRID, architecture-specific, /// and qualifier-specific RIDs (with the latter two appending appropriate architecture and qualifiers). /// Versions: A list of strings delimited by semi-colons that represent the versions for this RID. /// TreatVersionsAsCompatible: Default is true. When true, version-specific RIDs will import the previous /// version-specific RID in the Versions list, with the first version importing the version-less RID. /// When false all version-specific RIDs will import the version-less RID (bypassing previous version-specific RIDs) /// OmitVersionDelimiter: Default is false. When true no characters will separate the base RID and version (EG: win7). /// When false a '.' will separate the base RID and version (EG: osx.10.12). /// ApplyVersionsToParent: Default is false. When true, version-specific RIDs will import version-specific Parent RIDs /// similar to is done for architecture and qualifier (see Parent above). /// Architectures: A list of strings delimited by semi-colons that represent the architectures for this RID. /// AdditionalQualifiers: A list of strings delimited by semi-colons that represent the additional qualifiers for this RID. 
/// Additional qualifers do not stack, each only applies to the qualifier-less RIDs (so as not to cause combinatorial /// exponential growth of RIDs). /// /// The following options can be used under special circumstances but break the normal precedence rules we try to establish /// by generating the RID graph from common logic. These options make it possible to create a RID fallback chain that doesn't /// match the rest of the RIDs and therefore is hard for developers/package authors to reason about. /// Only use these options for cases where you know what you are doing and have carefully reviewed the resulting RID fallbacks /// using the CompatibliltyMap. /// OmitRIDs: A list of strings delimited by semi-colons that represent RIDs calculated from this RuntimeGroup that should /// be omitted from the RuntimeGraph. These RIDs will not be referenced nor defined. /// OmitRIDDefinitions: A list of strings delimited by semi-colons that represent RIDs calculated from this RuntimeGroup /// that should be omitted from the RuntimeGraph. These RIDs will not be defined by this RuntimeGroup, but will be /// referenced: useful in case some other RuntimeGroup (or runtime.json template) defines them. /// OmitRIDReferences: A list of strings delimited by semi-colons that represent RIDs calculated from this RuntimeGroup /// that should be omitted from the RuntimeGraph. These RIDs will be defined but not referenced by this RuntimeGroup. /// </summary> public ITaskItem[] RuntimeGroups { get; set; } /// <summary> /// Additional runtime identifiers to add to the graph. /// </summary> public string[] AdditionalRuntimeIdentifiers { get; set; } /// <summary> /// Parent RID to use for any unknown AdditionalRuntimeIdentifer. 
/// </summary> public string AdditionalRuntimeIdentifierParent { get; set; } /// <summary> /// Optional source Runtime.json to use as a starting point when merging additional RuntimeGroups /// </summary> public string SourceRuntimeJson { get; set; } /// <summary> /// Where to write the final runtime.json /// </summary> public string RuntimeJson { get; set; } /// <summary> /// Optionally, other runtime.jsons which may contain imported RIDs /// </summary> public string[] ExternalRuntimeJsons { get; set; } /// <summary> /// When defined, specifies the file to write compatibility precedence for each RID in the graph. /// </summary> public string CompatibilityMap { get; set; } /// <summary> /// True to write the generated runtime.json to RuntimeJson and compatibility map to CompatibilityMap, otherwise files are read and diffed /// with generated versions and an error is emitted if they differ. /// Setting UpdateRuntimeFiles will overwrite files even when the file is marked ReadOnly. /// </summary> public bool UpdateRuntimeFiles { get; set; } /// <summary> /// When defined, specifies the file to write a DGML representation of the runtime graph. 
/// </summary> public string RuntimeDirectedGraph { get; set; } public override bool Execute() { if (RuntimeGroups != null && RuntimeGroups.Any() && RuntimeJson == null) { Log.LogError($"{nameof(RuntimeJson)} argument must be specified when {nameof(RuntimeGroups)} is specified."); return false; } RuntimeGraph runtimeGraph; if (!string.IsNullOrEmpty(SourceRuntimeJson)) { if (!File.Exists(SourceRuntimeJson)) { Log.LogError($"{nameof(SourceRuntimeJson)} did not exist at {SourceRuntimeJson}."); return false; } runtimeGraph = JsonRuntimeFormat.ReadRuntimeGraph(SourceRuntimeJson); } else { runtimeGraph = new RuntimeGraph(); } List<RuntimeGroup> runtimeGroups = RuntimeGroups.NullAsEmpty().Select(i => new RuntimeGroup(i)).ToList(); AddRuntimeIdentifiers(runtimeGroups); foreach (var runtimeGroup in runtimeGroups) { runtimeGraph = SafeMerge(runtimeGraph, runtimeGroup); } Dictionary<string, string> externalRids = new Dictionary<string, string>(); if (ExternalRuntimeJsons != null) { foreach (var externalRuntimeJson in ExternalRuntimeJsons) { RuntimeGraph externalRuntimeGraph = JsonRuntimeFormat.ReadRuntimeGraph(externalRuntimeJson); foreach (var runtime in externalRuntimeGraph.Runtimes.Keys) { // don't check for duplicates, we merely care what is external externalRids.Add(runtime, externalRuntimeJson); } } } ValidateImports(runtimeGraph, externalRids); if (!string.IsNullOrEmpty(RuntimeJson)) { if (UpdateRuntimeFiles) { EnsureWritable(RuntimeJson); WriteRuntimeGraph(RuntimeJson, runtimeGraph); } else { // validate that existing file matches generated file if (!File.Exists(RuntimeJson)) { Log.LogError($"{nameof(RuntimeJson)} did not exist at {RuntimeJson} and {nameof(UpdateRuntimeFiles)} was not specified."); } else { var existingRuntimeGraph = JsonRuntimeFormat.ReadRuntimeGraph(RuntimeJson); if (!existingRuntimeGraph.Equals(runtimeGraph)) { Log.LogError($"The generated {nameof(RuntimeJson)} differs from {RuntimeJson} and {nameof(UpdateRuntimeFiles)} was not specified. 
Please specify {nameof(UpdateRuntimeFiles)}=true to commit the changes."); } } } } if (!string.IsNullOrEmpty(CompatibilityMap)) { var compatibilityMap = GetCompatibilityMap(runtimeGraph); if (UpdateRuntimeFiles) { EnsureWritable(CompatibilityMap); WriteCompatibilityMap(compatibilityMap, CompatibilityMap); } else { // validate that existing file matches generated file if (!File.Exists(CompatibilityMap)) { Log.LogError($"{nameof(CompatibilityMap)} did not exist at {CompatibilityMap} and {nameof(UpdateRuntimeFiles)} was not specified."); } else { var existingCompatibilityMap = ReadCompatibilityMap(CompatibilityMap); if (!CompatibilityMapEquals(existingCompatibilityMap, compatibilityMap)) { Log.LogError($"The generated {nameof(CompatibilityMap)} differs from {CompatibilityMap} and {nameof(UpdateRuntimeFiles)} was not specified. Please specify {nameof(UpdateRuntimeFiles)}=true to commit the changes."); } } } } if (!string.IsNullOrEmpty(RuntimeDirectedGraph)) { WriteRuntimeGraph(runtimeGraph, RuntimeDirectedGraph); } return !Log.HasLoggedErrors; } private void EnsureWritable(string file) { if (File.Exists(file)) { var existingAttributes = File.GetAttributes(file); if ((existingAttributes & FileAttributes.ReadOnly) != 0) { File.SetAttributes(file, existingAttributes &= ~FileAttributes.ReadOnly); } } } public static void WriteRuntimeGraph(string filePath, RuntimeGraph runtimeGraph) { using (var fileStream = new FileStream(filePath, FileMode.Create)) using (var textWriter = new StreamWriter(fileStream)) using (var jsonWriter = new JsonTextWriter(textWriter)) using (var writer = new JsonObjectWriter(jsonWriter)) { jsonWriter.Formatting = Formatting.Indented; // workaround https://github.com/NuGet/Home/issues/9532 writer.WriteObjectStart(); JsonRuntimeFormat.WriteRuntimeGraph(writer, runtimeGraph); writer.WriteObjectEnd(); } } private RuntimeGraph SafeMerge(RuntimeGraph existingGraph, RuntimeGroup runtimeGroup) { var runtimeGraph = runtimeGroup.GetRuntimeGraph(); foreach (var 
existingRuntimeDescription in existingGraph.Runtimes.Values) { RuntimeDescription newRuntimeDescription; if (runtimeGraph.Runtimes.TryGetValue(existingRuntimeDescription.RuntimeIdentifier, out newRuntimeDescription)) { // overlapping RID, ensure that the imports match (same ordering and content) if (!existingRuntimeDescription.InheritedRuntimes.SequenceEqual(newRuntimeDescription.InheritedRuntimes)) { Log.LogError($"RuntimeGroup {runtimeGroup.BaseRID} defines RID {newRuntimeDescription.RuntimeIdentifier} with imports {string.Join(";", newRuntimeDescription.InheritedRuntimes)} which differ from existing imports {string.Join(";", existingRuntimeDescription.InheritedRuntimes)}. You may avoid this by specifying {nameof(RuntimeGroup.OmitRIDDefinitions)} metadata with {newRuntimeDescription.RuntimeIdentifier}."); } } } return RuntimeGraph.Merge(existingGraph, runtimeGraph); } private void ValidateImports(RuntimeGraph runtimeGraph, IDictionary<string, string> externalRIDs) { foreach (var runtimeDescription in runtimeGraph.Runtimes.Values) { string externalRuntimeJson; if (externalRIDs.TryGetValue(runtimeDescription.RuntimeIdentifier, out externalRuntimeJson)) { Log.LogError($"Runtime {runtimeDescription.RuntimeIdentifier} is defined in both this RuntimeGraph and {externalRuntimeJson}."); } foreach (var import in runtimeDescription.InheritedRuntimes) { if (!runtimeGraph.Runtimes.ContainsKey(import) && !externalRIDs.ContainsKey(import)) { Log.LogError($"Runtime {runtimeDescription.RuntimeIdentifier} imports {import} which is not defined."); } } } } private void AddRuntimeIdentifiers(ICollection<RuntimeGroup> runtimeGroups) { if (AdditionalRuntimeIdentifiers == null || AdditionalRuntimeIdentifiers.Length == 0) { return; } RuntimeGroupCollection runtimeGroupCollection = new RuntimeGroupCollection(runtimeGroups); foreach (string additionalRuntimeIdentifier in AdditionalRuntimeIdentifiers) { runtimeGroupCollection.AddRuntimeIdentifier(additionalRuntimeIdentifier, 
AdditionalRuntimeIdentifierParent); } } private static IDictionary<string, IEnumerable<string>> GetCompatibilityMap(RuntimeGraph graph) { Dictionary<string, IEnumerable<string>> compatibilityMap = new Dictionary<string, IEnumerable<string>>(); foreach (var rid in graph.Runtimes.Keys.OrderBy(rid => rid, StringComparer.Ordinal)) { compatibilityMap.Add(rid, graph.ExpandRuntime(rid)); } return compatibilityMap; } private static IDictionary<string, IEnumerable<string>> ReadCompatibilityMap(string mapFile) { var serializer = new JsonSerializer(); using (var file = File.OpenText(mapFile)) using (var jsonTextReader = new JsonTextReader(file)) { return serializer.Deserialize<IDictionary<string, IEnumerable<string>>>(jsonTextReader); } } private static void WriteCompatibilityMap(IDictionary<string, IEnumerable<string>> compatibilityMap, string mapFile) { var serializer = new JsonSerializer() { Formatting = Formatting.Indented, StringEscapeHandling = StringEscapeHandling.EscapeNonAscii }; string directory = Path.GetDirectoryName(mapFile); if (!string.IsNullOrEmpty(directory) && !Directory.Exists(directory)) { Directory.CreateDirectory(directory); } using (var file = File.CreateText(mapFile)) { serializer.Serialize(file, compatibilityMap); } } private static bool CompatibilityMapEquals(IDictionary<string, IEnumerable<string>> left, IDictionary<string, IEnumerable<string>> right) { if (left.Count != right.Count) { return false; } foreach (var leftPair in left) { IEnumerable<string> rightValue; if (!right.TryGetValue(leftPair.Key, out rightValue)) { return false; } if (!rightValue.SequenceEqual(leftPair.Value)) { return false; } } return true; } private static XNamespace s_dgmlns = @"http://schemas.microsoft.com/vs/2009/dgml"; private static void WriteRuntimeGraph(RuntimeGraph graph, string dependencyGraphFilePath) { var doc = new XDocument(new XElement(s_dgmlns + "DirectedGraph")); var nodesElement = new XElement(s_dgmlns + "Nodes"); var linksElement = new XElement(s_dgmlns + 
"Links"); doc.Root.Add(nodesElement); doc.Root.Add(linksElement); foreach (var runtimeDescription in graph.Runtimes.Values) { nodesElement.Add(new XElement(s_dgmlns + "Node", new XAttribute("Id", runtimeDescription.RuntimeIdentifier))); foreach (var import in runtimeDescription.InheritedRuntimes) { linksElement.Add(new XElement(s_dgmlns + "Link", new XAttribute("Source", runtimeDescription.RuntimeIdentifier), new XAttribute("Target", import))); } } using (var file = File.Create(dependencyGraphFilePath)) { doc.Save(file); } } } }
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./src/libraries/System.Runtime/tests/System/Exception.Helpers.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using Xunit; namespace System.Tests { public static class ExceptionHelpers { public static void ValidateExceptionProperties(Exception e, int hResult, int dataCount = 0, string helpLink = null, Exception innerException = null, string message = null, string source = null, string stackTrace = null, bool validateMessage = true) { Assert.Equal(dataCount, e.Data.Count); Assert.Equal(helpLink, e.HelpLink); Assert.Equal(hResult, e.HResult); Assert.Equal(innerException, e.InnerException); if (validateMessage) { Assert.Equal(message, e.Message); } else { Assert.NotNull(e.Message); } Assert.Equal(source, e.Source); Assert.Equal(stackTrace, e.StackTrace); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using Xunit; namespace System.Tests { public static class ExceptionHelpers { public static void ValidateExceptionProperties(Exception e, int hResult, int dataCount = 0, string helpLink = null, Exception innerException = null, string message = null, string source = null, string stackTrace = null, bool validateMessage = true) { Assert.Equal(dataCount, e.Data.Count); Assert.Equal(helpLink, e.HelpLink); Assert.Equal(hResult, e.HResult); Assert.Equal(innerException, e.InnerException); if (validateMessage) { Assert.Equal(message, e.Message); } else { Assert.NotNull(e.Message); } Assert.Equal(source, e.Source); Assert.Equal(stackTrace, e.StackTrace); } } }
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./src/tests/baseservices/exceptions/generics/nested-try-catch10.csproj
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <AllowUnsafeBlocks>true</AllowUnsafeBlocks> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <ItemGroup> <Compile Include="nested-try-catch10.cs" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <AllowUnsafeBlocks>true</AllowUnsafeBlocks> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <ItemGroup> <Compile Include="nested-try-catch10.cs" /> </ItemGroup> </Project>
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./Directory.Solution.props
<Project> <PropertyGroup> <!-- For solution restore, msbuild doesn't honor the property set in Directory.Build.props. --> <RestoreUseStaticGraphEvaluation>true</RestoreUseStaticGraphEvaluation> </PropertyGroup> </Project>
<Project> <PropertyGroup> <!-- For solution restore, msbuild doesn't honor the property set in Directory.Build.props. --> <RestoreUseStaticGraphEvaluation>true</RestoreUseStaticGraphEvaluation> </PropertyGroup> </Project>
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./src/tests/JIT/HardwareIntrinsics/X86/Avx2/Avx2_r.csproj
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <AllowUnsafeBlocks>true</AllowUnsafeBlocks> <!-- It takes a long time to complete (on a non-AVX machine) --> <UnloadabilityIncompatible>true</UnloadabilityIncompatible> <!-- https://github.com/dotnet/runtime/issues/12392 --> <GCStressIncompatible>true</GCStressIncompatible> </PropertyGroup> <PropertyGroup> <DebugType>Embedded</DebugType> <Optimize /> </PropertyGroup> <ItemGroup> <Compile Include="Add.Byte.cs" /> <Compile Include="Add.Int16.cs" /> <Compile Include="Add.Int32.cs" /> <Compile Include="Add.Int64.cs" /> <Compile Include="Add.SByte.cs" /> <Compile Include="Add.UInt16.cs" /> <Compile Include="Add.UInt32.cs" /> <Compile Include="Add.UInt64.cs" /> <Compile Include="AlignRight.SByte.5.cs" /> <Compile Include="AlignRight.SByte.27.cs" /> <Compile Include="AlignRight.SByte.228.cs" /> <Compile Include="AlignRight.SByte.250.cs" /> <Compile Include="AlignRight.Byte.5.cs" /> <Compile Include="AlignRight.Byte.27.cs" /> <Compile Include="AlignRight.Byte.228.cs" /> <Compile Include="AlignRight.Byte.250.cs" /> <Compile Include="AlignRight.Int16.0.cs" /> <Compile Include="AlignRight.Int16.2.cs" /> <Compile Include="AlignRight.UInt16.0.cs" /> <Compile Include="AlignRight.UInt16.2.cs" /> <Compile Include="AlignRight.Int32.0.cs" /> <Compile Include="AlignRight.Int32.4.cs" /> <Compile Include="AlignRight.UInt32.0.cs" /> <Compile Include="AlignRight.UInt32.4.cs" /> <Compile Include="AlignRight.Int64.0.cs" /> <Compile Include="AlignRight.Int64.8.cs" /> <Compile Include="AlignRight.UInt64.0.cs" /> <Compile Include="AlignRight.UInt64.8.cs" /> <Compile Include="And.Byte.cs" /> <Compile Include="And.Int16.cs" /> <Compile Include="And.Int32.cs" /> <Compile Include="And.Int64.cs" /> <Compile Include="And.SByte.cs" /> <Compile Include="And.UInt16.cs" /> <Compile Include="And.UInt32.cs" /> <Compile Include="And.UInt64.cs" /> <Compile Include="AndNot.Byte.cs" /> <Compile Include="AndNot.Int16.cs" /> 
<Compile Include="AndNot.Int32.cs" /> <Compile Include="AndNot.Int64.cs" /> <Compile Include="AndNot.SByte.cs" /> <Compile Include="AndNot.UInt16.cs" /> <Compile Include="AndNot.UInt32.cs" /> <Compile Include="AndNot.UInt64.cs" /> <Compile Include="Average.Byte.cs" /> <Compile Include="Average.UInt16.cs" /> <Compile Include="Blend.Int16.1.cs" /> <Compile Include="Blend.Int16.2.cs" /> <Compile Include="Blend.Int16.4.cs" /> <Compile Include="Blend.Int16.85.cs" /> <Compile Include="Blend.UInt16.1.cs" /> <Compile Include="Blend.UInt16.2.cs" /> <Compile Include="Blend.UInt16.4.cs" /> <Compile Include="Blend.UInt16.85.cs" /> <Compile Include="Blend.Int32.1.cs" /> <Compile Include="Blend.Int32.2.cs" /> <Compile Include="Blend.Int32.4.cs" /> <Compile Include="Blend.Int32.85.cs" /> <Compile Include="Blend.UInt32.1.cs" /> <Compile Include="Blend.UInt32.2.cs" /> <Compile Include="Blend.UInt32.4.cs" /> <Compile Include="Blend.UInt32.85.cs" /> <Compile Include="BlendVariable.Byte.cs" /> <Compile Include="BlendVariable.SByte.cs" /> <Compile Include="BlendVariable.Int16.cs" /> <Compile Include="BlendVariable.UInt16.cs" /> <Compile Include="BlendVariable.Int32.cs" /> <Compile Include="BlendVariable.UInt32.cs" /> <Compile Include="BlendVariable.Int64.cs" /> <Compile Include="BlendVariable.UInt64.cs" /> <Compile Include="BroadcastScalarToVector128.Byte.cs" /> <Compile Include="BroadcastScalarToVector128.SByte.cs" /> <Compile Include="BroadcastScalarToVector128.Int16.cs" /> <Compile Include="BroadcastScalarToVector128.UInt16.cs" /> <Compile Include="BroadcastScalarToVector128.Int32.cs" /> <Compile Include="BroadcastScalarToVector128.UInt32.cs" /> <Compile Include="BroadcastScalarToVector128.Int64.cs" /> <Compile Include="BroadcastScalarToVector128.UInt64.cs" /> <Compile Include="BroadcastScalarToVector128.Single.cs" /> <Compile Include="BroadcastScalarToVector128.Double.cs" /> <Compile Include="BroadcastScalarToVector256.Byte.cs" /> <Compile 
Include="BroadcastScalarToVector256.SByte.cs" /> <Compile Include="BroadcastScalarToVector256.Int16.cs" /> <Compile Include="BroadcastScalarToVector256.UInt16.cs" /> <Compile Include="BroadcastScalarToVector256.Int32.cs" /> <Compile Include="BroadcastScalarToVector256.UInt32.cs" /> <Compile Include="BroadcastScalarToVector256.Int64.cs" /> <Compile Include="BroadcastScalarToVector256.UInt64.cs" /> <Compile Include="BroadcastScalarToVector256.Single.cs" /> <Compile Include="BroadcastScalarToVector256.Double.cs" /> <Compile Include="CompareEqual.Byte.cs" /> <Compile Include="CompareEqual.Int16.cs" /> <Compile Include="CompareEqual.Int32.cs" /> <Compile Include="CompareEqual.Int64.cs" /> <Compile Include="CompareEqual.SByte.cs" /> <Compile Include="CompareEqual.UInt16.cs" /> <Compile Include="CompareEqual.UInt32.cs" /> <Compile Include="CompareEqual.UInt64.cs" /> <Compile Include="CompareGreaterThan.Int16.cs" /> <Compile Include="CompareGreaterThan.Int32.cs" /> <Compile Include="CompareGreaterThan.Int64.cs" /> <Compile Include="CompareGreaterThan.SByte.cs" /> <Compile Include="ConvertToInt32.Int32.cs" /> <Compile Include="ConvertToUInt32.UInt32.cs" /> <Compile Include="ExtractVector128.Byte.1.cs" /> <Compile Include="ExtractVector128.SByte.1.cs" /> <Compile Include="ExtractVector128.Int16.1.cs" /> <Compile Include="ExtractVector128.UInt16.1.cs" /> <Compile Include="ExtractVector128.Int32.1.cs" /> <Compile Include="ExtractVector128.UInt32.1.cs" /> <Compile Include="ExtractVector128.Int64.1.cs" /> <Compile Include="ExtractVector128.UInt64.1.cs" /> <Compile Include="InsertVector128.Byte.1.cs" /> <Compile Include="InsertVector128.SByte.1.cs" /> <Compile Include="InsertVector128.Int16.1.cs" /> <Compile Include="InsertVector128.UInt16.1.cs" /> <Compile Include="InsertVector128.Int32.1.cs" /> <Compile Include="InsertVector128.UInt32.1.cs" /> <Compile Include="InsertVector128.Int64.1.cs" /> <Compile Include="InsertVector128.UInt64.1.cs" /> <Compile Include="MaskLoad.Int32.cs" 
/> <Compile Include="MaskLoad.UInt32.cs" /> <Compile Include="MaskLoad.Int64.cs" /> <Compile Include="MaskLoad.UInt64.cs" /> <Compile Include="MaskStore.Int32.cs" /> <Compile Include="MaskStore.UInt32.cs" /> <Compile Include="MaskStore.Int64.cs" /> <Compile Include="MaskStore.UInt64.cs" /> <Compile Include="Max.Byte.cs" /> <Compile Include="Max.Int16.cs" /> <Compile Include="Max.Int32.cs" /> <Compile Include="Max.SByte.cs" /> <Compile Include="Max.UInt16.cs" /> <Compile Include="Max.UInt32.cs" /> <Compile Include="Min.Byte.cs" /> <Compile Include="Min.Int16.cs" /> <Compile Include="Min.Int32.cs" /> <Compile Include="Min.SByte.cs" /> <Compile Include="Min.UInt16.cs" /> <Compile Include="Min.UInt32.cs" /> <Compile Include="MultiplyAddAdjacent.Int16.cs" /> <Compile Include="MultiplyAddAdjacent.Int32.cs" /> <Compile Include="MultiplyHighRoundScale.Int16.cs" /> <Compile Include="MultiplyHigh.Int16.cs" /> <Compile Include="MultiplyHigh.UInt16.cs" /> <Compile Include="MultiplyLow.Int16.cs" /> <Compile Include="MultiplyLow.Int32.cs" /> <Compile Include="MultiplyLow.UInt16.cs" /> <Compile Include="MultiplyLow.UInt32.cs" /> <Compile Include="MultipleSumAbsoluteDifferences.UInt16.0.cs" /> <Compile Include="Or.Byte.cs" /> <Compile Include="Or.Int16.cs" /> <Compile Include="Or.Int32.cs" /> <Compile Include="Or.Int64.cs" /> <Compile Include="Or.SByte.cs" /> <Compile Include="Or.UInt16.cs" /> <Compile Include="Or.UInt32.cs" /> <Compile Include="Or.UInt64.cs" /> <Compile Include="PackUnsignedSaturate.UInt16.cs" /> <Compile Include="PackUnsignedSaturate.Byte.cs" /> <Compile Include="PackSignedSaturate.Int16.cs" /> <Compile Include="PackSignedSaturate.SByte.cs" /> <Compile Include="Permute2x128.Int32.2.cs" /> <Compile Include="Permute2x128.UInt32.2.cs" /> <Compile Include="Permute2x128.Int64.2.cs" /> <Compile Include="Permute2x128.UInt64.2.cs" /> <Compile Include="Permute4x64.Double.85.cs" /> <Compile Include="Permute4x64.Int64.85.cs" /> <Compile Include="Permute4x64.UInt64.85.cs" 
/> <Compile Include="PermuteVar8x32.Int32.cs" /> <Compile Include="PermuteVar8x32.UInt32.cs" /> <Compile Include="PermuteVar8x32.Single.cs" /> <Compile Include="ShiftLeftLogical.Int16.1.cs" /> <Compile Include="ShiftLeftLogical.UInt16.1.cs" /> <Compile Include="ShiftLeftLogical.Int32.1.cs" /> <Compile Include="ShiftLeftLogical.UInt32.1.cs" /> <Compile Include="ShiftLeftLogical.Int64.1.cs" /> <Compile Include="ShiftLeftLogical.UInt64.1.cs" /> <Compile Include="ShiftLeftLogical.Int16.16.cs" /> <Compile Include="ShiftLeftLogical.UInt16.16.cs" /> <Compile Include="ShiftLeftLogical.Int32.32.cs" /> <Compile Include="ShiftLeftLogical.UInt32.32.cs" /> <Compile Include="ShiftLeftLogical.Int64.64.cs" /> <Compile Include="ShiftLeftLogical.UInt64.64.cs" /> <Compile Include="ShiftRightLogical.Int16.1.cs" /> <Compile Include="ShiftRightLogical.UInt16.1.cs" /> <Compile Include="ShiftRightLogical.Int32.1.cs" /> <Compile Include="ShiftRightLogical.UInt32.1.cs" /> <Compile Include="ShiftRightLogical.Int64.1.cs" /> <Compile Include="ShiftRightLogical.UInt64.1.cs" /> <Compile Include="ShiftRightLogical.Int16.16.cs" /> <Compile Include="ShiftRightLogical.UInt16.16.cs" /> <Compile Include="ShiftRightLogical.Int32.32.cs" /> <Compile Include="ShiftRightLogical.UInt32.32.cs" /> <Compile Include="ShiftRightLogical.Int64.64.cs" /> <Compile Include="ShiftRightLogical.UInt64.64.cs" /> <Compile Include="ShiftRightArithmetic.Int16.1.cs" /> <Compile Include="ShiftRightArithmetic.Int32.1.cs" /> <Compile Include="ShiftRightArithmetic.Int16.16.cs" /> <Compile Include="ShiftRightArithmetic.Int32.32.cs" /> <Compile Include="ShiftRightArithmeticVariable.Int32.cs" /> <Compile Include="ShiftLeftLogical128BitLane.SByte.1.cs" /> <Compile Include="ShiftLeftLogical128BitLane.Byte.1.cs" /> <Compile Include="ShiftLeftLogical128BitLane.Int16.1.cs" /> <Compile Include="ShiftLeftLogical128BitLane.UInt16.1.cs" /> <Compile Include="ShiftLeftLogical128BitLane.Int32.1.cs" /> <Compile 
Include="ShiftLeftLogical128BitLane.UInt32.1.cs" /> <Compile Include="ShiftLeftLogical128BitLane.Int64.1.cs" /> <Compile Include="ShiftLeftLogical128BitLane.UInt64.1.cs" /> <Compile Include="ShiftRightLogical128BitLane.SByte.1.cs" /> <Compile Include="ShiftRightLogical128BitLane.Byte.1.cs" /> <Compile Include="ShiftRightLogical128BitLane.Int16.1.cs" /> <Compile Include="ShiftRightLogical128BitLane.UInt16.1.cs" /> <Compile Include="ShiftRightLogical128BitLane.Int32.1.cs" /> <Compile Include="ShiftRightLogical128BitLane.UInt32.1.cs" /> <Compile Include="ShiftRightLogical128BitLane.Int64.1.cs" /> <Compile Include="ShiftRightLogical128BitLane.UInt64.1.cs" /> <Compile Include="Sign.SByte.cs" /> <Compile Include="Sign.Int16.cs" /> <Compile Include="Sign.Int32.cs" /> <Compile Include="Shuffle.SByte.cs" /> <Compile Include="Shuffle.Byte.cs" /> <Compile Include="Shuffle.Int32.1.cs" /> <Compile Include="Shuffle.UInt32.1.cs" /> <Compile Include="ShuffleHigh.Int16.228.cs" /> <Compile Include="ShuffleHigh.UInt16.228.cs" /> <Compile Include="ShuffleLow.Int16.228.cs" /> <Compile Include="ShuffleLow.UInt16.228.cs" /> <Compile Include="SumAbsoluteDifferences.UInt16.cs" /> <Compile Include="Subtract.Byte.cs" /> <Compile Include="Subtract.Int16.cs" /> <Compile Include="Subtract.Int32.cs" /> <Compile Include="Subtract.Int64.cs" /> <Compile Include="Subtract.SByte.cs" /> <Compile Include="Subtract.UInt16.cs" /> <Compile Include="Subtract.UInt32.cs" /> <Compile Include="Subtract.UInt64.cs" /> <Compile Include="Xor.Byte.cs" /> <Compile Include="Xor.Int16.cs" /> <Compile Include="Xor.Int32.cs" /> <Compile Include="Xor.Int64.cs" /> <Compile Include="Xor.SByte.cs" /> <Compile Include="Xor.UInt16.cs" /> <Compile Include="Xor.UInt32.cs" /> <Compile Include="Xor.UInt64.cs" /> <Compile Include="Program.Avx2.cs" /> <Compile Include="..\Shared\Program.cs" /> <Compile Include="..\Shared\SimdScalarUnOpTest_DataTable.cs" /> <Compile Include="..\Shared\SimpleBinOpTest_DataTable.cs" /> <Compile 
Include="..\Shared\SimpleUnOpTest_DataTable.cs" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <AllowUnsafeBlocks>true</AllowUnsafeBlocks> <!-- It takes a long time to complete (on a non-AVX machine) --> <UnloadabilityIncompatible>true</UnloadabilityIncompatible> <!-- https://github.com/dotnet/runtime/issues/12392 --> <GCStressIncompatible>true</GCStressIncompatible> </PropertyGroup> <PropertyGroup> <DebugType>Embedded</DebugType> <Optimize /> </PropertyGroup> <ItemGroup> <Compile Include="Add.Byte.cs" /> <Compile Include="Add.Int16.cs" /> <Compile Include="Add.Int32.cs" /> <Compile Include="Add.Int64.cs" /> <Compile Include="Add.SByte.cs" /> <Compile Include="Add.UInt16.cs" /> <Compile Include="Add.UInt32.cs" /> <Compile Include="Add.UInt64.cs" /> <Compile Include="AlignRight.SByte.5.cs" /> <Compile Include="AlignRight.SByte.27.cs" /> <Compile Include="AlignRight.SByte.228.cs" /> <Compile Include="AlignRight.SByte.250.cs" /> <Compile Include="AlignRight.Byte.5.cs" /> <Compile Include="AlignRight.Byte.27.cs" /> <Compile Include="AlignRight.Byte.228.cs" /> <Compile Include="AlignRight.Byte.250.cs" /> <Compile Include="AlignRight.Int16.0.cs" /> <Compile Include="AlignRight.Int16.2.cs" /> <Compile Include="AlignRight.UInt16.0.cs" /> <Compile Include="AlignRight.UInt16.2.cs" /> <Compile Include="AlignRight.Int32.0.cs" /> <Compile Include="AlignRight.Int32.4.cs" /> <Compile Include="AlignRight.UInt32.0.cs" /> <Compile Include="AlignRight.UInt32.4.cs" /> <Compile Include="AlignRight.Int64.0.cs" /> <Compile Include="AlignRight.Int64.8.cs" /> <Compile Include="AlignRight.UInt64.0.cs" /> <Compile Include="AlignRight.UInt64.8.cs" /> <Compile Include="And.Byte.cs" /> <Compile Include="And.Int16.cs" /> <Compile Include="And.Int32.cs" /> <Compile Include="And.Int64.cs" /> <Compile Include="And.SByte.cs" /> <Compile Include="And.UInt16.cs" /> <Compile Include="And.UInt32.cs" /> <Compile Include="And.UInt64.cs" /> <Compile Include="AndNot.Byte.cs" /> <Compile Include="AndNot.Int16.cs" /> 
<Compile Include="AndNot.Int32.cs" /> <Compile Include="AndNot.Int64.cs" /> <Compile Include="AndNot.SByte.cs" /> <Compile Include="AndNot.UInt16.cs" /> <Compile Include="AndNot.UInt32.cs" /> <Compile Include="AndNot.UInt64.cs" /> <Compile Include="Average.Byte.cs" /> <Compile Include="Average.UInt16.cs" /> <Compile Include="Blend.Int16.1.cs" /> <Compile Include="Blend.Int16.2.cs" /> <Compile Include="Blend.Int16.4.cs" /> <Compile Include="Blend.Int16.85.cs" /> <Compile Include="Blend.UInt16.1.cs" /> <Compile Include="Blend.UInt16.2.cs" /> <Compile Include="Blend.UInt16.4.cs" /> <Compile Include="Blend.UInt16.85.cs" /> <Compile Include="Blend.Int32.1.cs" /> <Compile Include="Blend.Int32.2.cs" /> <Compile Include="Blend.Int32.4.cs" /> <Compile Include="Blend.Int32.85.cs" /> <Compile Include="Blend.UInt32.1.cs" /> <Compile Include="Blend.UInt32.2.cs" /> <Compile Include="Blend.UInt32.4.cs" /> <Compile Include="Blend.UInt32.85.cs" /> <Compile Include="BlendVariable.Byte.cs" /> <Compile Include="BlendVariable.SByte.cs" /> <Compile Include="BlendVariable.Int16.cs" /> <Compile Include="BlendVariable.UInt16.cs" /> <Compile Include="BlendVariable.Int32.cs" /> <Compile Include="BlendVariable.UInt32.cs" /> <Compile Include="BlendVariable.Int64.cs" /> <Compile Include="BlendVariable.UInt64.cs" /> <Compile Include="BroadcastScalarToVector128.Byte.cs" /> <Compile Include="BroadcastScalarToVector128.SByte.cs" /> <Compile Include="BroadcastScalarToVector128.Int16.cs" /> <Compile Include="BroadcastScalarToVector128.UInt16.cs" /> <Compile Include="BroadcastScalarToVector128.Int32.cs" /> <Compile Include="BroadcastScalarToVector128.UInt32.cs" /> <Compile Include="BroadcastScalarToVector128.Int64.cs" /> <Compile Include="BroadcastScalarToVector128.UInt64.cs" /> <Compile Include="BroadcastScalarToVector128.Single.cs" /> <Compile Include="BroadcastScalarToVector128.Double.cs" /> <Compile Include="BroadcastScalarToVector256.Byte.cs" /> <Compile 
Include="BroadcastScalarToVector256.SByte.cs" /> <Compile Include="BroadcastScalarToVector256.Int16.cs" /> <Compile Include="BroadcastScalarToVector256.UInt16.cs" /> <Compile Include="BroadcastScalarToVector256.Int32.cs" /> <Compile Include="BroadcastScalarToVector256.UInt32.cs" /> <Compile Include="BroadcastScalarToVector256.Int64.cs" /> <Compile Include="BroadcastScalarToVector256.UInt64.cs" /> <Compile Include="BroadcastScalarToVector256.Single.cs" /> <Compile Include="BroadcastScalarToVector256.Double.cs" /> <Compile Include="CompareEqual.Byte.cs" /> <Compile Include="CompareEqual.Int16.cs" /> <Compile Include="CompareEqual.Int32.cs" /> <Compile Include="CompareEqual.Int64.cs" /> <Compile Include="CompareEqual.SByte.cs" /> <Compile Include="CompareEqual.UInt16.cs" /> <Compile Include="CompareEqual.UInt32.cs" /> <Compile Include="CompareEqual.UInt64.cs" /> <Compile Include="CompareGreaterThan.Int16.cs" /> <Compile Include="CompareGreaterThan.Int32.cs" /> <Compile Include="CompareGreaterThan.Int64.cs" /> <Compile Include="CompareGreaterThan.SByte.cs" /> <Compile Include="ConvertToInt32.Int32.cs" /> <Compile Include="ConvertToUInt32.UInt32.cs" /> <Compile Include="ExtractVector128.Byte.1.cs" /> <Compile Include="ExtractVector128.SByte.1.cs" /> <Compile Include="ExtractVector128.Int16.1.cs" /> <Compile Include="ExtractVector128.UInt16.1.cs" /> <Compile Include="ExtractVector128.Int32.1.cs" /> <Compile Include="ExtractVector128.UInt32.1.cs" /> <Compile Include="ExtractVector128.Int64.1.cs" /> <Compile Include="ExtractVector128.UInt64.1.cs" /> <Compile Include="InsertVector128.Byte.1.cs" /> <Compile Include="InsertVector128.SByte.1.cs" /> <Compile Include="InsertVector128.Int16.1.cs" /> <Compile Include="InsertVector128.UInt16.1.cs" /> <Compile Include="InsertVector128.Int32.1.cs" /> <Compile Include="InsertVector128.UInt32.1.cs" /> <Compile Include="InsertVector128.Int64.1.cs" /> <Compile Include="InsertVector128.UInt64.1.cs" /> <Compile Include="MaskLoad.Int32.cs" 
/> <Compile Include="MaskLoad.UInt32.cs" /> <Compile Include="MaskLoad.Int64.cs" /> <Compile Include="MaskLoad.UInt64.cs" /> <Compile Include="MaskStore.Int32.cs" /> <Compile Include="MaskStore.UInt32.cs" /> <Compile Include="MaskStore.Int64.cs" /> <Compile Include="MaskStore.UInt64.cs" /> <Compile Include="Max.Byte.cs" /> <Compile Include="Max.Int16.cs" /> <Compile Include="Max.Int32.cs" /> <Compile Include="Max.SByte.cs" /> <Compile Include="Max.UInt16.cs" /> <Compile Include="Max.UInt32.cs" /> <Compile Include="Min.Byte.cs" /> <Compile Include="Min.Int16.cs" /> <Compile Include="Min.Int32.cs" /> <Compile Include="Min.SByte.cs" /> <Compile Include="Min.UInt16.cs" /> <Compile Include="Min.UInt32.cs" /> <Compile Include="MultiplyAddAdjacent.Int16.cs" /> <Compile Include="MultiplyAddAdjacent.Int32.cs" /> <Compile Include="MultiplyHighRoundScale.Int16.cs" /> <Compile Include="MultiplyHigh.Int16.cs" /> <Compile Include="MultiplyHigh.UInt16.cs" /> <Compile Include="MultiplyLow.Int16.cs" /> <Compile Include="MultiplyLow.Int32.cs" /> <Compile Include="MultiplyLow.UInt16.cs" /> <Compile Include="MultiplyLow.UInt32.cs" /> <Compile Include="MultipleSumAbsoluteDifferences.UInt16.0.cs" /> <Compile Include="Or.Byte.cs" /> <Compile Include="Or.Int16.cs" /> <Compile Include="Or.Int32.cs" /> <Compile Include="Or.Int64.cs" /> <Compile Include="Or.SByte.cs" /> <Compile Include="Or.UInt16.cs" /> <Compile Include="Or.UInt32.cs" /> <Compile Include="Or.UInt64.cs" /> <Compile Include="PackUnsignedSaturate.UInt16.cs" /> <Compile Include="PackUnsignedSaturate.Byte.cs" /> <Compile Include="PackSignedSaturate.Int16.cs" /> <Compile Include="PackSignedSaturate.SByte.cs" /> <Compile Include="Permute2x128.Int32.2.cs" /> <Compile Include="Permute2x128.UInt32.2.cs" /> <Compile Include="Permute2x128.Int64.2.cs" /> <Compile Include="Permute2x128.UInt64.2.cs" /> <Compile Include="Permute4x64.Double.85.cs" /> <Compile Include="Permute4x64.Int64.85.cs" /> <Compile Include="Permute4x64.UInt64.85.cs" 
/> <Compile Include="PermuteVar8x32.Int32.cs" /> <Compile Include="PermuteVar8x32.UInt32.cs" /> <Compile Include="PermuteVar8x32.Single.cs" /> <Compile Include="ShiftLeftLogical.Int16.1.cs" /> <Compile Include="ShiftLeftLogical.UInt16.1.cs" /> <Compile Include="ShiftLeftLogical.Int32.1.cs" /> <Compile Include="ShiftLeftLogical.UInt32.1.cs" /> <Compile Include="ShiftLeftLogical.Int64.1.cs" /> <Compile Include="ShiftLeftLogical.UInt64.1.cs" /> <Compile Include="ShiftLeftLogical.Int16.16.cs" /> <Compile Include="ShiftLeftLogical.UInt16.16.cs" /> <Compile Include="ShiftLeftLogical.Int32.32.cs" /> <Compile Include="ShiftLeftLogical.UInt32.32.cs" /> <Compile Include="ShiftLeftLogical.Int64.64.cs" /> <Compile Include="ShiftLeftLogical.UInt64.64.cs" /> <Compile Include="ShiftRightLogical.Int16.1.cs" /> <Compile Include="ShiftRightLogical.UInt16.1.cs" /> <Compile Include="ShiftRightLogical.Int32.1.cs" /> <Compile Include="ShiftRightLogical.UInt32.1.cs" /> <Compile Include="ShiftRightLogical.Int64.1.cs" /> <Compile Include="ShiftRightLogical.UInt64.1.cs" /> <Compile Include="ShiftRightLogical.Int16.16.cs" /> <Compile Include="ShiftRightLogical.UInt16.16.cs" /> <Compile Include="ShiftRightLogical.Int32.32.cs" /> <Compile Include="ShiftRightLogical.UInt32.32.cs" /> <Compile Include="ShiftRightLogical.Int64.64.cs" /> <Compile Include="ShiftRightLogical.UInt64.64.cs" /> <Compile Include="ShiftRightArithmetic.Int16.1.cs" /> <Compile Include="ShiftRightArithmetic.Int32.1.cs" /> <Compile Include="ShiftRightArithmetic.Int16.16.cs" /> <Compile Include="ShiftRightArithmetic.Int32.32.cs" /> <Compile Include="ShiftRightArithmeticVariable.Int32.cs" /> <Compile Include="ShiftLeftLogical128BitLane.SByte.1.cs" /> <Compile Include="ShiftLeftLogical128BitLane.Byte.1.cs" /> <Compile Include="ShiftLeftLogical128BitLane.Int16.1.cs" /> <Compile Include="ShiftLeftLogical128BitLane.UInt16.1.cs" /> <Compile Include="ShiftLeftLogical128BitLane.Int32.1.cs" /> <Compile 
Include="ShiftLeftLogical128BitLane.UInt32.1.cs" /> <Compile Include="ShiftLeftLogical128BitLane.Int64.1.cs" /> <Compile Include="ShiftLeftLogical128BitLane.UInt64.1.cs" /> <Compile Include="ShiftRightLogical128BitLane.SByte.1.cs" /> <Compile Include="ShiftRightLogical128BitLane.Byte.1.cs" /> <Compile Include="ShiftRightLogical128BitLane.Int16.1.cs" /> <Compile Include="ShiftRightLogical128BitLane.UInt16.1.cs" /> <Compile Include="ShiftRightLogical128BitLane.Int32.1.cs" /> <Compile Include="ShiftRightLogical128BitLane.UInt32.1.cs" /> <Compile Include="ShiftRightLogical128BitLane.Int64.1.cs" /> <Compile Include="ShiftRightLogical128BitLane.UInt64.1.cs" /> <Compile Include="Sign.SByte.cs" /> <Compile Include="Sign.Int16.cs" /> <Compile Include="Sign.Int32.cs" /> <Compile Include="Shuffle.SByte.cs" /> <Compile Include="Shuffle.Byte.cs" /> <Compile Include="Shuffle.Int32.1.cs" /> <Compile Include="Shuffle.UInt32.1.cs" /> <Compile Include="ShuffleHigh.Int16.228.cs" /> <Compile Include="ShuffleHigh.UInt16.228.cs" /> <Compile Include="ShuffleLow.Int16.228.cs" /> <Compile Include="ShuffleLow.UInt16.228.cs" /> <Compile Include="SumAbsoluteDifferences.UInt16.cs" /> <Compile Include="Subtract.Byte.cs" /> <Compile Include="Subtract.Int16.cs" /> <Compile Include="Subtract.Int32.cs" /> <Compile Include="Subtract.Int64.cs" /> <Compile Include="Subtract.SByte.cs" /> <Compile Include="Subtract.UInt16.cs" /> <Compile Include="Subtract.UInt32.cs" /> <Compile Include="Subtract.UInt64.cs" /> <Compile Include="Xor.Byte.cs" /> <Compile Include="Xor.Int16.cs" /> <Compile Include="Xor.Int32.cs" /> <Compile Include="Xor.Int64.cs" /> <Compile Include="Xor.SByte.cs" /> <Compile Include="Xor.UInt16.cs" /> <Compile Include="Xor.UInt32.cs" /> <Compile Include="Xor.UInt64.cs" /> <Compile Include="Program.Avx2.cs" /> <Compile Include="..\Shared\Program.cs" /> <Compile Include="..\Shared\SimdScalarUnOpTest_DataTable.cs" /> <Compile Include="..\Shared\SimpleBinOpTest_DataTable.cs" /> <Compile 
Include="..\Shared\SimpleUnOpTest_DataTable.cs" /> </ItemGroup> </Project>
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./src/coreclr/tools/Common/TypeSystem/Common/InstantiatedType.MethodImpls.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; namespace Internal.TypeSystem { // Implementation of MethodImpl api surface implemented without metadata access. public partial class InstantiatedType { /// <summary> /// Instantiate a MethodImplRecord from uninstantiated form to instantiated form /// </summary> /// <param name="uninstMethodImpls"></param> /// <returns></returns> private MethodImplRecord[] InstantiateMethodImpls(MethodImplRecord[] uninstMethodImpls) { if (uninstMethodImpls == null || uninstMethodImpls.Length == 0) return uninstMethodImpls; MethodImplRecord[] instMethodImpls = new MethodImplRecord[uninstMethodImpls.Length]; for (int i = 0; i < uninstMethodImpls.Length; i++) { MethodDesc decl; var implTypeInstantiated = uninstMethodImpls[i].Decl.OwningType.InstantiateSignature(this.Instantiation, new Instantiation()); if (implTypeInstantiated is InstantiatedType) { decl = _typeDef.Context.GetMethodForInstantiatedType(uninstMethodImpls[i].Decl.GetTypicalMethodDefinition(), (InstantiatedType)implTypeInstantiated); } else { decl = uninstMethodImpls[i].Decl; } MethodDesc body = _typeDef.Context.GetMethodForInstantiatedType(uninstMethodImpls[i].Body, this); instMethodImpls[i] = new MethodImplRecord(decl, body); } return instMethodImpls; } protected override MethodImplRecord[] ComputeVirtualMethodImplsForType() { MethodImplRecord[] uninstMethodImpls = _typeDef.VirtualMethodImplsForType; return InstantiateMethodImpls(uninstMethodImpls); } public override MethodImplRecord[] FindMethodsImplWithMatchingDeclName(string name) { MethodImplRecord[] uninstMethodImpls = _typeDef.FindMethodsImplWithMatchingDeclName(name); return InstantiateMethodImpls(uninstMethodImpls); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; namespace Internal.TypeSystem { // Implementation of MethodImpl api surface implemented without metadata access. public partial class InstantiatedType { /// <summary> /// Instantiate a MethodImplRecord from uninstantiated form to instantiated form /// </summary> /// <param name="uninstMethodImpls"></param> /// <returns></returns> private MethodImplRecord[] InstantiateMethodImpls(MethodImplRecord[] uninstMethodImpls) { if (uninstMethodImpls == null || uninstMethodImpls.Length == 0) return uninstMethodImpls; MethodImplRecord[] instMethodImpls = new MethodImplRecord[uninstMethodImpls.Length]; for (int i = 0; i < uninstMethodImpls.Length; i++) { MethodDesc decl; var implTypeInstantiated = uninstMethodImpls[i].Decl.OwningType.InstantiateSignature(this.Instantiation, new Instantiation()); if (implTypeInstantiated is InstantiatedType) { decl = _typeDef.Context.GetMethodForInstantiatedType(uninstMethodImpls[i].Decl.GetTypicalMethodDefinition(), (InstantiatedType)implTypeInstantiated); } else { decl = uninstMethodImpls[i].Decl; } MethodDesc body = _typeDef.Context.GetMethodForInstantiatedType(uninstMethodImpls[i].Body, this); instMethodImpls[i] = new MethodImplRecord(decl, body); } return instMethodImpls; } protected override MethodImplRecord[] ComputeVirtualMethodImplsForType() { MethodImplRecord[] uninstMethodImpls = _typeDef.VirtualMethodImplsForType; return InstantiateMethodImpls(uninstMethodImpls); } public override MethodImplRecord[] FindMethodsImplWithMatchingDeclName(string name) { MethodImplRecord[] uninstMethodImpls = _typeDef.FindMethodsImplWithMatchingDeclName(name); return InstantiateMethodImpls(uninstMethodImpls); } } }
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./src/libraries/System.ObjectModel/tests/ReadOnlyObservableCollection/ReadOnlyObservableCollectionTests.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.Diagnostics; using System.Linq; using System.Reflection; using Xunit; namespace System.Collections.ObjectModel.Tests { /// <summary> /// Tests the public properties and constructor in ObservableCollection<T>. /// </summary> public class ReadOnlyObservableCollectionTests { [Fact] public static void Ctor_Tests() { string[] anArray = new string[] { "one", "two", "three", "four", "five" }; ReadOnlyObservableCollection<string> readOnlyCol = new ReadOnlyObservableCollection<string>(new ObservableCollection<string>(anArray)); IReadOnlyList_T_Test<string> helper = new IReadOnlyList_T_Test<string>(readOnlyCol, anArray); helper.InitialItems_Tests(); IList<string> readOnlyColAsIList = readOnlyCol; Assert.True(readOnlyColAsIList.IsReadOnly, "ReadOnlyObservableCollection should be readOnly."); } [Fact] public static void Ctor_Tests_Negative() { AssertExtensions.Throws<ArgumentNullException>("list", () => new ReadOnlyObservableCollection<string>(null)); } [Fact] public static void GetItemTests() { string[] anArray = new string[] { "one", "two", "three", "four", "five" }; ReadOnlyObservableCollection<string> readOnlyCol = new ReadOnlyObservableCollection<string>(new ObservableCollection<string>(anArray)); IReadOnlyList_T_Test<string> helper = new IReadOnlyList_T_Test<string>(readOnlyCol, anArray); helper.Item_get_Tests(); } [Fact] public static void GetItemTests_Negative() { string[] anArray = new string[] { "one", "two", "three", "four", "five" }; ReadOnlyObservableCollection<string> readOnlyCol = new ReadOnlyObservableCollection<string>(new ObservableCollection<string>(anArray)); IReadOnlyList_T_Test<string> helper = new IReadOnlyList_T_Test<string>(readOnlyCol, anArray); helper.Item_get_Tests_Negative(); } /// <summary> /// Tests that contains returns true when the item is in the collection /// 
and false otherwise. /// </summary> [Fact] public static void ContainsTests() { string[] anArray = new string[] { "one", "two", "three", "four", "five" }; ReadOnlyObservableCollection<string> readOnlyCol = new ReadOnlyObservableCollection<string>(new ObservableCollection<string>(anArray)); for (int i = 0; i < anArray.Length; i++) { string item = anArray[i]; Assert.True(readOnlyCol.Contains(item), "ReadOnlyCol did not contain item: " + anArray[i] + " at index: " + i); } Assert.False(readOnlyCol.Contains("randomItem"), "ReadOnlyCol should not have contained non-existent item"); Assert.False(readOnlyCol.Contains(null), "ReadOnlyCol should not have contained null"); } /// <summary> /// Tests that the collection can be copied into a destination array. /// </summary> [Fact] public static void CopyToTest() { string[] anArray = new string[] { "one", "two", "three", "four" }; ReadOnlyObservableCollection<string> readOnlyCol = new ReadOnlyObservableCollection<string>(new ObservableCollection<string>(anArray)); string[] aCopy = new string[anArray.Length]; readOnlyCol.CopyTo(aCopy, 0); for (int i = 0; i < anArray.Length; ++i) Assert.Equal(anArray[i], aCopy[i]); // copy observable collection starting in middle, where array is larger than source. aCopy = new string[anArray.Length + 2]; int offsetIndex = 1; readOnlyCol.CopyTo(aCopy, offsetIndex); for (int i = 0; i < aCopy.Length; i++) { string value = aCopy[i]; if (i == 0) Assert.True(null == value, "Should not have a value since we did not start copying there."); else if (i == (aCopy.Length - 1)) Assert.True(null == value, "Should not have a value since the collection is shorter than the copy array.."); else { int indexInCollection = i - offsetIndex; Assert.Equal(readOnlyCol[indexInCollection], aCopy[i]); } } } /// <summary> /// Tests that: /// ArgumentOutOfRangeException is thrown when the Index is >= collection.Count /// or Index < 0. 
/// ArgumentException when the destination array does not have enough space to /// contain the source Collection. /// ArgumentNullException when the destination array is null. /// </summary> [Fact] public static void CopyToTest_Negative() { string[] anArray = new string[] { "one", "two", "three", "four" }; ReadOnlyObservableCollection<string> readOnlyCol = new ReadOnlyObservableCollection<string>(new ObservableCollection<string>(anArray)); int[] iArrInvalidValues = new int[] { -1, -2, -100, -1000, -10000, -100000, -1000000, -10000000, -100000000, -1000000000, int.MinValue }; foreach (var index in iArrInvalidValues) { string[] aCopy = new string[anArray.Length]; AssertExtensions.Throws<ArgumentOutOfRangeException>("destinationIndex", "dstIndex", () => readOnlyCol.CopyTo(aCopy, index)); } int[] iArrLargeValues = new int[] { anArray.Length, int.MaxValue, int.MaxValue / 2, int.MaxValue / 10 }; foreach (var index in iArrLargeValues) { string[] aCopy = new string[anArray.Length]; AssertExtensions.Throws<ArgumentException>("destinationArray", null, () => readOnlyCol.CopyTo(aCopy, index)); } AssertExtensions.Throws<ArgumentNullException>("destinationArray", "dest", () => readOnlyCol.CopyTo(null, 1)); string[] copy = new string[anArray.Length - 1]; AssertExtensions.Throws<ArgumentException>("destinationArray", "", () => readOnlyCol.CopyTo(copy, 0)); copy = new string[0]; AssertExtensions.Throws<ArgumentException>("destinationArray", "", () => readOnlyCol.CopyTo(copy, 0)); } /// <summary> /// Tests that the index of an item can be retrieved when the item is /// in the collection and -1 otherwise. 
/// </summary> [Fact] public static void IndexOfTest() { string[] anArray = new string[] { "one", "two", "three", "four" }; ReadOnlyObservableCollection<string> readOnlyCollection = new ReadOnlyObservableCollection<string>(new ObservableCollection<string>(anArray)); for (int i = 0; i < anArray.Length; ++i) Assert.Equal(i, readOnlyCollection.IndexOf(anArray[i])); Assert.Equal(-1, readOnlyCollection.IndexOf("seven")); Assert.Equal(-1, readOnlyCollection.IndexOf(null)); // testing that the first occurrence is the index returned. ObservableCollection<int> intCol = new ObservableCollection<int>(); for (int i = 0; i < 4; ++i) intCol.Add(i % 2); ReadOnlyObservableCollection<int> intReadOnlyCol = new ReadOnlyObservableCollection<int>(intCol); Assert.Equal(0, intReadOnlyCol.IndexOf(0)); Assert.Equal(1, intReadOnlyCol.IndexOf(1)); IList colAsIList = (IList)intReadOnlyCol; var index = colAsIList.IndexOf("stringObj"); Assert.Equal(-1, index); } /// <summary> /// Tests that a ReadOnlyDictionary cannot be modified. That is, that /// Add, Remove, Clear does not work. 
/// </summary> [Fact] public static void CannotModifyDictionaryTests_Negative() { string[] anArray = new string[] { "one", "two", "three", "four", "five" }; ReadOnlyObservableCollection<string> readOnlyCol = new ReadOnlyObservableCollection<string>(new ObservableCollection<string>(anArray)); IReadOnlyList_T_Test<string> helper = new IReadOnlyList_T_Test<string>(); IList<string> readOnlyColAsIList = readOnlyCol; Assert.Throws<NotSupportedException>(() => readOnlyColAsIList.Add("seven")); Assert.Throws<NotSupportedException>(() => readOnlyColAsIList.Insert(0, "nine")); Assert.Throws<NotSupportedException>(() => readOnlyColAsIList.Remove("one")); Assert.Throws<NotSupportedException>(() => readOnlyColAsIList.RemoveAt(0)); Assert.Throws<NotSupportedException>(() => readOnlyColAsIList.Clear()); helper.VerifyReadOnlyCollection(readOnlyCol, anArray); } [Fact] [ActiveIssue("https://github.com/dotnet/runtime/issues/57588", typeof(PlatformDetection), nameof(PlatformDetection.IsBuiltWithAggressiveTrimming), nameof(PlatformDetection.IsBrowser))] public static void DebuggerAttribute_Tests() { ReadOnlyObservableCollection<int> col = new ReadOnlyObservableCollection<int>(new ObservableCollection<int>(new[] {1, 2, 3, 4})); DebuggerAttributes.ValidateDebuggerDisplayReferences(col); DebuggerAttributeInfo info = DebuggerAttributes.ValidateDebuggerTypeProxyProperties(col); PropertyInfo itemProperty = info.Properties.Single(pr => pr.GetCustomAttribute<DebuggerBrowsableAttribute>().State == DebuggerBrowsableState.RootHidden); int[] items = itemProperty.GetValue(info.Instance) as int[]; Assert.Equal(col, items); } [Fact] [ActiveIssue("https://github.com/dotnet/runtime/issues/57588", typeof(PlatformDetection), nameof(PlatformDetection.IsBuiltWithAggressiveTrimming), nameof(PlatformDetection.IsBrowser))] public static void DebuggerAttribute_NullCollection_ThrowsArgumentNullException() { TargetInvocationException ex = Assert.Throws<TargetInvocationException>(() => 
DebuggerAttributes.ValidateDebuggerTypeProxyProperties(typeof(ReadOnlyObservableCollection<int>), null)); ArgumentNullException argumentNullException = Assert.IsType<ArgumentNullException>(ex.InnerException); } } internal class IReadOnlyList_T_Test<T> { private readonly IReadOnlyList<T> _collection; private readonly T[] _expectedItems; /// <summary> /// Initializes a new instance of the IReadOnlyList_T_Test. /// </summary> /// <param name="collection">The collection to run the tests on.</param> /// <param name="expectedItems">The items expected to be in the collection.</param> public IReadOnlyList_T_Test(IReadOnlyList<T> collection, T[] expectedItems) { _collection = collection; _expectedItems = expectedItems; } public IReadOnlyList_T_Test() { } /// <summary> /// This verifies that the collection contains the expected items. /// </summary> public void InitialItems_Tests() { // Verify Count returns the expected value Assert.Equal(_expectedItems.Length, _collection.Count); // Verify the initial items in the collection VerifyReadOnlyCollection(_collection, _expectedItems); } /// <summary> /// Runs all of the valid tests on get Item. /// </summary> public void Item_get_Tests() { // Verify get_Item with valid item on Collection Verify_get(_collection, _expectedItems); } /// <summary> /// Runs all of the argument checking(invalid) tests on get Item. 
/// </summary> public void Item_get_Tests_Negative() { // Verify get_Item with index=Int32.MinValue AssertExtensions.Throws<ArgumentOutOfRangeException>("index", () => _collection[int.MinValue]); // Verify that the collection was not mutated VerifyReadOnlyCollection(_collection, _expectedItems); // Verify get_Item with index=-1 AssertExtensions.Throws<ArgumentOutOfRangeException>("index", () => _collection[-1]); // Verify that the collection was not mutated VerifyReadOnlyCollection(_collection, _expectedItems); if (_expectedItems.Length == 0) { // Verify get_Item with index=0 on Empty collection AssertExtensions.Throws<ArgumentOutOfRangeException>("index", () => _collection[0]); // Verify that the collection was not mutated VerifyReadOnlyCollection(_collection, _expectedItems); } else { // Verify get_Item with index=Count on Empty collection AssertExtensions.Throws<ArgumentOutOfRangeException>("index", () => _collection[_expectedItems.Length]); // Verify that the collection was not mutated VerifyReadOnlyCollection(_collection, _expectedItems); } } #region Helper Methods /// <summary> /// Verifies that the items in the collection match the expected items. /// </summary> internal void VerifyReadOnlyCollection(IReadOnlyList<T> collection, T[] items) { Verify_get(collection, items); VerifyGenericEnumerator(collection, items); VerifyEnumerator(collection, items); } /// <summary> /// Verifies that you can get all items that should be in the collection. /// </summary> private void Verify_get(IReadOnlyList<T> collection, T[] items) { Assert.Equal(items.Length, collection.Count); for (int i = 0; i < items.Length; i++) { int itemsIndex = i; Assert.Equal(items[itemsIndex], collection[i]); } } /// <summary> /// Verifies that the generic enumerator retrieves the correct items. 
/// </summary> private void VerifyGenericEnumerator(IReadOnlyList<T> collection, T[] expectedItems) { IEnumerator<T> enumerator = collection.GetEnumerator(); int iterations = 0; int expectedCount = expectedItems.Length; // There is a sequential order to the collection, so we're testing for that. while ((iterations < expectedCount) && enumerator.MoveNext()) { T currentItem = enumerator.Current; T tempItem; // Verify we have not gotten more items then we expected Assert.True(iterations < expectedCount, "Err_9844awpa More items have been returned from the enumerator(" + iterations + " items) than are in the expectedElements(" + expectedCount + " items)"); // Verify Current returned the correct value Assert.Equal(currentItem, expectedItems[iterations]); // Verify Current always returns the same value every time it is called for (int i = 0; i < 3; i++) { tempItem = enumerator.Current; Assert.Equal(currentItem, tempItem); } iterations++; } Assert.Equal(expectedCount, iterations); for (int i = 0; i < 3; i++) { Assert.False(enumerator.MoveNext(), "Err_2929ahiea Expected MoveNext to return false after" + iterations + " iterations"); } enumerator.Dispose(); } /// <summary> /// Verifies that the non-generic enumerator retrieves the correct items. /// </summary> private void VerifyEnumerator(IReadOnlyList<T> collection, T[] expectedItems) { IEnumerator enumerator = collection.GetEnumerator(); int iterations = 0; int expectedCount = expectedItems.Length; // There is no sequential order to the collection, so we're testing that all the items // in the readonlydictionary exist in the array. 
bool[] itemsVisited = new bool[expectedCount]; bool itemFound; while ((iterations < expectedCount) && enumerator.MoveNext()) { object currentItem = enumerator.Current; object tempItem; // Verify we have not gotten more items then we expected Assert.True(iterations < expectedCount, "Err_9844awpa More items have been returned from the enumerator(" + iterations + " items) then are in the expectedElements(" + expectedCount + " items)"); // Verify Current returned the correct value itemFound = false; for (int i = 0; i < itemsVisited.Length; ++i) { if (!itemsVisited[i] && expectedItems[i].Equals(currentItem)) { itemsVisited[i] = true; itemFound = true; break; } } Assert.True(itemFound, "Err_1432pauy Current returned unexpected value=" + currentItem); // Verify Current always returns the same value every time it is called for (int i = 0; i < 3; i++) { tempItem = enumerator.Current; Assert.Equal(currentItem, tempItem); } iterations++; } for (int i = 0; i < expectedCount; ++i) { Assert.True(itemsVisited[i], "Err_052848ahiedoi Expected Current to return true for item: " + expectedItems[i] + "index: " + i); } Assert.Equal(expectedCount, iterations); for (int i = 0; i < 3; i++) { Assert.False(enumerator.MoveNext(), "Err_2929ahiea Expected MoveNext to return false after" + iterations + " iterations"); } } #endregion } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.Diagnostics; using System.Linq; using System.Reflection; using Xunit; namespace System.Collections.ObjectModel.Tests { /// <summary> /// Tests the public properties and constructor in ObservableCollection<T>. /// </summary> public class ReadOnlyObservableCollectionTests { [Fact] public static void Ctor_Tests() { string[] anArray = new string[] { "one", "two", "three", "four", "five" }; ReadOnlyObservableCollection<string> readOnlyCol = new ReadOnlyObservableCollection<string>(new ObservableCollection<string>(anArray)); IReadOnlyList_T_Test<string> helper = new IReadOnlyList_T_Test<string>(readOnlyCol, anArray); helper.InitialItems_Tests(); IList<string> readOnlyColAsIList = readOnlyCol; Assert.True(readOnlyColAsIList.IsReadOnly, "ReadOnlyObservableCollection should be readOnly."); } [Fact] public static void Ctor_Tests_Negative() { AssertExtensions.Throws<ArgumentNullException>("list", () => new ReadOnlyObservableCollection<string>(null)); } [Fact] public static void GetItemTests() { string[] anArray = new string[] { "one", "two", "three", "four", "five" }; ReadOnlyObservableCollection<string> readOnlyCol = new ReadOnlyObservableCollection<string>(new ObservableCollection<string>(anArray)); IReadOnlyList_T_Test<string> helper = new IReadOnlyList_T_Test<string>(readOnlyCol, anArray); helper.Item_get_Tests(); } [Fact] public static void GetItemTests_Negative() { string[] anArray = new string[] { "one", "two", "three", "four", "five" }; ReadOnlyObservableCollection<string> readOnlyCol = new ReadOnlyObservableCollection<string>(new ObservableCollection<string>(anArray)); IReadOnlyList_T_Test<string> helper = new IReadOnlyList_T_Test<string>(readOnlyCol, anArray); helper.Item_get_Tests_Negative(); } /// <summary> /// Tests that contains returns true when the item is in the collection /// 
and false otherwise. /// </summary> [Fact] public static void ContainsTests() { string[] anArray = new string[] { "one", "two", "three", "four", "five" }; ReadOnlyObservableCollection<string> readOnlyCol = new ReadOnlyObservableCollection<string>(new ObservableCollection<string>(anArray)); for (int i = 0; i < anArray.Length; i++) { string item = anArray[i]; Assert.True(readOnlyCol.Contains(item), "ReadOnlyCol did not contain item: " + anArray[i] + " at index: " + i); } Assert.False(readOnlyCol.Contains("randomItem"), "ReadOnlyCol should not have contained non-existent item"); Assert.False(readOnlyCol.Contains(null), "ReadOnlyCol should not have contained null"); } /// <summary> /// Tests that the collection can be copied into a destination array. /// </summary> [Fact] public static void CopyToTest() { string[] anArray = new string[] { "one", "two", "three", "four" }; ReadOnlyObservableCollection<string> readOnlyCol = new ReadOnlyObservableCollection<string>(new ObservableCollection<string>(anArray)); string[] aCopy = new string[anArray.Length]; readOnlyCol.CopyTo(aCopy, 0); for (int i = 0; i < anArray.Length; ++i) Assert.Equal(anArray[i], aCopy[i]); // copy observable collection starting in middle, where array is larger than source. aCopy = new string[anArray.Length + 2]; int offsetIndex = 1; readOnlyCol.CopyTo(aCopy, offsetIndex); for (int i = 0; i < aCopy.Length; i++) { string value = aCopy[i]; if (i == 0) Assert.True(null == value, "Should not have a value since we did not start copying there."); else if (i == (aCopy.Length - 1)) Assert.True(null == value, "Should not have a value since the collection is shorter than the copy array.."); else { int indexInCollection = i - offsetIndex; Assert.Equal(readOnlyCol[indexInCollection], aCopy[i]); } } } /// <summary> /// Tests that: /// ArgumentOutOfRangeException is thrown when the Index is >= collection.Count /// or Index < 0. 
/// ArgumentException when the destination array does not have enough space to /// contain the source Collection. /// ArgumentNullException when the destination array is null. /// </summary> [Fact] public static void CopyToTest_Negative() { string[] anArray = new string[] { "one", "two", "three", "four" }; ReadOnlyObservableCollection<string> readOnlyCol = new ReadOnlyObservableCollection<string>(new ObservableCollection<string>(anArray)); int[] iArrInvalidValues = new int[] { -1, -2, -100, -1000, -10000, -100000, -1000000, -10000000, -100000000, -1000000000, int.MinValue }; foreach (var index in iArrInvalidValues) { string[] aCopy = new string[anArray.Length]; AssertExtensions.Throws<ArgumentOutOfRangeException>("destinationIndex", "dstIndex", () => readOnlyCol.CopyTo(aCopy, index)); } int[] iArrLargeValues = new int[] { anArray.Length, int.MaxValue, int.MaxValue / 2, int.MaxValue / 10 }; foreach (var index in iArrLargeValues) { string[] aCopy = new string[anArray.Length]; AssertExtensions.Throws<ArgumentException>("destinationArray", null, () => readOnlyCol.CopyTo(aCopy, index)); } AssertExtensions.Throws<ArgumentNullException>("destinationArray", "dest", () => readOnlyCol.CopyTo(null, 1)); string[] copy = new string[anArray.Length - 1]; AssertExtensions.Throws<ArgumentException>("destinationArray", "", () => readOnlyCol.CopyTo(copy, 0)); copy = new string[0]; AssertExtensions.Throws<ArgumentException>("destinationArray", "", () => readOnlyCol.CopyTo(copy, 0)); } /// <summary> /// Tests that the index of an item can be retrieved when the item is /// in the collection and -1 otherwise. 
/// </summary> [Fact] public static void IndexOfTest() { string[] anArray = new string[] { "one", "two", "three", "four" }; ReadOnlyObservableCollection<string> readOnlyCollection = new ReadOnlyObservableCollection<string>(new ObservableCollection<string>(anArray)); for (int i = 0; i < anArray.Length; ++i) Assert.Equal(i, readOnlyCollection.IndexOf(anArray[i])); Assert.Equal(-1, readOnlyCollection.IndexOf("seven")); Assert.Equal(-1, readOnlyCollection.IndexOf(null)); // testing that the first occurrence is the index returned. ObservableCollection<int> intCol = new ObservableCollection<int>(); for (int i = 0; i < 4; ++i) intCol.Add(i % 2); ReadOnlyObservableCollection<int> intReadOnlyCol = new ReadOnlyObservableCollection<int>(intCol); Assert.Equal(0, intReadOnlyCol.IndexOf(0)); Assert.Equal(1, intReadOnlyCol.IndexOf(1)); IList colAsIList = (IList)intReadOnlyCol; var index = colAsIList.IndexOf("stringObj"); Assert.Equal(-1, index); } /// <summary> /// Tests that a ReadOnlyDictionary cannot be modified. That is, that /// Add, Remove, Clear does not work. 
/// </summary> [Fact] public static void CannotModifyDictionaryTests_Negative() { string[] anArray = new string[] { "one", "two", "three", "four", "five" }; ReadOnlyObservableCollection<string> readOnlyCol = new ReadOnlyObservableCollection<string>(new ObservableCollection<string>(anArray)); IReadOnlyList_T_Test<string> helper = new IReadOnlyList_T_Test<string>(); IList<string> readOnlyColAsIList = readOnlyCol; Assert.Throws<NotSupportedException>(() => readOnlyColAsIList.Add("seven")); Assert.Throws<NotSupportedException>(() => readOnlyColAsIList.Insert(0, "nine")); Assert.Throws<NotSupportedException>(() => readOnlyColAsIList.Remove("one")); Assert.Throws<NotSupportedException>(() => readOnlyColAsIList.RemoveAt(0)); Assert.Throws<NotSupportedException>(() => readOnlyColAsIList.Clear()); helper.VerifyReadOnlyCollection(readOnlyCol, anArray); } [Fact] [ActiveIssue("https://github.com/dotnet/runtime/issues/57588", typeof(PlatformDetection), nameof(PlatformDetection.IsBuiltWithAggressiveTrimming), nameof(PlatformDetection.IsBrowser))] public static void DebuggerAttribute_Tests() { ReadOnlyObservableCollection<int> col = new ReadOnlyObservableCollection<int>(new ObservableCollection<int>(new[] {1, 2, 3, 4})); DebuggerAttributes.ValidateDebuggerDisplayReferences(col); DebuggerAttributeInfo info = DebuggerAttributes.ValidateDebuggerTypeProxyProperties(col); PropertyInfo itemProperty = info.Properties.Single(pr => pr.GetCustomAttribute<DebuggerBrowsableAttribute>().State == DebuggerBrowsableState.RootHidden); int[] items = itemProperty.GetValue(info.Instance) as int[]; Assert.Equal(col, items); } [Fact] [ActiveIssue("https://github.com/dotnet/runtime/issues/57588", typeof(PlatformDetection), nameof(PlatformDetection.IsBuiltWithAggressiveTrimming), nameof(PlatformDetection.IsBrowser))] public static void DebuggerAttribute_NullCollection_ThrowsArgumentNullException() { TargetInvocationException ex = Assert.Throws<TargetInvocationException>(() => 
DebuggerAttributes.ValidateDebuggerTypeProxyProperties(typeof(ReadOnlyObservableCollection<int>), null)); ArgumentNullException argumentNullException = Assert.IsType<ArgumentNullException>(ex.InnerException); } } internal class IReadOnlyList_T_Test<T> { private readonly IReadOnlyList<T> _collection; private readonly T[] _expectedItems; /// <summary> /// Initializes a new instance of the IReadOnlyList_T_Test. /// </summary> /// <param name="collection">The collection to run the tests on.</param> /// <param name="expectedItems">The items expected to be in the collection.</param> public IReadOnlyList_T_Test(IReadOnlyList<T> collection, T[] expectedItems) { _collection = collection; _expectedItems = expectedItems; } public IReadOnlyList_T_Test() { } /// <summary> /// This verifies that the collection contains the expected items. /// </summary> public void InitialItems_Tests() { // Verify Count returns the expected value Assert.Equal(_expectedItems.Length, _collection.Count); // Verify the initial items in the collection VerifyReadOnlyCollection(_collection, _expectedItems); } /// <summary> /// Runs all of the valid tests on get Item. /// </summary> public void Item_get_Tests() { // Verify get_Item with valid item on Collection Verify_get(_collection, _expectedItems); } /// <summary> /// Runs all of the argument checking(invalid) tests on get Item. 
/// </summary> public void Item_get_Tests_Negative() { // Verify get_Item with index=Int32.MinValue AssertExtensions.Throws<ArgumentOutOfRangeException>("index", () => _collection[int.MinValue]); // Verify that the collection was not mutated VerifyReadOnlyCollection(_collection, _expectedItems); // Verify get_Item with index=-1 AssertExtensions.Throws<ArgumentOutOfRangeException>("index", () => _collection[-1]); // Verify that the collection was not mutated VerifyReadOnlyCollection(_collection, _expectedItems); if (_expectedItems.Length == 0) { // Verify get_Item with index=0 on Empty collection AssertExtensions.Throws<ArgumentOutOfRangeException>("index", () => _collection[0]); // Verify that the collection was not mutated VerifyReadOnlyCollection(_collection, _expectedItems); } else { // Verify get_Item with index=Count on Empty collection AssertExtensions.Throws<ArgumentOutOfRangeException>("index", () => _collection[_expectedItems.Length]); // Verify that the collection was not mutated VerifyReadOnlyCollection(_collection, _expectedItems); } } #region Helper Methods /// <summary> /// Verifies that the items in the collection match the expected items. /// </summary> internal void VerifyReadOnlyCollection(IReadOnlyList<T> collection, T[] items) { Verify_get(collection, items); VerifyGenericEnumerator(collection, items); VerifyEnumerator(collection, items); } /// <summary> /// Verifies that you can get all items that should be in the collection. /// </summary> private void Verify_get(IReadOnlyList<T> collection, T[] items) { Assert.Equal(items.Length, collection.Count); for (int i = 0; i < items.Length; i++) { int itemsIndex = i; Assert.Equal(items[itemsIndex], collection[i]); } } /// <summary> /// Verifies that the generic enumerator retrieves the correct items. 
/// </summary> private void VerifyGenericEnumerator(IReadOnlyList<T> collection, T[] expectedItems) { IEnumerator<T> enumerator = collection.GetEnumerator(); int iterations = 0; int expectedCount = expectedItems.Length; // There is a sequential order to the collection, so we're testing for that. while ((iterations < expectedCount) && enumerator.MoveNext()) { T currentItem = enumerator.Current; T tempItem; // Verify we have not gotten more items then we expected Assert.True(iterations < expectedCount, "Err_9844awpa More items have been returned from the enumerator(" + iterations + " items) than are in the expectedElements(" + expectedCount + " items)"); // Verify Current returned the correct value Assert.Equal(currentItem, expectedItems[iterations]); // Verify Current always returns the same value every time it is called for (int i = 0; i < 3; i++) { tempItem = enumerator.Current; Assert.Equal(currentItem, tempItem); } iterations++; } Assert.Equal(expectedCount, iterations); for (int i = 0; i < 3; i++) { Assert.False(enumerator.MoveNext(), "Err_2929ahiea Expected MoveNext to return false after" + iterations + " iterations"); } enumerator.Dispose(); } /// <summary> /// Verifies that the non-generic enumerator retrieves the correct items. /// </summary> private void VerifyEnumerator(IReadOnlyList<T> collection, T[] expectedItems) { IEnumerator enumerator = collection.GetEnumerator(); int iterations = 0; int expectedCount = expectedItems.Length; // There is no sequential order to the collection, so we're testing that all the items // in the readonlydictionary exist in the array. 
bool[] itemsVisited = new bool[expectedCount]; bool itemFound; while ((iterations < expectedCount) && enumerator.MoveNext()) { object currentItem = enumerator.Current; object tempItem; // Verify we have not gotten more items then we expected Assert.True(iterations < expectedCount, "Err_9844awpa More items have been returned from the enumerator(" + iterations + " items) then are in the expectedElements(" + expectedCount + " items)"); // Verify Current returned the correct value itemFound = false; for (int i = 0; i < itemsVisited.Length; ++i) { if (!itemsVisited[i] && expectedItems[i].Equals(currentItem)) { itemsVisited[i] = true; itemFound = true; break; } } Assert.True(itemFound, "Err_1432pauy Current returned unexpected value=" + currentItem); // Verify Current always returns the same value every time it is called for (int i = 0; i < 3; i++) { tempItem = enumerator.Current; Assert.Equal(currentItem, tempItem); } iterations++; } for (int i = 0; i < expectedCount; ++i) { Assert.True(itemsVisited[i], "Err_052848ahiedoi Expected Current to return true for item: " + expectedItems[i] + "index: " + i); } Assert.Equal(expectedCount, iterations); for (int i = 0; i < 3; i++) { Assert.False(enumerator.MoveNext(), "Err_2929ahiea Expected MoveNext to return false after" + iterations + " iterations"); } } #endregion } }
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./src/tests/Loader/classloader/TypeGeneratorTests/TypeGeneratorTest905/Generated905.ilproj
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <ItemGroup> <Compile Include="Generated905.il" /> </ItemGroup> <ItemGroup> <ProjectReference Include="..\TestFramework\TestFramework.csproj" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <ItemGroup> <Compile Include="Generated905.il" /> </ItemGroup> <ItemGroup> <ProjectReference Include="..\TestFramework\TestFramework.csproj" /> </ItemGroup> </Project>
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./src/libraries/System.IO.FileSystem/tests/FileStream/ctor_str_fm_fa_fs_buffer_async.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using Xunit; namespace System.IO.Tests { public class FileStream_ctor_str_fm_fa_fs_buffer_async : FileStream_ctor_str_fm_fa_fs_buffer { protected sealed override FileStream CreateFileStream(string path, FileMode mode, FileAccess access, FileShare share, int bufferSize) { return CreateFileStream(path, mode, access, share, bufferSize, false); } protected virtual FileStream CreateFileStream(string path, FileMode mode, FileAccess access, FileShare share, int bufferSize, bool useAsync) { return new FileStream(path, mode, access, share, bufferSize, useAsync); } [Theory] [InlineData(true)] [InlineData(false)] public void ValidUseAsync(bool isAsync) { using (FileStream fs = CreateFileStream(GetTestFilePath(), FileMode.Create, FileAccess.ReadWrite, FileShare.Read, c_DefaultBufferSize, isAsync)) { Assert.Equal(isAsync, fs.IsAsync); } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using Xunit; namespace System.IO.Tests { public class FileStream_ctor_str_fm_fa_fs_buffer_async : FileStream_ctor_str_fm_fa_fs_buffer { protected sealed override FileStream CreateFileStream(string path, FileMode mode, FileAccess access, FileShare share, int bufferSize) { return CreateFileStream(path, mode, access, share, bufferSize, false); } protected virtual FileStream CreateFileStream(string path, FileMode mode, FileAccess access, FileShare share, int bufferSize, bool useAsync) { return new FileStream(path, mode, access, share, bufferSize, useAsync); } [Theory] [InlineData(true)] [InlineData(false)] public void ValidUseAsync(bool isAsync) { using (FileStream fs = CreateFileStream(GetTestFilePath(), FileMode.Create, FileAccess.ReadWrite, FileShare.Read, c_DefaultBufferSize, isAsync)) { Assert.Equal(isAsync, fs.IsAsync); } } } }
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./src/tests/JIT/jit64/valuetypes/nullable/castclass/castclass/castclass033.csproj
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType>PdbOnly</DebugType> </PropertyGroup> <ItemGroup> <Compile Include="castclass033.cs" /> <Compile Include="..\structdef.cs" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType>PdbOnly</DebugType> </PropertyGroup> <ItemGroup> <Compile Include="castclass033.cs" /> <Compile Include="..\structdef.cs" /> </ItemGroup> </Project>
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./src/coreclr/debug/inc/dacdbistructures.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. //***************************************************************************** // File: DacDbiStructures.h // // // Declarations and inline functions for data structures shared between by the // DAC/DBI interface functions and the right side. // // Note that for MAC these structures are marshalled between Windows and Mac // and so their layout and size must be identical in both builds. Use the // MSLAYOUT macro on every structure to avoid compiler packing differences. // //***************************************************************************** #ifndef DACDBISTRUCTURES_H_ #define DACDBISTRUCTURES_H_ #include "./common.h" //------------------------------------------------------------------------------- // classes shared by the DAC/DBI interface functions and the right side //------------------------------------------------------------------------------- // DacDbiArrayList encapsulates an array and the number of elements in the array. // Notes: // - storage is always on the DacDbi heap // - this class owns the memory. Its dtor will free. // - Operations that initialize list elements use the assignment // operator defined for type T. If T is a pointer type or has pointer // type components and no assignment operator override, this will make a shallow copy of // the element. If T has an assignment operator override that makes a deep copy of pointer // types, T must also have a destructor that will deallocate any memory allocated. // - this is NOT thread safe!!! // - the array elements are always mutable, but the number of elements is fixed between allocations // - you can gain access to the array using &(list[0]) but this is NOT safe if the array is empty. 
You // can call IsEmpty to determine if it is safe to access the array portion // This list is not designed to have unused elements at the end of the array (extra space) nor to be growable // usage examples: // typedef DacDbiArrayList<Bar> BarList; // handy typedef // void GetAListOfBars(BarList * pBarList) // { // DacDbiArrayList<Foo> fooList; // fooList is an empty array of objects of type Foo // int elementCount = GetNumberOfFoos(); // Bar * pBars = new Bar[elementCount]; // // fooList.Alloc(elementCount); // get space for the list of Foo instances // for (int i = 0; i < fooList.Count(); ++i) // { // fooList[i] = GetNextFoo(); // copy elements into the list // } // ConvertFoosToBars(pBars, &fooList); // always pass by reference // pBarList->Init(pBars, fooList.Count()); // initialize a list // } // // void ConvertFoosToBars(Bar * pBars, DacDbiArrayList<Foo> * pFooList) // { // for (int i = 0; i < pFooList->Count(); ++i) // { // if ((*pFooList)[i].IsBaz()) // pBars [i] = ConvertBazToBar(&(*pFooList)[i]); // else pBars [i] = (*pFooList)[i].barPart; // } // } // template<class T> class MSLAYOUT DacDbiArrayList { public: // construct an empty list DacDbiArrayList(); // deep copy constructor DacDbiArrayList(const T * list, int count); // destructor--sets list to empty state ~DacDbiArrayList(); // explicitly deallocate the list and set it back to the empty state void Dealloc(); // allocate a list with space for nElements items void Alloc(int nElements); // allocate and initialize a DacDbiArrayList from an array of type T and a count void Init(const T * list, int count); // predicate to indicate if the list is empty bool IsEmpty() { return m_nEntries == 0; } // read-only element accessor const T & operator [](int index) const; // writeable element accessor T & operator [](int index); // returns the number of elements in the list unsigned int Count() const; // @dbgtodo Mac - cleaner way to expose this for serialization? 
void PrepareForDeserialize() { m_pList = NULL; } private: // because these are private (and unimplemented), calls will generate a compiler (or linker) error. // This prevents accidentally invoking the default (shallow) copy ctor or assignment operator. // This prevents having multiple instances point to the same list memory (eg. due to passing by value), // which would result in memory corruption when the first copy is destroyed and the list memory is deallocated. DacDbiArrayList(const DacDbiArrayList<T> & sourceList); T & operator = (const DacDbiArrayList<T> & rhs); // data members protected: T * m_pList; // the list // - the count is managed by the member functions and is not settable, so (m_pList == NULL) == (m_nEntries == 0) // is always true. int m_nEntries; // the number of items in the list }; // Describes a buffer in the target struct MSLAYOUT TargetBuffer { TargetBuffer(); TargetBuffer(CORDB_ADDRESS pBuffer, ULONG cbSizeInput); // @dbgtodo : This ctor form confuses target and host address spaces. This should probably be PTR_VOID instead of void* TargetBuffer(void * pBuffer, ULONG cbSizeInput); // // Helper methods // // Return a sub-buffer that's starts at byteOffset within this buffer and runs to the end. TargetBuffer SubBuffer(ULONG byteOffset) const; // Return a sub-buffer that starts at byteOffset within this buffer and is byteLength long. TargetBuffer SubBuffer(ULONG byteOffset, ULONG byteLength) const; // Returns true if the buffer length is 0. bool IsEmpty() const; // Sets address to NULL and size to 0 // IsEmpty() will be true after this. void Clear(); // Initialize fields void Init(CORDB_ADDRESS address, ULONG size); // Target address of buffer CORDB_ADDRESS pAddress; // Size of buffer in bytes ULONG cbSize; }; //=================================================================================== // Module properties, retrieved by DAC. // Describes a VMPTR_DomainAssembly representing a module. 
// In the VM, a raw Module may be domain neutral and shared by many appdomains. // Whereas a DomainAssembly is like a { AppDomain, Module} pair. DomainAssembly corresponds // much more to ICorDebugModule (which also has appdomain affinity). //=================================================================================== struct MSLAYOUT DomainAssemblyInfo { // The appdomain that the DomainAssembly is associated with. // Although VMPTR_Module may be shared across multiple domains, a DomainAssembly has appdomain affinity. VMPTR_AppDomain vmAppDomain; // The assembly this module belongs to. All modules live in an assembly. VMPTR_DomainAssembly vmDomainAssembly; }; struct MSLAYOUT ModuleInfo { // The non-domain specific assembly which this module resides in. VMPTR_Assembly vmAssembly; // The PE Base address and size of the module. These may be 0 if there is no image // (such as for a dynamic module that's not persisted to disk). CORDB_ADDRESS pPEBaseAddress; // The PEAssembly associated with the module. Every module (even non-file-based ones) has a PEAssembly. // This is critical because DAC may ask for a metadata importer via PE-file. // a PEAssembly may have 1 or more PEImage child objects (1 for IL, 1 for native image, etc) VMPTR_PEAssembly vmPEAssembly; // The PE Base address and size of the module. These may be 0 if there is no image // (such as for a dynamic module that's not persisted to disk). ULONG nPESize; // Is this a dynamic (reflection.emit) module? // This means that new classes can be added to the module; and so // the module's metadata and symbols can be updated. Debugger will have to do extra work // to keep up with the updates. // Dynamic modules may be transient (entirely in-memory) or persisted to disk (have a file associated with them). BOOL fIsDynamic; // Is this an inmemory module? // Assemblies can be instantiated purely in-memory from just a Byte[]. 
// This means the module (and pdb) are not in files, and thus the debugger // needs to do extra work to retrieve them from the Target's memory. BOOL fInMemory; }; // the following two classes track native offsets for local variables and sequence // points. This information is initialized on demand. //=================================================================================== // NativeVarData holds a list of structs that provide the following information for // each local variable and fixed argument in a function: the offsets between which the // variable or argument lives in a particular location, the location itself, and the // variable number (ID). This allows us to determine where a value is at any given IP. // Lifetime management of the list is the responsibility of the NativeVarData class. // Callers that allocate memory for a new list should NOT maintain a separate pointer // to the list. // The arguments we track are the "fixed" arguments, specifically, the explicit arguments // that appear in the source code and the "this" pointer for non-static methods. // Varargs and other implicit arguments, such as the generic handle are counted in // CordbJITILFrame::m_allArgsCount. // Although logically, we really don't differentiate between arguments and locals when // all we want to know is where to find a value, we need to have two // separate counts. The full explanation is in the comment in rsthread.cpp in // CordbJITILFrame::ILVariableToNative, but the short version is that it allows us to // compute the correct ID for a value. // m_fixedArgsCount, accessed through GetFixedArgCount, is the actual number of fixed // arguments. // m_allArgsCount, accessed through GetAllArgsCount, is the number of fixed args plus the // number of varargs. // The number of entries in m_offsetInfo, accessed through Count(), is NOT the // number of locals, nor the number of locals plus the number of arguments. It is the // number of entries in the list. 
Any particular value may have an arbitrary number of // entries, depending on how many different places it is stored during the execution of // the method. The list is not sorted, so searches for data within it must be linear. //=================================================================================== class MSLAYOUT NativeVarData { public: // constructor NativeVarData(); // destructor ~NativeVarData(); // initialize the list of native var information structures, including the starting address of the list // (m_pOffsetInfo, the number of entries (m_count) and the number of fixed args (m_fixedArgsCount). // NativeVarData will manage the lifetime of the allocated memory for the list, so the caller should not // hold on to its address. void InitVarDataList(ICorDebugInfo::NativeVarInfo * plistStart, int fixedArgCount, int entryCount); private: // non-existent copy constructor to disable the (shallow) compiler-generated // one. If you attempt to use this, you will get a compiler or linker error. NativeVarData(const NativeVarData & rhs) {}; // non-existent assignment operator to disable the (shallow) compiler-generated // one. If you attempt to use this, you will get a compiler or linker error. 
NativeVarData & operator=(const NativeVarData & rhs); //---------------------------------------------------------------------------------- // Accessor Functions //---------------------------------------------------------------------------------- public: // get the list of native offset info const DacDbiArrayList<ICorDebugInfo::NativeVarInfo> * GetOffsetInfoList() const { _ASSERTE(m_fInitialized); return &m_offsetInfo; } // get the number of explicit arguments for this function--this // includes the fixed arguments for vararg methods, but not the variable ones ULONG32 GetFixedArgCount() { _ASSERTE(IsInitialized()); // this count includes explicit arguments plus one for the "this" pointer // but doesn't count varargs return m_fixedArgsCount; } // get the number of all arguments, including varargs ULONG32 GetAllArgsCount() { _ASSERTE(IsInitialized()); return m_allArgsCount; } // set the number of all arguments, including varargs void SetAllArgsCount(ULONG32 count) { m_allArgsCount = count; } // determine whether we have successfully initialized this BOOL IsInitialized() { return m_fInitialized == true; } //---------------------------------------------------------------------------------- // Data Members //---------------------------------------------------------------------------------- // @dbgtodo Mac - making this public for serializing for remote DAC on mac. Need to make this private again. 
public: // contains a list of structs providing information about the location of a local // variable or argument between a pair of offsets and the number of entries in the list DacDbiArrayList<ICorDebugInfo::NativeVarInfo> m_offsetInfo; // number of fixed arguments to the function i.e., the explicit arguments and "this" pointer ULONG32 m_fixedArgsCount; // number of fixed arguments plus number of varargs ULONG32 m_allArgsCount; // indicates whether an attempt has been made to initialize the var data already bool m_fInitialized; }; // class NativeVarData //=================================================================================== // SequencePoints holds a list of sequence points that map IL offsets to native offsets. In addition, // it keeps track of the number of entries in the list and whether the list is sorted. //=================================================================================== class MSLAYOUT SequencePoints { public: SequencePoints(); ~SequencePoints(); // Initialize the m_pMap data member to the address of an allocated chunk // of memory (or to NULL if the count is zero). Set m_count as the // number of entries in the map. void InitSequencePoints(ULONG32 count); private: // non-existent copy constructor to disable the (shallow) compiler-generated // one. If you attempt to use this, you will get a compiler or linker error. SequencePoints(const SequencePoints & rhs) {}; // non-existent assignment operator to disable the (shallow) compiler-generated // one. If you attempt to use this, you will get a compiler or linker error. SequencePoints & operator=(const SequencePoints & rhs); //---------------------------------------------------------------------------------- // class MapSortILMap: A template class that will sort an array of DebuggerILToNativeMap. // This class is intended to be instantiated on the stack / in temporary storage, and used // to reorder the sequence map. 
//---------------------------------------------------------------------------------- class MapSortILMap : public CQuickSort<DebuggerILToNativeMap> { public: //Constructor MapSortILMap(DebuggerILToNativeMap * map, int count) : CQuickSort<DebuggerILToNativeMap>(map, count) {} // secondary key comparison--if two IL offsets are the same, // we determine order based on native offset int CompareInternal(DebuggerILToNativeMap * first, DebuggerILToNativeMap * second); //Comparison operator int Compare(DebuggerILToNativeMap * first, DebuggerILToNativeMap * second); }; //---------------------------------------------------------------------------------- // Accessor Functions //---------------------------------------------------------------------------------- public: // @dbgtodo Microsoft inspection: It would be very nice not to need this at all. Ideally, // it would be better to make ExportILToNativeMap expect a DacDbiArrayList instead of the // array and size. At present, there's a call to ExportILToNativeMap in debugger.cpp where // DacDbiArrayLists aren't available, so at present, we need to pass the array and size. // We should be able to eliminate the debugger.cpp call when we get rid of in-proc // inspection. At that point, we can delete this function too, as well as GetEntryCount. // In the meantime, it would be great if no one else took a dependency on this. 
// get value of m_pMap DebuggerILToNativeMap * GetMapAddr() { // Please don't call this function _ASSERTE(m_fInitialized); return &(m_map[0]); } // get value of m_count ULONG32 GetEntryCount() { _ASSERTE(m_fInitialized); return m_mapCount; } ULONG32 GetCallsiteEntryCount() { _ASSERTE(m_fInitialized); return m_map.Count() - m_mapCount; //m_map.Count(); } DebuggerILToNativeMap * GetCallsiteMapAddr() { // Please don't call this function _ASSERTE(m_fInitialized); if (m_map.Count() == m_mapCount) return NULL; return &(m_map[m_mapCount]); } // determine whether we have initialized this BOOL IsInitialized() { return m_fInitialized == true; } // Copy data from the VM map data to our own map structure and sort. The // information comes to us in a data structure that differs slightly from the // one we use out of process, so we have to copy it to the right-side struct. void CopyAndSortSequencePoints(const ICorDebugInfo::OffsetMapping mapCopy[]); // Set the IL offset of the last sequence point before the epilog. // If a native offset maps to the epilog, we will return the this IL offset. void SetLastILOffset(ULONG32 lastILOffset) { _ASSERTE(m_fInitialized); m_lastILOffset = lastILOffset; } // Map the given native offset to IL offset. Also return the mapping type. DWORD MapNativeOffsetToIL(DWORD dwNativeOffset, CorDebugMappingResult *pMapType); //---------------------------------------------------------------------------------- // Data Members //---------------------------------------------------------------------------------- // @dbgtodo Mac - making this public for serializing for remote DAC on mac. Need to make this private again. 
public: // map of IL to native offsets for sequence points DacDbiArrayList<DebuggerILToNativeMap> m_map; // ULONG32 m_mapCount; // the IL offset of the last sequence point before the epilog ULONG32 m_lastILOffset; // indicates whether an attempt has been made to initialize the sequence points already bool m_fInitialized; }; // class SequencePoints //---------------------------------------------------------------------------------- // declarations needed for getting native code regions //---------------------------------------------------------------------------------- // Code may be split into Hot & Cold regions, so we need an extra address & size. // The jitter doesn't do this optimization w/ debuggable code, so we'll // rarely see the cold region information as non-null values. // This enumeration provides symbolic indices into m_rgCodeRegions. typedef enum {kHot = 0, kCold, MAX_REGIONS} CodeBlobRegion; // This contains the information we need to initialize a CordbNativeCode object class MSLAYOUT NativeCodeFunctionData { public: // set all fields to default values (NULL, FALSE, or zero as appropriate) NativeCodeFunctionData(); // conversion constructor to convert from an instance of DebuggerIPCE_JITFUncData to an instance of // NativeCodeFunctionData. NativeCodeFunctionData(DebuggerIPCE_JITFuncData * source); // The hot region start address could be NULL in the following circumstances: // 1. We haven't yet tried to get the information // 2. We tried to get the information, but the function hasn't been jitted yet // 3. We tried to get the information, but the MethodDesc wasn't available yet (very early in // module initialization), which implies that the code isn't available either. // 4. We tried to get the information, but a method edit has reset the MethodDesc, but the // method hasn't been jitted yet. // In all cases, we can check the hot region start address to determine whether the rest of the // the information is valid. 
BOOL IsValid() { return (m_rgCodeRegions[kHot].pAddress != NULL); } void Clear(); // data members // start addresses and sizes of hot & cold regions TargetBuffer m_rgCodeRegions[MAX_REGIONS]; // indicates whether the function is a generic function, or a method inside a generic class (or both). BOOL isInstantiatedGeneric; // MethodDesc for the function VMPTR_MethodDesc vmNativeCodeMethodDescToken; // EnC version number of the function SIZE_T encVersion; }; //---------------------------------------------------------------------------------- // declarations needed for getting type information //---------------------------------------------------------------------------------- // FieldData holds data for each field within a class or type. This data // is passed from the DAC to the DI in response to a request for class info. // This type is also used by CordbClass and CordbType to hold the list of fields for the // class. class MSLAYOUT FieldData { public: #ifndef RIGHT_SIDE_COMPILE // initialize various fields of an instance of FieldData from information in a FieldDesc void Initialize(BOOL fIsStatic, BOOL fIsPrimitive, mdFieldDef mdToken); #else HRESULT GetFieldSignature(class CordbModule * pModule, /*OUT*/ SigParser * pSigParser); #endif // clear various fields for a new instance of FieldData void ClearFields(); // Make sure it's okay to get or set an instance field offset. BOOL OkToGetOrSetInstanceOffset(); // Make sure it's okay to get or set a static field address. BOOL OkToGetOrSetStaticAddress(); // If this is an instance field, store its offset void SetInstanceOffset( SIZE_T offset ); // If this is a "normal" static, store its absolute address void SetStaticAddress( TADDR addr ); // If this is an instance field, return its offset // Note that this offset is allways a real offset (possibly larger than 22 bits), which isn't // necessarily the same as the overloaded FieldDesc.dwOffset field which can have // some special FIELD_OFFSET tokens. 
    SIZE_T GetInstanceOffset();

    // If this is a "normal" static, get its absolute address
    // TLS and context-specific statics are "special".
    TADDR GetStaticAddress();

    //
    // Data members
    //
    mdFieldDef      m_fldMetadataToken;
    // m_fFldStorageAvailable is true whenever the storage for this field is available.
    // If this is a field that is newly added with EnC and hasn't had any storage
    // allocated yet, then fldEnCAvailable will be false.
    BOOL            m_fFldStorageAvailable;

    // Bits that specify what type of field this is
    bool            m_fFldIsStatic;             // true if static field, false if instance field
    bool            m_fFldIsRVA;                // true if static relative to module address
    bool            m_fFldIsTLS;                // true if thread-specific static
    bool            m_fFldIsPrimitive;          // Only true if this is a value type masquerading as a primitive.
    bool            m_fFldIsCollectibleStatic;  // true if this is a static field on a collectible type

private:
    // The m_fldInstanceOffset and m_pFldStaticAddress are mutually exclusive. Only one is ever set at a time.
    SIZE_T          m_fldInstanceOffset;        // The offset of a field within an object instance
                                                // For EnC fields, this isn't actually within the object instance,
                                                // but has been cooked to still be relative to the beginning of
                                                // the object.
    TADDR           m_pFldStaticAddress;        // The absolute target address of a static field

    PCCOR_SIGNATURE m_fldSignatureCache;        // This is passed across as null. It is a RS-only cache, and SHOULD
                                                // NEVER BE ACCESSED DIRECTLY!
    ULONG           m_fldSignatureCacheSize;    // This is passed across as 0. It is a RS-only cache, and SHOULD
                                                // NEVER BE ACCESSED DIRECTLY!
public:
    VMPTR_FieldDesc m_vmFieldDesc;

}; // class FieldData

// ClassInfo holds information about a type (class or other structured type), including a list of its fields
class MSLAYOUT ClassInfo
{
public:
    ClassInfo();
    ~ClassInfo();

    void Clear();

    // Size of object in bytes, for non-generic types. Note: this is NOT valid for constructed value types,
    // e.g. value type Pair<DateTime,int>. Use CordbType::m_objectSize instead.
    SIZE_T      m_objectSize;

    // list of structs containing information about all the fields in this Class, along with the number of entries
    // in the list. Does not include inherited fields. DON'T KEEP POINTERS TO ELEMENTS OF m_fieldList AROUND!!
    // This may be deleted if the class gets EnC'd.
    DacDbiArrayList<FieldData> m_fieldList;

}; // class ClassInfo

// EnCHangingFieldInfo holds information describing a field added with Edit And Continue. This data
// is passed from the DAC to the DI in response to a request for EnC field info.
class MSLAYOUT EnCHangingFieldInfo
{
public:
    // Init will initialize fields, taking into account whether the field is static or not.
    void Init(VMPTR_Object         pObject,
              SIZE_T               offset,
              mdFieldDef           fieldToken,
              CorElementType       elementType,
              mdTypeDef            metadataToken,
              VMPTR_DomainAssembly vmDomainAssembly);

    DebuggerIPCE_BasicTypeData GetObjectTypeData() const { return m_objectTypeData; };
    mdFieldDef GetFieldToken() const { return m_fldToken; };
    VMPTR_Object GetVmObject() const { return m_vmObject; };
    SIZE_T GetOffsetToVars() const { return m_offsetToVars; };

private:
    DebuggerIPCE_BasicTypeData m_objectTypeData; // type data for the EnC field
    VMPTR_Object               m_vmObject;       // object instance to which the field has been added--if the field is
                                                 // static, this will be NULL instead of pointing to an instance
    SIZE_T                     m_offsetToVars;   // offset to the beginning of variable storage in the object
    mdFieldDef                 m_fldToken;       // metadata token for the added field

}; // EnCHangingFieldInfo

// TypeHandleToExpandedTypeInfo returns different DebuggerIPCE_ExpandedTypeData objects
// depending on whether the object value that the TypeData corresponds to is
// boxed or not. Different parts of the API transfer objects in slightly different ways.
// AllBoxed:
//    For GetAndSendObjectData all values are boxed,
//
// OnlyPrimitivesUnboxed:
//     When returning results from FuncEval only "true" structs
//     get boxed, i.e. primitives are unboxed.
//
// NoValueTypeBoxing:
//     TypeHandleToExpandedTypeInfo is also used to report type parameters,
//     and in this case none of the types are considered boxed.
enum AreValueTypesBoxed { NoValueTypeBoxing, OnlyPrimitivesUnboxed, AllBoxed };

// TypeRefData is used for resolving a type reference (see code:CordbModule::ResolveTypeRef and
// code:DacDbiInterfaceImpl::ResolveTypeReference) to store relevant information about the type
typedef struct MSLAYOUT
{
    // domain file for the type
    VMPTR_DomainAssembly vmDomainAssembly;
    // metadata token for the type. This may be a typeRef (for requests) or a typeDef (for responses).
    mdToken              typeToken;
} TypeRefData;

// @dbgtodo Microsoft inspection: get rid of IPCE type.
// TypeInfoList encapsulates a list of type data instances and the length of the list.
typedef DacDbiArrayList<DebuggerIPCE_TypeArgData> TypeInfoList;

// ArgInfoList encapsulates a list of type data instances for arguments for a top-level
// type and the length of the list.
typedef DacDbiArrayList<DebuggerIPCE_BasicTypeData> ArgInfoList;

// TypeParamsList encapsulate a list of type parameters and the length of the list
typedef DacDbiArrayList<DebuggerIPCE_ExpandedTypeData> TypeParamsList;

// A struct for passing version information from DBI to DAC.
// See code:CordbProcess::CordbProcess#DBIVersionChecking for more information.
const DWORD kCurrentDacDbiProtocolBreakingChangeCounter = 1;
struct DbiVersion
{
    DWORD m_dwFormat;        // the format of this DbiVersion instance
    DWORD m_dwDbiVersionMS;  // version of the DBI DLL, in the convention used by VS_FIXEDFILEINFO
    DWORD m_dwDbiVersionLS;
    DWORD m_dwProtocolBreakingChangeCounter;  // initially this was reserved and always set to 0
                                              // Now we use it as a counter to explicitly introduce breaking changes
                                              // between DBI and DAC when we have our IPC transport in the middle
                                              // If DBI and DAC don't agree on the same value CheckDbiVersion will return CORDBG_E_INCOMPATIBLE_PROTOCOL
                                              // Please document every time this value changes
                                              // 0 - initial value
                                              // 1 - Indicates that the protocol now supports the GetRemoteInterfaceHashAndTimestamp message
                                              //     The message must have ID 2, with signature:
                                              //     OUT DWORD & hash1, OUT DWORD & hash2, OUT DWORD & hash3, OUT DWORD & hash4, OUT DWORD & timestamp1, OUT DWORD & timestamp2
                                              //     The hash can be used as an indicator of many other breaking changes providing
                                              //     easier automated enforcement during development. It is NOT recommended to use
                                              //     the hash as a release versioning mechanism however.
    DWORD m_dwReservedMustBeZero1;            // reserved for future use
};

// The way in which a thread is blocking on an object
enum DacBlockingReason
{
    DacBlockReason_MonitorCriticalSection,
    DacBlockReason_MonitorEvent
};

// Information about an object which is blocking a managed thread
struct DacBlockingObject
{
    VMPTR_Object      vmBlockingObject;
    VMPTR_AppDomain   vmAppDomain;
    DWORD             dwTimeout;
    DacBlockingReason blockingReason;
};

// Opaque user defined data used in callbacks
typedef void* CALLBACK_DATA;

struct MonitorLockInfo
{
    VMPTR_Thread lockOwner;
    DWORD        acquisitionCount;
};

struct MSLAYOUT DacGcReference
{
    VMPTR_AppDomain vmDomain;       // The AppDomain of the handle/object, may be null.
    union
    {
        CORDB_ADDRESS pObject;          // A managed object, with the low bit set.
        VMPTR_OBJECTHANDLE objHnd;      // A reference to the object, valid if (pAddress & 1) == 0
    };
    DWORD dwType;                   // Where the root came from.

    /*
        DependentSource - for HandleDependent
        RefCount - for HandleStrongRefCount
        Size - for HandleSizedByref
    */
    UINT64 i64ExtraData;
}; // struct DacGcReference

struct MSLAYOUT DacExceptionCallStackData
{
    VMPTR_AppDomain vmAppDomain;
    VMPTR_DomainAssembly vmDomainAssembly;
    CORDB_ADDRESS ip;
    mdMethodDef methodDef;
    BOOL isLastForeignExceptionFrame;
};

// These represent the various states a SharedReJitInfo can be in.
enum DacSharedReJitInfoState
{
    // The profiler has requested a ReJit, so we've allocated stuff, but we haven't
    // called back to the profiler to get any info or indicate that the ReJit has
    // started. (This Info can be 'reused' for a new ReJit if the
    // profiler calls RequestReJit again before we transition to the next state.)
    kStateRequested = 0x00000000,

    // We have asked the profiler about this method via ICorProfilerFunctionControl,
    // and have thus stored the IL and codegen flags the profiler specified. Can only
    // transition to kStateReverted from this state.
    kStateActive = 0x00000001,

    // The methoddef has been reverted, but not freed yet. It (or its instantiations
    // for generics) *MAY* still be active on the stack someplace or have outstanding
    // memory references.
    kStateReverted = 0x00000002,

    kStateMask = 0x0000000F,
};

struct MSLAYOUT DacSharedReJitInfo
{
    DWORD          m_state;
    CORDB_ADDRESS  m_pbIL;
    DWORD          m_dwCodegenFlags;
    ULONG          m_cInstrumentedMapEntries;
    CORDB_ADDRESS  m_rgInstrumentedMapEntries;
};

// These represent the allocated bytes so far on the thread.
// NOTE(review): from the names, SOH/UOH presumably mean small-object-heap and
// non-small (large/pinned) object heap allocation totals -- confirm against the GC side.
struct MSLAYOUT DacThreadAllocInfo
{
    ULONG64 m_allocBytesSOH;
    ULONG64 m_allocBytesUOH;
};

#include "dacdbistructures.inl"

#endif // DACDBISTRUCTURES_H_
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//*****************************************************************************
// File: DacDbiStructures.h
//
//
// Declarations and inline functions for data structures shared between by the
// DAC/DBI interface functions and the right side.
//
// Note that for MAC these structures are marshalled between Windows and Mac
// and so their layout and size must be identical in both builds. Use the
// MSLAYOUT macro on every structure to avoid compiler packing differences.
//
//*****************************************************************************

#ifndef DACDBISTRUCTURES_H_
#define DACDBISTRUCTURES_H_

#include "./common.h"

//-------------------------------------------------------------------------------
// classes shared by the DAC/DBI interface functions and the right side
//-------------------------------------------------------------------------------

// DacDbiArrayList encapsulates an array and the number of elements in the array.
// Notes:
// - storage is always on the DacDbi heap
// - this class owns the memory. Its dtor will free.
// - Operations that initialize list elements use the assignment
//   operator defined for type T. If T is a pointer type or has pointer
//   type components and no assignment operator override, this will make a shallow copy of
//   the element. If T has an assignment operator override that makes a deep copy of pointer
//   types, T must also have a destructor that will deallocate any memory allocated.
// - this is NOT thread safe!!!
// - the array elements are always mutable, but the number of elements is fixed between allocations
// - you can gain access to the array using &(list[0]) but this is NOT safe if the array is empty. You
//   can call IsEmpty to determine if it is safe to access the array portion
// This list is not designed to have unused elements at the end of the array (extra space) nor to be growable
//
// usage examples:
// typedef DacDbiArrayList<Bar> BarList;        // handy typedef
// void GetAListOfBars(BarList * pBarList)
// {
//     DacDbiArrayList<Foo> fooList;            // fooList is an empty array of objects of type Foo
//     int elementCount = GetNumberOfFoos();
//     Bar * pBars = new Bar[elementCount];
//
//     fooList.Alloc(elementCount);             // get space for the list of Foo instances
//     for (int i = 0; i < fooList.Count(); ++i)
//     {
//         fooList[i] = GetNextFoo();           // copy elements into the list
//     }
//     ConvertFoosToBars(pBars, &fooList);      // always pass by reference
//     pBarList->Init(pBars, fooList.Count());  // initialize a list
// }
//
// void ConvertFoosToBars(Bar * pBars, DacDbiArrayList<Foo> * pFooList)
// {
//     for (int i = 0; i < pFooList->Count(); ++i)
//     {
//          if ((*pFooList)[i].IsBaz())
//              pBars [i] = ConvertBazToBar(&(*pFooList)[i]);
//          else pBars [i] = (*pFooList)[i].barPart;
//     }
// }
//
template<class T>
class MSLAYOUT DacDbiArrayList
{
public:
    // construct an empty list
    DacDbiArrayList();

    // deep copy constructor
    DacDbiArrayList(const T * list, int count);

    // destructor--sets list to empty state
    ~DacDbiArrayList();

    // explicitly deallocate the list and set it back to the empty state
    void Dealloc();

    // allocate a list with space for nElements items
    void Alloc(int nElements);

    // allocate and initialize a DacDbiArrayList from an array of type T and a count
    void Init(const T * list, int count);

    // predicate to indicate if the list is empty
    bool IsEmpty() { return m_nEntries == 0; }

    // read-only element accessor
    const T & operator [](int index) const;

    // writeable element accessor
    T & operator [](int index);

    // returns the number of elements in the list
    unsigned int Count() const;

    // @dbgtodo Mac - cleaner way to expose this for serialization?
    void PrepareForDeserialize() { m_pList = NULL; }
private:
    // because these are private (and unimplemented), calls will generate a compiler (or linker) error.
    // This prevents accidentally invoking the default (shallow) copy ctor or assignment operator.
    // This prevents having multiple instances point to the same list memory (eg. due to passing by value),
    // which would result in memory corruption when the first copy is destroyed and the list memory is deallocated.
    DacDbiArrayList(const DacDbiArrayList<T> & sourceList);
    T & operator = (const DacDbiArrayList<T> & rhs);

    // data members
protected:
    T *   m_pList;      // the list

    // - the count is managed by the member functions and is not settable, so (m_pList == NULL) == (m_nEntries == 0)
    //   is always true.
    int   m_nEntries;   // the number of items in the list
};

// Describes a buffer in the target
struct MSLAYOUT TargetBuffer
{
    TargetBuffer();
    TargetBuffer(CORDB_ADDRESS pBuffer, ULONG cbSizeInput);

    // @dbgtodo : This ctor form confuses target and host address spaces. This should probably be PTR_VOID instead of void*
    TargetBuffer(void * pBuffer, ULONG cbSizeInput);

    //
    // Helper methods
    //

    // Return a sub-buffer that's starts at byteOffset within this buffer and runs to the end.
    TargetBuffer SubBuffer(ULONG byteOffset) const;

    // Return a sub-buffer that starts at byteOffset within this buffer and is byteLength long.
    TargetBuffer SubBuffer(ULONG byteOffset, ULONG byteLength) const;

    // Returns true if the buffer length is 0.
    bool IsEmpty() const;

    // Sets address to NULL and size to 0
    // IsEmpty() will be true after this.
    void Clear();

    // Initialize fields
    void Init(CORDB_ADDRESS address, ULONG size);

    // Target address of buffer
    CORDB_ADDRESS pAddress;

    // Size of buffer in bytes
    ULONG         cbSize;
};

//===================================================================================
// Module properties, retrieved by DAC.
// Describes a VMPTR_DomainAssembly representing a module.
// In the VM, a raw Module may be domain neutral and shared by many appdomains.
// Whereas a DomainAssembly is like a { AppDomain, Module} pair. DomainAssembly corresponds
// much more to ICorDebugModule (which also has appdomain affinity).
//===================================================================================
struct MSLAYOUT DomainAssemblyInfo
{
    // The appdomain that the DomainAssembly is associated with.
    // Although VMPTR_Module may be shared across multiple domains, a DomainAssembly has appdomain affinity.
    VMPTR_AppDomain vmAppDomain;

    // The assembly this module belongs to. All modules live in an assembly.
    VMPTR_DomainAssembly vmDomainAssembly;
};

struct MSLAYOUT ModuleInfo
{
    // The non-domain specific assembly which this module resides in.
    VMPTR_Assembly vmAssembly;

    // The PE Base address and size of the module. These may be 0 if there is no image
    // (such as for a dynamic module that's not persisted to disk).
    CORDB_ADDRESS pPEBaseAddress;

    // The PEAssembly associated with the module. Every module (even non-file-based ones) has a PEAssembly.
    // This is critical because DAC may ask for a metadata importer via PE-file.
    // a PEAssembly may have 1 or more PEImage child objects (1 for IL, 1 for native image, etc)
    VMPTR_PEAssembly vmPEAssembly;

    // The PE Base address and size of the module. These may be 0 if there is no image
    // (such as for a dynamic module that's not persisted to disk).
    ULONG nPESize;

    // Is this a dynamic (reflection.emit) module?
    // This means that new classes can be added to the module; and so
    // the module's metadata and symbols can be updated. Debugger will have to do extra work
    // to keep up with the updates.
    // Dynamic modules may be transient (entirely in-memory) or persisted to disk (have a file associated with them).
    BOOL fIsDynamic;

    // Is this an inmemory module?
    // Assemblies can be instantiated purely in-memory from just a Byte[].
    // This means the module (and pdb) are not in files, and thus the debugger
    // needs to do extra work to retrieve them from the Target's memory.
    BOOL fInMemory;
};

// the following two classes track native offsets for local variables and sequence
// points. This information is initialized on demand.

//===================================================================================
// NativeVarData holds a list of structs that provide the following information for
// each local variable and fixed argument in a function: the offsets between which the
// variable or argument lives in a particular location, the location itself, and the
// variable number (ID). This allows us to determine where a value is at any given IP.
//
// Lifetime management of the list is the responsibility of the NativeVarData class.
// Callers that allocate memory for a new list should NOT maintain a separate pointer
// to the list.
//
// The arguments we track are the "fixed" arguments, specifically, the explicit arguments
// that appear in the source code and the "this" pointer for non-static methods.
// Varargs and other implicit arguments, such as the generic handle are counted in
// CordbJITILFrame::m_allArgsCount.
//
// Although logically, we really don't differentiate between arguments and locals when
// all we want to know is where to find a value, we need to have two
// separate counts. The full explanation is in the comment in rsthread.cpp in
// CordbJITILFrame::ILVariableToNative, but the short version is that it allows us to
// compute the correct ID for a value.
//
// m_fixedArgsCount, accessed through GetFixedArgCount, is the actual number of fixed
// arguments.
// m_allArgsCount, accessed through GetAllArgsCount, is the number of fixed args plus the
// number of varargs.
//
// The number of entries in m_offsetInfo, accessed through Count(), is NOT the
// number of locals, nor the number of locals plus the number of arguments. It is the
// number of entries in the list. Any particular value may have an arbitrary number of
// entries, depending on how many different places it is stored during the execution of
// the method. The list is not sorted, so searches for data within it must be linear.
//===================================================================================
class MSLAYOUT NativeVarData
{
public:
    // constructor
    NativeVarData();
    // destructor
    ~NativeVarData();

    // initialize the list of native var information structures, including the starting address of the list
    // (m_pOffsetInfo, the number of entries (m_count) and the number of fixed args (m_fixedArgsCount).
    // NativeVarData will manage the lifetime of the allocated memory for the list, so the caller should not
    // hold on to its address.
    void InitVarDataList(ICorDebugInfo::NativeVarInfo * plistStart, int fixedArgCount, int entryCount);

private:
    // non-existent copy constructor to disable the (shallow) compiler-generated
    // one. If you attempt to use this, you will get a compiler or linker error.
    NativeVarData(const NativeVarData & rhs) {};

    // non-existent assignment operator to disable the (shallow) compiler-generated
    // one. If you attempt to use this, you will get a compiler or linker error.
    NativeVarData & operator=(const NativeVarData & rhs);

//----------------------------------------------------------------------------------
// Accessor Functions
//----------------------------------------------------------------------------------
public:
    // get the list of native offset info
    const DacDbiArrayList<ICorDebugInfo::NativeVarInfo> * GetOffsetInfoList() const
    {
        _ASSERTE(m_fInitialized);
        return &m_offsetInfo;
    }

    // get the number of explicit arguments for this function--this
    // includes the fixed arguments for vararg methods, but not the variable ones
    ULONG32 GetFixedArgCount()
    {
        _ASSERTE(IsInitialized());
        // this count includes explicit arguments plus one for the "this" pointer
        // but doesn't count varargs
        return m_fixedArgsCount;
    }

    // get the number of all arguments, including varargs
    ULONG32 GetAllArgsCount()
    {
        _ASSERTE(IsInitialized());
        return m_allArgsCount;
    }

    // set the number of all arguments, including varargs
    void SetAllArgsCount(ULONG32 count)
    {
        m_allArgsCount = count;
    }

    // determine whether we have successfully initialized this
    BOOL IsInitialized()
    {
        return m_fInitialized == true;
    }

//----------------------------------------------------------------------------------
// Data Members
//----------------------------------------------------------------------------------

// @dbgtodo Mac - making this public for serializing for remote DAC on mac. Need to make this private again.
public:
    // contains a list of structs providing information about the location of a local
    // variable or argument between a pair of offsets and the number of entries in the list
    DacDbiArrayList<ICorDebugInfo::NativeVarInfo> m_offsetInfo;

    // number of fixed arguments to the function i.e., the explicit arguments and "this" pointer
    ULONG32 m_fixedArgsCount;

    // number of fixed arguments plus number of varargs
    ULONG32 m_allArgsCount;

    // indicates whether an attempt has been made to initialize the var data already
    bool m_fInitialized;
}; // class NativeVarData

//===================================================================================
// SequencePoints holds a list of sequence points that map IL offsets to native offsets. In addition,
// it keeps track of the number of entries in the list and whether the list is sorted.
//===================================================================================
class MSLAYOUT SequencePoints
{
public:
    SequencePoints();

    ~SequencePoints();

    // Initialize the m_pMap data member to the address of an allocated chunk
    // of memory (or to NULL if the count is zero). Set m_count as the
    // number of entries in the map.
    void InitSequencePoints(ULONG32 count);

private:
    // non-existent copy constructor to disable the (shallow) compiler-generated
    // one. If you attempt to use this, you will get a compiler or linker error.
    SequencePoints(const SequencePoints & rhs) {};

    // non-existent assignment operator to disable the (shallow) compiler-generated
    // one. If you attempt to use this, you will get a compiler or linker error.
    SequencePoints & operator=(const SequencePoints & rhs);

    //----------------------------------------------------------------------------------
    // class MapSortILMap: A template class that will sort an array of DebuggerILToNativeMap.
    // This class is intended to be instantiated on the stack / in temporary storage, and used
    // to reorder the sequence map.
    //----------------------------------------------------------------------------------
    class MapSortILMap : public CQuickSort<DebuggerILToNativeMap>
    {
      public:
        //Constructor
        MapSortILMap(DebuggerILToNativeMap * map,
                     int count)
          : CQuickSort<DebuggerILToNativeMap>(map, count) {}

        // secondary key comparison--if two IL offsets are the same,
        // we determine order based on native offset
        int CompareInternal(DebuggerILToNativeMap * first,
                            DebuggerILToNativeMap * second);

        //Comparison operator
        int Compare(DebuggerILToNativeMap * first,
                    DebuggerILToNativeMap * second);
    };

//----------------------------------------------------------------------------------
// Accessor Functions
//----------------------------------------------------------------------------------
public:
    // @dbgtodo Microsoft inspection: It would be very nice not to need this at all. Ideally,
    // it would be better to make ExportILToNativeMap expect a DacDbiArrayList instead of the
    // array and size. At present, there's a call to ExportILToNativeMap in debugger.cpp where
    // DacDbiArrayLists aren't available, so at present, we need to pass the array and size.
    // We should be able to eliminate the debugger.cpp call when we get rid of in-proc
    // inspection. At that point, we can delete this function too, as well as GetEntryCount.
    // In the meantime, it would be great if no one else took a dependency on this.
    // get value of m_pMap
    DebuggerILToNativeMap * GetMapAddr()
    {
        // Please don't call this function
        _ASSERTE(m_fInitialized);
        return &(m_map[0]);
    }

    // get value of m_count
    ULONG32 GetEntryCount()
    {
        _ASSERTE(m_fInitialized);
        return m_mapCount;
    }

    ULONG32 GetCallsiteEntryCount()
    {
        _ASSERTE(m_fInitialized);
        return m_map.Count() - m_mapCount; //m_map.Count();
    }

    DebuggerILToNativeMap * GetCallsiteMapAddr()
    {
        // Please don't call this function
        _ASSERTE(m_fInitialized);

        if (m_map.Count() == m_mapCount)
            return NULL;

        return &(m_map[m_mapCount]);
    }

    // determine whether we have initialized this
    BOOL IsInitialized()
    {
        return m_fInitialized == true;
    }

    // Copy data from the VM map data to our own map structure and sort. The
    // information comes to us in a data structure that differs slightly from the
    // one we use out of process, so we have to copy it to the right-side struct.
    void CopyAndSortSequencePoints(const ICorDebugInfo::OffsetMapping mapCopy[]);

    // Set the IL offset of the last sequence point before the epilog.
    // If a native offset maps to the epilog, we will return the this IL offset.
    void SetLastILOffset(ULONG32 lastILOffset)
    {
        _ASSERTE(m_fInitialized);
        m_lastILOffset = lastILOffset;
    }

    // Map the given native offset to IL offset. Also return the mapping type.
    DWORD MapNativeOffsetToIL(DWORD dwNativeOffset,
                              CorDebugMappingResult *pMapType);

//----------------------------------------------------------------------------------
// Data Members
//----------------------------------------------------------------------------------

// @dbgtodo Mac - making this public for serializing for remote DAC on mac. Need to make this private again.
public:
    // map of IL to native offsets for sequence points
    DacDbiArrayList<DebuggerILToNativeMap> m_map;

    // number of sequence-point entries at the front of m_map; entries from index
    // m_mapCount onward are the callsite entries (see GetCallsiteEntryCount and
    // GetCallsiteMapAddr, which derive the callsite portion from this count)
    ULONG32 m_mapCount;

    // the IL offset of the last sequence point before the epilog
    ULONG32 m_lastILOffset;

    // indicates whether an attempt has been made to initialize the sequence points already
    bool m_fInitialized;
}; // class SequencePoints

//----------------------------------------------------------------------------------
// declarations needed for getting native code regions
//----------------------------------------------------------------------------------

// Code may be split into Hot & Cold regions, so we need an extra address & size.
// The jitter doesn't do this optimization w/ debuggable code, so we'll
// rarely see the cold region information as non-null values.
//
// This enumeration provides symbolic indices into m_rgCodeRegions.
typedef enum {kHot = 0, kCold, MAX_REGIONS} CodeBlobRegion;

// This contains the information we need to initialize a CordbNativeCode object
class MSLAYOUT NativeCodeFunctionData
{
public:
    // set all fields to default values (NULL, FALSE, or zero as appropriate)
    NativeCodeFunctionData();

    // conversion constructor to convert from an instance of DebuggerIPCE_JITFUncData to an instance of
    // NativeCodeFunctionData.
    NativeCodeFunctionData(DebuggerIPCE_JITFuncData * source);

    // The hot region start address could be NULL in the following circumstances:
    // 1. We haven't yet tried to get the information
    // 2. We tried to get the information, but the function hasn't been jitted yet
    // 3. We tried to get the information, but the MethodDesc wasn't available yet (very early in
    //    module initialization), which implies that the code isn't available either.
    // 4. We tried to get the information, but a method edit has reset the MethodDesc, but the
    //    method hasn't been jitted yet.
    // In all cases, we can check the hot region start address to determine whether the rest of the
    // the information is valid.
    BOOL IsValid() { return (m_rgCodeRegions[kHot].pAddress != NULL); }
    void Clear();

    // data members
    // start addresses and sizes of hot & cold regions
    TargetBuffer m_rgCodeRegions[MAX_REGIONS];

    // indicates whether the function is a generic function, or a method inside a generic class (or both).
    BOOL isInstantiatedGeneric;

    // MethodDesc for the function
    VMPTR_MethodDesc vmNativeCodeMethodDescToken;

    // EnC version number of the function
    SIZE_T encVersion;
};

//----------------------------------------------------------------------------------
// declarations needed for getting type information
//----------------------------------------------------------------------------------

// FieldData holds data for each field within a class or type. This data
// is passed from the DAC to the DI in response to a request for class info.
// This type is also used by CordbClass and CordbType to hold the list of fields for the
// class.
class MSLAYOUT FieldData
{
public:
#ifndef RIGHT_SIDE_COMPILE
    // initialize various fields of an instance of FieldData from information in a FieldDesc
    void Initialize(BOOL fIsStatic, BOOL fIsPrimitive, mdFieldDef mdToken);
#else
    HRESULT GetFieldSignature(class CordbModule * pModule, /*OUT*/ SigParser * pSigParser);
#endif

    // clear various fields for a new instance of FieldData
    void ClearFields();

    // Make sure it's okay to get or set an instance field offset.
    BOOL OkToGetOrSetInstanceOffset();

    // Make sure it's okay to get or set a static field address.
    BOOL OkToGetOrSetStaticAddress();

    // If this is an instance field, store its offset
    void SetInstanceOffset( SIZE_T offset );

    // If this is a "normal" static, store its absolute address
    void SetStaticAddress( TADDR addr );

    // If this is an instance field, return its offset
    // Note that this offset is always a real offset (possibly larger than 22 bits), which isn't
    // necessarily the same as the overloaded FieldDesc.dwOffset field which can have
    // some special FIELD_OFFSET tokens.
    SIZE_T GetInstanceOffset();

    // If this is a "normal" static, get its absolute address
    // TLS and context-specific statics are "special".
    TADDR GetStaticAddress();

    //
    // Data members
    //
    mdFieldDef      m_fldMetadataToken;
    // m_fFldStorageAvailable is true whenever the storage for this field is available.
    // If this is a field that is newly added with EnC and hasn't had any storage
    // allocated yet, then fldEnCAvailable will be false.
    BOOL            m_fFldStorageAvailable;

    // Bits that specify what type of field this is
    bool            m_fFldIsStatic;             // true if static field, false if instance field
    bool            m_fFldIsRVA;                // true if static relative to module address
    bool            m_fFldIsTLS;                // true if thread-specific static
    bool            m_fFldIsPrimitive;          // Only true if this is a value type masquerading as a primitive.
    bool            m_fFldIsCollectibleStatic;  // true if this is a static field on a collectible type

private:
    // The m_fldInstanceOffset and m_pFldStaticAddress are mutually exclusive. Only one is ever set at a time.
    SIZE_T          m_fldInstanceOffset;        // The offset of a field within an object instance
                                                // For EnC fields, this isn't actually within the object instance,
                                                // but has been cooked to still be relative to the beginning of
                                                // the object.
    TADDR           m_pFldStaticAddress;        // The absolute target address of a static field

    PCCOR_SIGNATURE m_fldSignatureCache;        // This is passed across as null. It is a RS-only cache, and SHOULD
                                                // NEVER BE ACCESSED DIRECTLY!
    ULONG           m_fldSignatureCacheSize;    // This is passed across as 0. It is a RS-only cache, and SHOULD
                                                // NEVER BE ACCESSED DIRECTLY!
public:
    VMPTR_FieldDesc m_vmFieldDesc;

}; // class FieldData

// ClassInfo holds information about a type (class or other structured type), including a list of its fields
class MSLAYOUT ClassInfo
{
public:
    ClassInfo();
    ~ClassInfo();

    void Clear();

    // Size of object in bytes, for non-generic types. Note: this is NOT valid for constructed value types,
    // e.g. value type Pair<DateTime,int>. Use CordbType::m_objectSize instead.
    SIZE_T      m_objectSize;

    // list of structs containing information about all the fields in this Class, along with the number of entries
    // in the list. Does not include inherited fields. DON'T KEEP POINTERS TO ELEMENTS OF m_fieldList AROUND!!
    // This may be deleted if the class gets EnC'd.
    DacDbiArrayList<FieldData> m_fieldList;

}; // class ClassInfo

// EnCHangingFieldInfo holds information describing a field added with Edit And Continue. This data
// is passed from the DAC to the DI in response to a request for EnC field info.
class MSLAYOUT EnCHangingFieldInfo
{
public:
    // Init will initialize fields, taking into account whether the field is static or not.
    void Init(VMPTR_Object         pObject,
              SIZE_T               offset,
              mdFieldDef           fieldToken,
              CorElementType       elementType,
              mdTypeDef            metadataToken,
              VMPTR_DomainAssembly vmDomainAssembly);

    DebuggerIPCE_BasicTypeData GetObjectTypeData() const { return m_objectTypeData; };
    mdFieldDef GetFieldToken() const { return m_fldToken; };
    VMPTR_Object GetVmObject() const { return m_vmObject; };
    SIZE_T GetOffsetToVars() const { return m_offsetToVars; };

private:
    DebuggerIPCE_BasicTypeData m_objectTypeData; // type data for the EnC field
    VMPTR_Object               m_vmObject;       // object instance to which the field has been added--if the field is
                                                 // static, this will be NULL instead of pointing to an instance
    SIZE_T                     m_offsetToVars;   // offset to the beginning of variable storage in the object
    mdFieldDef                 m_fldToken;       // metadata token for the added field

}; // EnCHangingFieldInfo

// TypeHandleToExpandedTypeInfo returns different DebuggerIPCE_ExpandedTypeData objects
// depending on whether the object value that the TypeData corresponds to is
// boxed or not. Different parts of the API transfer objects in slightly different ways.
// AllBoxed:
//    For GetAndSendObjectData all values are boxed,
//
// OnlyPrimitivesUnboxed:
//     When returning results from FuncEval only "true" structs
//     get boxed, i.e. primitives are unboxed.
//
// NoValueTypeBoxing:
//     TypeHandleToExpandedTypeInfo is also used to report type parameters,
//     and in this case none of the types are considered boxed.
enum AreValueTypesBoxed { NoValueTypeBoxing, OnlyPrimitivesUnboxed, AllBoxed };

// TypeRefData is used for resolving a type reference (see code:CordbModule::ResolveTypeRef and
// code:DacDbiInterfaceImpl::ResolveTypeReference) to store relevant information about the type
typedef struct MSLAYOUT
{
    // domain file for the type
    VMPTR_DomainAssembly vmDomainAssembly;
    // metadata token for the type. This may be a typeRef (for requests) or a typeDef (for responses).
    mdToken              typeToken;
} TypeRefData;

// @dbgtodo Microsoft inspection: get rid of IPCE type.
// TypeInfoList encapsulates a list of type data instances and the length of the list.
typedef DacDbiArrayList<DebuggerIPCE_TypeArgData> TypeInfoList;

// ArgInfoList encapsulates a list of type data instances for arguments for a top-level
// type and the length of the list.
typedef DacDbiArrayList<DebuggerIPCE_BasicTypeData> ArgInfoList;

// TypeParamsList encapsulate a list of type parameters and the length of the list
typedef DacDbiArrayList<DebuggerIPCE_ExpandedTypeData> TypeParamsList;

// A struct for passing version information from DBI to DAC.
// See code:CordbProcess::CordbProcess#DBIVersionChecking for more information.
const DWORD kCurrentDacDbiProtocolBreakingChangeCounter = 1; struct DbiVersion { DWORD m_dwFormat; // the format of this DbiVersion instance DWORD m_dwDbiVersionMS; // version of the DBI DLL, in the convention used by VS_FIXEDFILEINFO DWORD m_dwDbiVersionLS; DWORD m_dwProtocolBreakingChangeCounter; // initially this was reserved and always set to 0 // Now we use it as a counter to explicitly introduce breaking changes // between DBI and DAC when we have our IPC transport in the middle // If DBI and DAC don't agree on the same value CheckDbiVersion will return CORDBG_E_INCOMPATIBLE_PROTOCOL // Please document every time this value changes // 0 - initial value // 1 - Indicates that the protocol now supports the GetRemoteInterfaceHashAndTimestamp message // The message must have ID 2, with signature: // OUT DWORD & hash1, OUT DWORD & hash2, OUT DWORD & hash3, OUT DWORD & hash4, OUT DWORD & timestamp1, OUT DWORD & timestamp2 // The hash can be used as an indicator of many other breaking changes providing // easier automated enforcement during development. It is NOT recommended to use // the hash as a release versioning mechanism however. DWORD m_dwReservedMustBeZero1; // reserved for future use }; // The way in which a thread is blocking on an object enum DacBlockingReason { DacBlockReason_MonitorCriticalSection, DacBlockReason_MonitorEvent }; // Information about an object which is blocking a managed thread struct DacBlockingObject { VMPTR_Object vmBlockingObject; VMPTR_AppDomain vmAppDomain; DWORD dwTimeout; DacBlockingReason blockingReason; }; // Opaque user defined data used in callbacks typedef void* CALLBACK_DATA; struct MonitorLockInfo { VMPTR_Thread lockOwner; DWORD acquisitionCount; }; struct MSLAYOUT DacGcReference { VMPTR_AppDomain vmDomain; // The AppDomain of the handle/object, may be null. union { CORDB_ADDRESS pObject; // A managed object, with the low bit set. 
VMPTR_OBJECTHANDLE objHnd; // A reference to the object, valid if (pAddress & 1) == 0 }; DWORD dwType; // Where the root came from. /* DependentSource - for HandleDependent RefCount - for HandleStrongRefCount Size - for HandleSizedByref */ UINT64 i64ExtraData; }; // struct DacGcReference struct MSLAYOUT DacExceptionCallStackData { VMPTR_AppDomain vmAppDomain; VMPTR_DomainAssembly vmDomainAssembly; CORDB_ADDRESS ip; mdMethodDef methodDef; BOOL isLastForeignExceptionFrame; }; // These represent the various states a SharedReJitInfo can be in. enum DacSharedReJitInfoState { // The profiler has requested a ReJit, so we've allocated stuff, but we haven't // called back to the profiler to get any info or indicate that the ReJit has // started. (This Info can be 'reused' for a new ReJit if the // profiler calls RequestReJit again before we transition to the next state.) kStateRequested = 0x00000000, // We have asked the profiler about this method via ICorProfilerFunctionControl, // and have thus stored the IL and codegen flags the profiler specified. Can only // transition to kStateReverted from this state. kStateActive = 0x00000001, // The methoddef has been reverted, but not freed yet. It (or its instantiations // for generics) *MAY* still be active on the stack someplace or have outstanding // memory references. kStateReverted = 0x00000002, kStateMask = 0x0000000F, }; struct MSLAYOUT DacSharedReJitInfo { DWORD m_state; CORDB_ADDRESS m_pbIL; DWORD m_dwCodegenFlags; ULONG m_cInstrumentedMapEntries; CORDB_ADDRESS m_rgInstrumentedMapEntries; }; // These represent the allocated bytes so far on the thread. struct MSLAYOUT DacThreadAllocInfo { ULONG64 m_allocBytesSOH; ULONG64 m_allocBytesUOH; }; #include "dacdbistructures.inl" #endif // DACDBISTRUCTURES_H_
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./src/mono/wasm/debugger/tests/debugger-test/debugger-test.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Linq; public partial class Math { //Only append content to this class as the test suite depends on line info public static int IntAdd(int a, int b) { int c = a + b; int d = c + b; int e = d + a; bool f = true; return e; } public static int UseComplex(int a, int b) { var complex = new Simple.Complex(10, "xx"); int c = a + b; int d = c + b; int e = d + a; int f = 0; e += complex.DoStuff(); return e; } delegate bool IsMathNull(Math m); public static int DelegatesTest() { Func<Math, bool> fn_func = (Math m) => m == null; Func<Math, bool> fn_func_null = null; Func<Math, bool>[] fn_func_arr = new Func<Math, bool>[] { (Math m) => m == null }; Math.IsMathNull fn_del = Math.IsMathNullDelegateTarget; var fn_del_arr = new Math.IsMathNull[] { Math.IsMathNullDelegateTarget }; var m_obj = new Math(); Math.IsMathNull fn_del_null = null; bool res = fn_func(m_obj) && fn_del(m_obj) && fn_del_arr[0](m_obj) && fn_del_null == null && fn_func_null == null && fn_func_arr[0] != null; // Unused locals Func<Math, bool> fn_func_unused = (Math m) => m == null; Func<Math, bool> fn_func_null_unused = null; Func<Math, bool>[] fn_func_arr_unused = new Func<Math, bool>[] { (Math m) => m == null }; Math.IsMathNull fn_del_unused = Math.IsMathNullDelegateTarget; Math.IsMathNull fn_del_null_unused = null; var fn_del_arr_unused = new Math.IsMathNull[] { Math.IsMathNullDelegateTarget }; OuterMethod(); Console.WriteLine("Just a test message, ignore"); return res ? 
0 : 1; } public static int GenericTypesTest() { var list = new System.Collections.Generic.Dictionary<Math[], IsMathNull>(); System.Collections.Generic.Dictionary<Math[], IsMathNull> list_null = null; var list_arr = new System.Collections.Generic.Dictionary<Math[], IsMathNull>[] { new System.Collections.Generic.Dictionary<Math[], IsMathNull>() }; System.Collections.Generic.Dictionary<Math[], IsMathNull>[] list_arr_null = null; Console.WriteLine($"list_arr.Length: {list_arr.Length}, list.Count: {list.Count}"); // Unused locals var list_unused = new System.Collections.Generic.Dictionary<Math[], IsMathNull>(); System.Collections.Generic.Dictionary<Math[], IsMathNull> list_null_unused = null; var list_arr_unused = new System.Collections.Generic.Dictionary<Math[], IsMathNull>[] { new System.Collections.Generic.Dictionary<Math[], IsMathNull>() }; System.Collections.Generic.Dictionary<Math[], IsMathNull>[] list_arr_null_unused = null; OuterMethod(); Console.WriteLine("Just a test message, ignore"); return 0; } static bool IsMathNullDelegateTarget(Math m) => m == null; public static void OuterMethod() { Console.WriteLine($"OuterMethod called"); var nim = new Math.NestedInMath(); var i = 5; var text = "Hello"; var new_i = nim.InnerMethod(i); Console.WriteLine($"i: {i}"); Console.WriteLine($"-- InnerMethod returned: {new_i}, nim: {nim}, text: {text}"); int k = 19; new_i = InnerMethod2("test string", new_i, out k); Console.WriteLine($"-- InnerMethod2 returned: {new_i}, and k: {k}"); } static int InnerMethod2(string s, int i, out int k) { k = i + 10; Console.WriteLine($"s: {s}, i: {i}, k: {k}"); return i - 2; } public class NestedInMath { public int InnerMethod(int i) { SimpleStructProperty = new SimpleStruct() { dt = new DateTime(2020, 1, 2, 3, 4, 5) }; int j = i + 10; string foo_str = "foo"; Console.WriteLine($"i: {i} and j: {j}, foo_str: {foo_str} "); j += 9; Console.WriteLine($"i: {i} and j: {j}"); return j; } Math m = new Math(); public async 
System.Threading.Tasks.Task<bool> AsyncMethod0(string s, int i) { string local0 = "value0"; await System.Threading.Tasks.Task.Delay(1); Console.WriteLine($"* time for the second await, local0: {local0}"); await AsyncMethodNoReturn(); return true; } public async System.Threading.Tasks.Task AsyncMethodNoReturn() { var ss = new SimpleStruct() { dt = new DateTime(2020, 1, 2, 3, 4, 5) }; var ss_arr = new SimpleStruct[] { }; //ss.gs.StringField = "field in GenericStruct"; //Console.WriteLine ($"Using the struct: {ss.dt}, {ss.gs.StringField}, ss_arr: {ss_arr.Length}"); string str = "AsyncMethodNoReturn's local"; //Console.WriteLine ($"* field m: {m}"); await System.Threading.Tasks.Task.Delay(1); Console.WriteLine($"str: {str}"); } public static async System.Threading.Tasks.Task<bool> AsyncTest(string s, int i) { var li = 10 + i; var ls = s + "test"; return await new NestedInMath().AsyncMethod0(s, i); } public SimpleStruct SimpleStructProperty { get; set; } } public static void PrimitiveTypesTest() { char c0 = '€'; char c1 = 'A'; // TODO: other types! 
// just trying to ensure vars don't get optimized out if (c0 < 32 || c1 > 32) Console.WriteLine($"{c0}, {c1}"); } public static int DelegatesSignatureTest() { Func<Math, GenericStruct<GenericStruct<int[]>>, GenericStruct<bool[]>> fn_func = (m, gs) => new GenericStruct<bool[]>(); Func<Math, GenericStruct<GenericStruct<int[]>>, GenericStruct<bool[]>> fn_func_del = GenericStruct<int>.DelegateTargetForSignatureTest; Func<Math, GenericStruct<GenericStruct<int[]>>, GenericStruct<bool[]>> fn_func_null = null; Func<bool> fn_func_only_ret = () => { Console.WriteLine($"hello"); return true; }; var fn_func_arr = new Func<Math, GenericStruct<GenericStruct<int[]>>, GenericStruct<bool[]>>[] { (m, gs) => new GenericStruct<bool[]> () }; Math.DelegateForSignatureTest fn_del = GenericStruct<int>.DelegateTargetForSignatureTest; Math.DelegateForSignatureTest fn_del_l = (m, gs) => new GenericStruct<bool[]> { StringField = "fn_del_l#lambda" }; var fn_del_arr = new Math.DelegateForSignatureTest[] { GenericStruct<int>.DelegateTargetForSignatureTest, (m, gs) => new GenericStruct<bool[]> { StringField = "fn_del_arr#1#lambda" } }; var m_obj = new Math(); Math.DelegateForSignatureTest fn_del_null = null; var gs_gs = new GenericStruct<GenericStruct<int[]>> { List = new System.Collections.Generic.List<GenericStruct<int[]>> { new GenericStruct<int[]> { StringField = "gs#List#0#StringField" }, new GenericStruct<int[]> { StringField = "gs#List#1#StringField" } } }; Math.DelegateWithVoidReturn fn_void_del = Math.DelegateTargetWithVoidReturn; var fn_void_del_arr = new Math.DelegateWithVoidReturn[] { Math.DelegateTargetWithVoidReturn }; Math.DelegateWithVoidReturn fn_void_del_null = null; var rets = new GenericStruct<bool[]>[] { fn_func(m_obj, gs_gs), fn_func_del(m_obj, gs_gs), fn_del(m_obj, gs_gs), fn_del_l(m_obj, gs_gs), fn_del_arr[0](m_obj, gs_gs), fn_func_arr[0](m_obj, gs_gs) }; var gs = new GenericStruct<int[]>(); fn_void_del(gs); fn_void_del_arr[0](gs); fn_func_only_ret(); foreach (var ret in 
rets) Console.WriteLine($"ret: {ret}"); OuterMethod(); Console.WriteLine($"- {gs_gs.List[0].StringField}"); return 0; } public static int ActionTSignatureTest() { Action<GenericStruct<int[]>> fn_action = (_) => { }; Action<GenericStruct<int[]>> fn_action_del = Math.DelegateTargetWithVoidReturn; Action fn_action_bare = () => { }; Action<GenericStruct<int[]>> fn_action_null = null; var fn_action_arr = new Action<GenericStruct<int[]>>[] { (gs) => new GenericStruct<int[]>(), Math.DelegateTargetWithVoidReturn, null }; var gs = new GenericStruct<int[]>(); fn_action(gs); fn_action_del(gs); fn_action_arr[0](gs); fn_action_bare(); OuterMethod(); return 0; } public static int NestedDelegatesTest() { Func<Func<int, bool>, bool> fn_func = (_) => { return true; }; Func<Func<int, bool>, bool> fn_func_null = null; var fn_func_arr = new Func<Func<int, bool>, bool>[] { (gs) => { return true; } }; var fn_del_arr = new Func<Func<int, bool>, bool>[] { DelegateTargetForNestedFunc<Func<int, bool>> }; var m_obj = new Math(); Func<Func<int, bool>, bool> fn_del_null = null; Func<int, bool> fs = (i) => i == 0; fn_func(fs); fn_del_arr[0](fs); fn_func_arr[0](fs); OuterMethod(); return 0; } public static void DelegatesAsMethodArgsTest() { var _dst_arr = new DelegateForSignatureTest[] { GenericStruct<int>.DelegateTargetForSignatureTest, (m, gs) => new GenericStruct<bool[]>() }; Func<char[], bool> _fn_func = (cs) => cs.Length == 0; Action<GenericStruct<int>[]> _fn_action = (gss) => { }; new Math().MethodWithDelegateArgs(_dst_arr, _fn_func, _fn_action); } void MethodWithDelegateArgs(Math.DelegateForSignatureTest[] dst_arr, Func<char[], bool> fn_func, Action<GenericStruct<int>[]> fn_action) { Console.WriteLine($"Placeholder for breakpoint"); OuterMethod(); } public static async System.Threading.Tasks.Task MethodWithDelegatesAsyncTest() { await new Math().MethodWithDelegatesAsync(); } async System.Threading.Tasks.Task MethodWithDelegatesAsync() { var _dst_arr = new DelegateForSignatureTest[] { 
GenericStruct<int>.DelegateTargetForSignatureTest, (m, gs) => new GenericStruct<bool[]>() }; Func<char[], bool> _fn_func = (cs) => cs.Length == 0; Action<GenericStruct<int>[]> _fn_action = (gss) => { }; Console.WriteLine($"Placeholder for breakpoint"); await System.Threading.Tasks.Task.CompletedTask; } public delegate void DelegateWithVoidReturn(GenericStruct<int[]> gs); public static void DelegateTargetWithVoidReturn(GenericStruct<int[]> gs) { } public delegate GenericStruct<bool[]> DelegateForSignatureTest(Math m, GenericStruct<GenericStruct<int[]>> gs); static bool DelegateTargetForNestedFunc<T>(T arg) => true; public struct SimpleStruct { public DateTime dt; public GenericStruct<DateTime> gs; } public struct GenericStruct<T> { public System.Collections.Generic.List<T> List; public string StringField; public static GenericStruct<bool[]> DelegateTargetForSignatureTest(Math m, GenericStruct<GenericStruct<T[]>> gs) => new GenericStruct<bool[]>(); } public static void TestSimpleStrings() { string str_null = null; string str_empty = String.Empty; string str_spaces = " "; string str_esc = "\\"; var strings = new[] { str_null, str_empty, str_spaces, str_esc }; Console.WriteLine($"break here"); } } public class DebuggerTest { public static void run_all() { locals(); } public static int locals() { int l_int = 1; char l_char = 'A'; long l_long = Int64.MaxValue; ulong l_ulong = UInt64.MaxValue; locals_inner(); return 0; } static void locals_inner() { } public static void BoxingTest() { int? n_i = 5; object o_i = n_i.Value; object o_n_i = n_i; object o_s = "foobar"; object o_obj = new Math(); DebuggerTests.ValueTypesTest.GenericStruct<int>? n_gs = new DebuggerTests.ValueTypesTest.GenericStruct<int> { StringField = "n_gs#StringField" }; object o_gs = n_gs.Value; object o_n_gs = n_gs; DateTime? 
n_dt = new DateTime(2310, 1, 2, 3, 4, 5); object o_dt = n_dt.Value; object o_n_dt = n_dt; object o_null = null; object o_ia = new int[] {918, 58971}; Console.WriteLine ($"break here"); } public static async System.Threading.Tasks.Task BoxingTestAsync() { int? n_i = 5; object o_i = n_i.Value; object o_n_i = n_i; object o_s = "foobar"; object o_obj = new Math(); DebuggerTests.ValueTypesTest.GenericStruct<int>? n_gs = new DebuggerTests.ValueTypesTest.GenericStruct<int> { StringField = "n_gs#StringField" }; object o_gs = n_gs.Value; object o_n_gs = n_gs; DateTime? n_dt = new DateTime(2310, 1, 2, 3, 4, 5); object o_dt = n_dt.Value; object o_n_dt = n_dt; object o_null = null; object o_ia = new int[] {918, 58971}; Console.WriteLine ($"break here"); await System.Threading.Tasks.Task.CompletedTask; } public static void BoxedTypeObjectTest() { int i = 5; object o0 = i; object o1 = o0; object o2 = o1; object o3 = o2; object oo = new object(); object oo0 = oo; Console.WriteLine ($"break here"); } public static async System.Threading.Tasks.Task BoxedTypeObjectTestAsync() { int i = 5; object o0 = i; object o1 = o0; object o2 = o1; object o3 = o2; object oo = new object(); object oo0 = oo; Console.WriteLine ($"break here"); await System.Threading.Tasks.Task.CompletedTask; } public static void BoxedAsClass() { ValueType vt_dt = new DateTime(4819, 5, 6, 7, 8, 9); ValueType vt_gs = new Math.GenericStruct<string> { StringField = "vt_gs#StringField" }; Enum e = new System.IO.FileMode(); Enum ee = System.IO.FileMode.Append; Console.WriteLine ($"break here"); } public static async System.Threading.Tasks.Task BoxedAsClassAsync() { ValueType vt_dt = new DateTime(4819, 5, 6, 7, 8, 9); ValueType vt_gs = new Math.GenericStruct<string> { StringField = "vt_gs#StringField" }; Enum e = new System.IO.FileMode(); Enum ee = System.IO.FileMode.Append; Console.WriteLine ($"break here"); await System.Threading.Tasks.Task.CompletedTask; } } public class MulticastDelegateTestClass { event 
EventHandler<string> TestEvent; MulticastDelegate Delegate; public static void run() { var obj = new MulticastDelegateTestClass(); obj.Test(); obj.TestAsync().Wait(); } public void Test() { TestEvent += (_, s) => Console.WriteLine(s); TestEvent += (_, s) => Console.WriteLine(s + "qwe"); Delegate = TestEvent; TestEvent?.Invoke(this, Delegate?.ToString()); } public async System.Threading.Tasks.Task TestAsync() { TestEvent += (_, s) => Console.WriteLine(s); TestEvent += (_, s) => Console.WriteLine(s + "qwe"); Delegate = TestEvent; TestEvent?.Invoke(this, Delegate?.ToString()); await System.Threading.Tasks.Task.CompletedTask; } } public class EmptyClass { public static void StaticMethodWithNoLocals() { Console.WriteLine($"break here"); } public static async System.Threading.Tasks.Task StaticMethodWithNoLocalsAsync() { Console.WriteLine($"break here"); await System.Threading.Tasks.Task.CompletedTask; } public static void run() { StaticMethodWithNoLocals(); StaticMethodWithNoLocalsAsync().Wait(); } } public struct EmptyStruct { public static void StaticMethodWithNoLocals() { Console.WriteLine($"break here"); } public static async System.Threading.Tasks.Task StaticMethodWithNoLocalsAsync() { Console.WriteLine($"break here"); await System.Threading.Tasks.Task.CompletedTask; } public static void StaticMethodWithLocalEmptyStruct() { var es = new EmptyStruct(); Console.WriteLine($"break here"); } public static async System.Threading.Tasks.Task StaticMethodWithLocalEmptyStructAsync() { var es = new EmptyStruct(); Console.WriteLine($"break here"); await System.Threading.Tasks.Task.CompletedTask; } public static void run() { StaticMethodWithNoLocals(); StaticMethodWithNoLocalsAsync().Wait(); StaticMethodWithLocalEmptyStruct(); StaticMethodWithLocalEmptyStructAsync().Wait(); } } public class LoadDebuggerTest { public static void LoadLazyAssembly(string asm_base64, string pdb_base64) { byte[] asm_bytes = Convert.FromBase64String(asm_base64); byte[] pdb_bytes = null; if (pdb_base64 
!= null) pdb_bytes = Convert.FromBase64String(pdb_base64); var loadedAssembly = System.Runtime.Loader.AssemblyLoadContext.Default.LoadFromStream(new System.IO.MemoryStream(asm_bytes), new System.IO.MemoryStream(pdb_bytes)); Console.WriteLine($"Loaded - {loadedAssembly}"); } } public class HiddenSequencePointTest { public static void StepOverHiddenSP() { Console.WriteLine("first line"); #line hidden Console.WriteLine("second line"); StepOverHiddenSP2(); #line default Console.WriteLine("third line"); MethodWithHiddenLinesAtTheEnd(); } public static void StepOverHiddenSP2() { Console.WriteLine("StepOverHiddenSP2"); } public static void MethodWithHiddenLinesAtTheEnd() { Console.WriteLine ($"MethodWithHiddenLinesAtTheEnd"); #line hidden Console.WriteLine ($"debugger shouldn't be able to step here"); } #line default } public class LoadDebuggerTestALC { static System.Reflection.Assembly loadedAssembly; public static void LoadLazyAssemblyInALC(string asm_base64, string pdb_base64) { var context = new System.Runtime.Loader.AssemblyLoadContext("testContext", true); byte[] asm_bytes = Convert.FromBase64String(asm_base64); byte[] pdb_bytes = null; if (pdb_base64 != null) pdb_bytes = Convert.FromBase64String(pdb_base64); loadedAssembly = context.LoadFromStream(new System.IO.MemoryStream(asm_bytes), new System.IO.MemoryStream(pdb_bytes)); Console.WriteLine($"Loaded - {loadedAssembly}"); } public static void RunMethodInALC(string type_name, string method_name) { var myType = loadedAssembly.GetType(type_name); var myMethod = myType.GetMethod(method_name); myMethod.Invoke(null, new object[] { 5, 10 }); } } public class TestHotReload { static System.Reflection.Assembly loadedAssembly; static byte[] dmeta_data1_bytes; static byte[] dil_data1_bytes; static byte[] dpdb_data1_bytes; static byte[] dmeta_data2_bytes; static byte[] dil_data2_bytes; static byte[] dpdb_data2_bytes; public static void LoadLazyHotReload(string asm_base64, string pdb_base64, string dmeta_data1, string 
dil_data1, string dpdb_data1, string dmeta_data2, string dil_data2, string dpdb_data2) { byte[] asm_bytes = Convert.FromBase64String(asm_base64); byte[] pdb_bytes = Convert.FromBase64String(pdb_base64); dmeta_data1_bytes = Convert.FromBase64String(dmeta_data1); dil_data1_bytes = Convert.FromBase64String(dil_data1); dpdb_data1_bytes = Convert.FromBase64String(dpdb_data1); dmeta_data2_bytes = Convert.FromBase64String(dmeta_data2); dil_data2_bytes = Convert.FromBase64String(dil_data2); dpdb_data2_bytes = Convert.FromBase64String(dpdb_data2); loadedAssembly = System.Runtime.Loader.AssemblyLoadContext.Default.LoadFromStream(new System.IO.MemoryStream(asm_bytes), new System.IO.MemoryStream(pdb_bytes)); Console.WriteLine($"Loaded - {loadedAssembly}"); } public static void RunMethod(string className, string methodName) { var ty = typeof(System.Reflection.Metadata.MetadataUpdater); var mi = ty.GetMethod("GetCapabilities", System.Reflection.BindingFlags.NonPublic | System.Reflection.BindingFlags.Static, Array.Empty<Type>()); if (mi == null) return; var caps = mi.Invoke(null, null) as string; if (String.IsNullOrEmpty(caps)) return; var myType = loadedAssembly.GetType($"ApplyUpdateReferencedAssembly.{className}"); var myMethod = myType.GetMethod(methodName); myMethod.Invoke(null, null); ApplyUpdate(loadedAssembly, 1); myType = loadedAssembly.GetType($"ApplyUpdateReferencedAssembly.{className}"); myMethod = myType.GetMethod(methodName); myMethod.Invoke(null, null); ApplyUpdate(loadedAssembly, 2); myType = loadedAssembly.GetType($"ApplyUpdateReferencedAssembly.{className}"); myMethod = myType.GetMethod(methodName); myMethod.Invoke(null, null); } internal static void ApplyUpdate (System.Reflection.Assembly assm, int version) { string basename = assm.Location; if (basename == "") basename = assm.GetName().Name + ".dll"; Console.Error.WriteLine($"Apply Delta Update for {basename}, revision {version}"); if (version == 1) { System.Reflection.Metadata.MetadataUpdater.ApplyUpdate(assm, 
dmeta_data1_bytes, dil_data1_bytes, dpdb_data1_bytes); } else if (version == 2) { System.Reflection.Metadata.MetadataUpdater.ApplyUpdate(assm, dmeta_data2_bytes, dil_data2_bytes, dpdb_data2_bytes); } } } public class Something { public string Name { get; set; } public Something() => Name = "Same of something"; public override string ToString() => Name; } public class Foo { public string Bar => Stuffs.First(x => x.Name.StartsWith('S')).Name; public System.Collections.Generic.List<Something> Stuffs { get; } = Enumerable.Range(0, 10).Select(x => new Something()).ToList(); public string Lorem { get; set; } = "Safe"; public string Ipsum { get; set; } = "Side"; public Something What { get; } = new Something(); public int Bart() { int ret; if (Lorem.StartsWith('S')) ret = 0; else ret = 1; return ret; } public static void RunBart() { Foo foo = new Foo(); foo.Bart(); Console.WriteLine(foo.OtherBar()); foo.OtherBarAsync().Wait(10); } public bool OtherBar() { var a = 1; var b = 2; var x = "Stew"; var y = "00.123"; var c = a + b == 3 || b + a == 2; var d = TimeSpan.TryParseExact(y, @"ss\.fff", null, out var ts) && x.Contains('S'); var e = TimeSpan.TryParseExact(y, @"ss\.fff", null, out var ts1) && x.Contains('S'); var f = TimeSpan.TryParseExact(y, @"ss\.fff", null, out var ts2) && x.Contains('S'); var g = TimeSpan.TryParseExact(y, @"ss\.fff", null, out var ts3) && x.Contains('S'); return d && e == true; } public async System.Threading.Tasks.Task OtherBarAsync() { var a = 1; var b = 2; var x = "Stew"; var y = "00.123"; var c = a + b == 3 || b + a == 2; var d = TimeSpan.TryParseExact(y, @"ss\.fff", null, out var ts) && await AsyncMethod(); var e = TimeSpan.TryParseExact(y, @"ss\.fff", null, out var ts1) && await AsyncMethod(); var f = TimeSpan.TryParseExact(y, @"ss\.fff", null, out var ts2) && await AsyncMethod(); var g = await AsyncMethod() && await AsyncMethod(); Console.WriteLine(g); await System.Threading.Tasks.Task.CompletedTask; } public async 
System.Threading.Tasks.Task<bool> AsyncMethod() { await System.Threading.Tasks.Task.Delay(1); Console.WriteLine($"time for await"); return true; } } public class MainPage { public MainPage() { } int count = 0; private int someValue; public int SomeValue { get { return someValue; } set { someValue = value; count++; if (count == 10) { var view = 150; if (view != 50) { } System.Diagnostics.Debugger.Break(); } SomeValue = count; } } public static void CallSetValue() { var mainPage = new MainPage(); mainPage.SomeValue = 10; } } public class LoopClass { public static void LoopToBreak() { for (int i = 0; i < 10; i++) { Console.WriteLine($"should pause only on i == 3"); } Console.WriteLine("breakpoint to check"); } } public class SteppingInto { static int currentCount = 0; static MyIncrementer incrementer = new MyIncrementer(); public static void MethodToStep() { currentCount = incrementer.Increment(currentCount); } } public class MyIncrementer { private Func<DateTime> todayFunc = () => new DateTime(2061, 1, 5); // Wednesday public int Increment(int count) { var today = todayFunc(); if (today.DayOfWeek == DayOfWeek.Sunday) { return count + 2; } return count + 1; } } public class DebuggerAttribute { [System.Diagnostics.DebuggerHidden] public static void HiddenMethod() { var a = 9; } [System.Diagnostics.DebuggerHidden] public static void HiddenMethodUserBreak() { System.Diagnostics.Debugger.Break(); } public static void RunDebuggerHidden() { HiddenMethod(); HiddenMethodUserBreak(); } [System.Diagnostics.DebuggerStepThroughAttribute] public static void StepThrougBp() { var a = 0; a++; var b = 1; } [System.Diagnostics.DebuggerStepThroughAttribute] public static void StepThrougUserBp() { System.Diagnostics.Debugger.Break(); } public static void RunStepThrough() { StepThrougBp(); StepThrougUserBp(); } [System.Diagnostics.DebuggerNonUserCode] public static void NonUserCodeBp() { var a = 0; a++; var b = 1; } [System.Diagnostics.DebuggerNonUserCode] public static void 
NonUserCodeUserBp() { System.Diagnostics.Debugger.Break(); } public static void RunNonUserCode() { NonUserCodeBp(); NonUserCodeUserBp(); } [System.Diagnostics.DebuggerStepperBoundary] public static void BoundaryBp() { var a = 5; } [System.Diagnostics.DebuggerStepperBoundary] public static void BoundaryUserBp() { System.Diagnostics.Debugger.Break(); } [System.Diagnostics.DebuggerNonUserCode] public static void NonUserCodeForBoundaryEscape(Action boundaryTestFun) { boundaryTestFun(); } public static void RunNoBoundary() { NonUserCodeForBoundaryEscape(DebuggerAttribute.BoundaryBp); NonUserCodeForBoundaryEscape(DebuggerAttribute.BoundaryUserBp); } [System.Diagnostics.DebuggerStepThroughAttribute] [System.Diagnostics.DebuggerHidden] public static void StepThroughWithHiddenBp() { var a = 9; } [System.Diagnostics.DebuggerStepThroughAttribute] [System.Diagnostics.DebuggerHidden] public static void StepThroughWithHiddenUserBp() { System.Diagnostics.Debugger.Break(); } public static void RunStepThroughWithHidden() { StepThroughWithHiddenBp(); StepThroughWithHiddenUserBp(); } [System.Diagnostics.DebuggerStepThroughAttribute] [System.Diagnostics.DebuggerNonUserCode] public static void StepThroughWithNonUserCodeBp() { var a = 0; a++; var b = 1; } [System.Diagnostics.DebuggerStepThroughAttribute] [System.Diagnostics.DebuggerNonUserCode] public static void StepThroughWithNonUserCodeUserBp() { System.Diagnostics.Debugger.Break(); } public static void RunStepThroughWithNonUserCode() { StepThroughWithNonUserCodeBp(); StepThroughWithNonUserCodeUserBp(); } [System.Diagnostics.DebuggerNonUserCode] [System.Diagnostics.DebuggerHidden] public static void NonUserCodeWithHiddenBp() { var a = 9; } [System.Diagnostics.DebuggerNonUserCode] [System.Diagnostics.DebuggerHidden] public static void NonUserCodeWithHiddenUserBp() { System.Diagnostics.Debugger.Break(); } public static void RunNonUserCodeWithHidden() { NonUserCodeWithHiddenBp(); NonUserCodeWithHiddenUserBp(); } } public class 
DebugTypeFull { public static void CallToEvaluateLocal() { var asm = System.Reflection.Assembly.LoadFrom("debugger-test-with-full-debug-type.dll"); var myType = asm.GetType("DebuggerTests.ClassToInspectWithDebugTypeFull"); var myMethod = myType.GetConstructor(new Type[] { }); var a = myMethod.Invoke(new object[]{}); System.Diagnostics.Debugger.Break(); } } public class TestHotReloadUsingSDB { static System.Reflection.Assembly loadedAssembly; public static string LoadLazyHotReload(string asm_base64, string pdb_base64) { byte[] asm_bytes = Convert.FromBase64String(asm_base64); byte[] pdb_bytes = Convert.FromBase64String(pdb_base64); loadedAssembly = System.Runtime.Loader.AssemblyLoadContext.Default.LoadFromStream(new System.IO.MemoryStream(asm_bytes), new System.IO.MemoryStream(pdb_bytes)); var GUID = loadedAssembly.Modules.FirstOrDefault()?.ModuleVersionId.ToByteArray(); return Convert.ToBase64String(GUID); } public static string GetModuleGUID() { var GUID = loadedAssembly.Modules.FirstOrDefault()?.ModuleVersionId.ToByteArray(); return Convert.ToBase64String(GUID); } public static void RunMethod(string className, string methodName) { var myType = loadedAssembly.GetType($"ApplyUpdateReferencedAssembly.{className}"); var myMethod = myType.GetMethod(methodName); myMethod.Invoke(null, null); } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Linq; public partial class Math { //Only append content to this class as the test suite depends on line info public static int IntAdd(int a, int b) { int c = a + b; int d = c + b; int e = d + a; bool f = true; return e; } public static int UseComplex(int a, int b) { var complex = new Simple.Complex(10, "xx"); int c = a + b; int d = c + b; int e = d + a; int f = 0; e += complex.DoStuff(); return e; } delegate bool IsMathNull(Math m); public static int DelegatesTest() { Func<Math, bool> fn_func = (Math m) => m == null; Func<Math, bool> fn_func_null = null; Func<Math, bool>[] fn_func_arr = new Func<Math, bool>[] { (Math m) => m == null }; Math.IsMathNull fn_del = Math.IsMathNullDelegateTarget; var fn_del_arr = new Math.IsMathNull[] { Math.IsMathNullDelegateTarget }; var m_obj = new Math(); Math.IsMathNull fn_del_null = null; bool res = fn_func(m_obj) && fn_del(m_obj) && fn_del_arr[0](m_obj) && fn_del_null == null && fn_func_null == null && fn_func_arr[0] != null; // Unused locals Func<Math, bool> fn_func_unused = (Math m) => m == null; Func<Math, bool> fn_func_null_unused = null; Func<Math, bool>[] fn_func_arr_unused = new Func<Math, bool>[] { (Math m) => m == null }; Math.IsMathNull fn_del_unused = Math.IsMathNullDelegateTarget; Math.IsMathNull fn_del_null_unused = null; var fn_del_arr_unused = new Math.IsMathNull[] { Math.IsMathNullDelegateTarget }; OuterMethod(); Console.WriteLine("Just a test message, ignore"); return res ? 
0 : 1; } public static int GenericTypesTest() { var list = new System.Collections.Generic.Dictionary<Math[], IsMathNull>(); System.Collections.Generic.Dictionary<Math[], IsMathNull> list_null = null; var list_arr = new System.Collections.Generic.Dictionary<Math[], IsMathNull>[] { new System.Collections.Generic.Dictionary<Math[], IsMathNull>() }; System.Collections.Generic.Dictionary<Math[], IsMathNull>[] list_arr_null = null; Console.WriteLine($"list_arr.Length: {list_arr.Length}, list.Count: {list.Count}"); // Unused locals var list_unused = new System.Collections.Generic.Dictionary<Math[], IsMathNull>(); System.Collections.Generic.Dictionary<Math[], IsMathNull> list_null_unused = null; var list_arr_unused = new System.Collections.Generic.Dictionary<Math[], IsMathNull>[] { new System.Collections.Generic.Dictionary<Math[], IsMathNull>() }; System.Collections.Generic.Dictionary<Math[], IsMathNull>[] list_arr_null_unused = null; OuterMethod(); Console.WriteLine("Just a test message, ignore"); return 0; } static bool IsMathNullDelegateTarget(Math m) => m == null; public static void OuterMethod() { Console.WriteLine($"OuterMethod called"); var nim = new Math.NestedInMath(); var i = 5; var text = "Hello"; var new_i = nim.InnerMethod(i); Console.WriteLine($"i: {i}"); Console.WriteLine($"-- InnerMethod returned: {new_i}, nim: {nim}, text: {text}"); int k = 19; new_i = InnerMethod2("test string", new_i, out k); Console.WriteLine($"-- InnerMethod2 returned: {new_i}, and k: {k}"); } static int InnerMethod2(string s, int i, out int k) { k = i + 10; Console.WriteLine($"s: {s}, i: {i}, k: {k}"); return i - 2; } public class NestedInMath { public int InnerMethod(int i) { SimpleStructProperty = new SimpleStruct() { dt = new DateTime(2020, 1, 2, 3, 4, 5) }; int j = i + 10; string foo_str = "foo"; Console.WriteLine($"i: {i} and j: {j}, foo_str: {foo_str} "); j += 9; Console.WriteLine($"i: {i} and j: {j}"); return j; } Math m = new Math(); public async 
System.Threading.Tasks.Task<bool> AsyncMethod0(string s, int i) { string local0 = "value0"; await System.Threading.Tasks.Task.Delay(1); Console.WriteLine($"* time for the second await, local0: {local0}"); await AsyncMethodNoReturn(); return true; } public async System.Threading.Tasks.Task AsyncMethodNoReturn() { var ss = new SimpleStruct() { dt = new DateTime(2020, 1, 2, 3, 4, 5) }; var ss_arr = new SimpleStruct[] { }; //ss.gs.StringField = "field in GenericStruct"; //Console.WriteLine ($"Using the struct: {ss.dt}, {ss.gs.StringField}, ss_arr: {ss_arr.Length}"); string str = "AsyncMethodNoReturn's local"; //Console.WriteLine ($"* field m: {m}"); await System.Threading.Tasks.Task.Delay(1); Console.WriteLine($"str: {str}"); } public static async System.Threading.Tasks.Task<bool> AsyncTest(string s, int i) { var li = 10 + i; var ls = s + "test"; return await new NestedInMath().AsyncMethod0(s, i); } public SimpleStruct SimpleStructProperty { get; set; } } public static void PrimitiveTypesTest() { char c0 = '€'; char c1 = 'A'; // TODO: other types! 
// just trying to ensure vars don't get optimized out if (c0 < 32 || c1 > 32) Console.WriteLine($"{c0}, {c1}"); } public static int DelegatesSignatureTest() { Func<Math, GenericStruct<GenericStruct<int[]>>, GenericStruct<bool[]>> fn_func = (m, gs) => new GenericStruct<bool[]>(); Func<Math, GenericStruct<GenericStruct<int[]>>, GenericStruct<bool[]>> fn_func_del = GenericStruct<int>.DelegateTargetForSignatureTest; Func<Math, GenericStruct<GenericStruct<int[]>>, GenericStruct<bool[]>> fn_func_null = null; Func<bool> fn_func_only_ret = () => { Console.WriteLine($"hello"); return true; }; var fn_func_arr = new Func<Math, GenericStruct<GenericStruct<int[]>>, GenericStruct<bool[]>>[] { (m, gs) => new GenericStruct<bool[]> () }; Math.DelegateForSignatureTest fn_del = GenericStruct<int>.DelegateTargetForSignatureTest; Math.DelegateForSignatureTest fn_del_l = (m, gs) => new GenericStruct<bool[]> { StringField = "fn_del_l#lambda" }; var fn_del_arr = new Math.DelegateForSignatureTest[] { GenericStruct<int>.DelegateTargetForSignatureTest, (m, gs) => new GenericStruct<bool[]> { StringField = "fn_del_arr#1#lambda" } }; var m_obj = new Math(); Math.DelegateForSignatureTest fn_del_null = null; var gs_gs = new GenericStruct<GenericStruct<int[]>> { List = new System.Collections.Generic.List<GenericStruct<int[]>> { new GenericStruct<int[]> { StringField = "gs#List#0#StringField" }, new GenericStruct<int[]> { StringField = "gs#List#1#StringField" } } }; Math.DelegateWithVoidReturn fn_void_del = Math.DelegateTargetWithVoidReturn; var fn_void_del_arr = new Math.DelegateWithVoidReturn[] { Math.DelegateTargetWithVoidReturn }; Math.DelegateWithVoidReturn fn_void_del_null = null; var rets = new GenericStruct<bool[]>[] { fn_func(m_obj, gs_gs), fn_func_del(m_obj, gs_gs), fn_del(m_obj, gs_gs), fn_del_l(m_obj, gs_gs), fn_del_arr[0](m_obj, gs_gs), fn_func_arr[0](m_obj, gs_gs) }; var gs = new GenericStruct<int[]>(); fn_void_del(gs); fn_void_del_arr[0](gs); fn_func_only_ret(); foreach (var ret in 
rets) Console.WriteLine($"ret: {ret}"); OuterMethod(); Console.WriteLine($"- {gs_gs.List[0].StringField}"); return 0; } public static int ActionTSignatureTest() { Action<GenericStruct<int[]>> fn_action = (_) => { }; Action<GenericStruct<int[]>> fn_action_del = Math.DelegateTargetWithVoidReturn; Action fn_action_bare = () => { }; Action<GenericStruct<int[]>> fn_action_null = null; var fn_action_arr = new Action<GenericStruct<int[]>>[] { (gs) => new GenericStruct<int[]>(), Math.DelegateTargetWithVoidReturn, null }; var gs = new GenericStruct<int[]>(); fn_action(gs); fn_action_del(gs); fn_action_arr[0](gs); fn_action_bare(); OuterMethod(); return 0; } public static int NestedDelegatesTest() { Func<Func<int, bool>, bool> fn_func = (_) => { return true; }; Func<Func<int, bool>, bool> fn_func_null = null; var fn_func_arr = new Func<Func<int, bool>, bool>[] { (gs) => { return true; } }; var fn_del_arr = new Func<Func<int, bool>, bool>[] { DelegateTargetForNestedFunc<Func<int, bool>> }; var m_obj = new Math(); Func<Func<int, bool>, bool> fn_del_null = null; Func<int, bool> fs = (i) => i == 0; fn_func(fs); fn_del_arr[0](fs); fn_func_arr[0](fs); OuterMethod(); return 0; } public static void DelegatesAsMethodArgsTest() { var _dst_arr = new DelegateForSignatureTest[] { GenericStruct<int>.DelegateTargetForSignatureTest, (m, gs) => new GenericStruct<bool[]>() }; Func<char[], bool> _fn_func = (cs) => cs.Length == 0; Action<GenericStruct<int>[]> _fn_action = (gss) => { }; new Math().MethodWithDelegateArgs(_dst_arr, _fn_func, _fn_action); } void MethodWithDelegateArgs(Math.DelegateForSignatureTest[] dst_arr, Func<char[], bool> fn_func, Action<GenericStruct<int>[]> fn_action) { Console.WriteLine($"Placeholder for breakpoint"); OuterMethod(); } public static async System.Threading.Tasks.Task MethodWithDelegatesAsyncTest() { await new Math().MethodWithDelegatesAsync(); } async System.Threading.Tasks.Task MethodWithDelegatesAsync() { var _dst_arr = new DelegateForSignatureTest[] { 
GenericStruct<int>.DelegateTargetForSignatureTest, (m, gs) => new GenericStruct<bool[]>() }; Func<char[], bool> _fn_func = (cs) => cs.Length == 0; Action<GenericStruct<int>[]> _fn_action = (gss) => { }; Console.WriteLine($"Placeholder for breakpoint"); await System.Threading.Tasks.Task.CompletedTask; } public delegate void DelegateWithVoidReturn(GenericStruct<int[]> gs); public static void DelegateTargetWithVoidReturn(GenericStruct<int[]> gs) { } public delegate GenericStruct<bool[]> DelegateForSignatureTest(Math m, GenericStruct<GenericStruct<int[]>> gs); static bool DelegateTargetForNestedFunc<T>(T arg) => true; public struct SimpleStruct { public DateTime dt; public GenericStruct<DateTime> gs; } public struct GenericStruct<T> { public System.Collections.Generic.List<T> List; public string StringField; public static GenericStruct<bool[]> DelegateTargetForSignatureTest(Math m, GenericStruct<GenericStruct<T[]>> gs) => new GenericStruct<bool[]>(); } public static void TestSimpleStrings() { string str_null = null; string str_empty = String.Empty; string str_spaces = " "; string str_esc = "\\"; var strings = new[] { str_null, str_empty, str_spaces, str_esc }; Console.WriteLine($"break here"); } } public class DebuggerTest { public static void run_all() { locals(); } public static int locals() { int l_int = 1; char l_char = 'A'; long l_long = Int64.MaxValue; ulong l_ulong = UInt64.MaxValue; locals_inner(); return 0; } static void locals_inner() { } public static void BoxingTest() { int? n_i = 5; object o_i = n_i.Value; object o_n_i = n_i; object o_s = "foobar"; object o_obj = new Math(); DebuggerTests.ValueTypesTest.GenericStruct<int>? n_gs = new DebuggerTests.ValueTypesTest.GenericStruct<int> { StringField = "n_gs#StringField" }; object o_gs = n_gs.Value; object o_n_gs = n_gs; DateTime? 
n_dt = new DateTime(2310, 1, 2, 3, 4, 5); object o_dt = n_dt.Value; object o_n_dt = n_dt; object o_null = null; object o_ia = new int[] {918, 58971}; Console.WriteLine ($"break here"); } public static async System.Threading.Tasks.Task BoxingTestAsync() { int? n_i = 5; object o_i = n_i.Value; object o_n_i = n_i; object o_s = "foobar"; object o_obj = new Math(); DebuggerTests.ValueTypesTest.GenericStruct<int>? n_gs = new DebuggerTests.ValueTypesTest.GenericStruct<int> { StringField = "n_gs#StringField" }; object o_gs = n_gs.Value; object o_n_gs = n_gs; DateTime? n_dt = new DateTime(2310, 1, 2, 3, 4, 5); object o_dt = n_dt.Value; object o_n_dt = n_dt; object o_null = null; object o_ia = new int[] {918, 58971}; Console.WriteLine ($"break here"); await System.Threading.Tasks.Task.CompletedTask; } public static void BoxedTypeObjectTest() { int i = 5; object o0 = i; object o1 = o0; object o2 = o1; object o3 = o2; object oo = new object(); object oo0 = oo; Console.WriteLine ($"break here"); } public static async System.Threading.Tasks.Task BoxedTypeObjectTestAsync() { int i = 5; object o0 = i; object o1 = o0; object o2 = o1; object o3 = o2; object oo = new object(); object oo0 = oo; Console.WriteLine ($"break here"); await System.Threading.Tasks.Task.CompletedTask; } public static void BoxedAsClass() { ValueType vt_dt = new DateTime(4819, 5, 6, 7, 8, 9); ValueType vt_gs = new Math.GenericStruct<string> { StringField = "vt_gs#StringField" }; Enum e = new System.IO.FileMode(); Enum ee = System.IO.FileMode.Append; Console.WriteLine ($"break here"); } public static async System.Threading.Tasks.Task BoxedAsClassAsync() { ValueType vt_dt = new DateTime(4819, 5, 6, 7, 8, 9); ValueType vt_gs = new Math.GenericStruct<string> { StringField = "vt_gs#StringField" }; Enum e = new System.IO.FileMode(); Enum ee = System.IO.FileMode.Append; Console.WriteLine ($"break here"); await System.Threading.Tasks.Task.CompletedTask; } } public class MulticastDelegateTestClass { event 
EventHandler<string> TestEvent; MulticastDelegate Delegate; public static void run() { var obj = new MulticastDelegateTestClass(); obj.Test(); obj.TestAsync().Wait(); } public void Test() { TestEvent += (_, s) => Console.WriteLine(s); TestEvent += (_, s) => Console.WriteLine(s + "qwe"); Delegate = TestEvent; TestEvent?.Invoke(this, Delegate?.ToString()); } public async System.Threading.Tasks.Task TestAsync() { TestEvent += (_, s) => Console.WriteLine(s); TestEvent += (_, s) => Console.WriteLine(s + "qwe"); Delegate = TestEvent; TestEvent?.Invoke(this, Delegate?.ToString()); await System.Threading.Tasks.Task.CompletedTask; } } public class EmptyClass { public static void StaticMethodWithNoLocals() { Console.WriteLine($"break here"); } public static async System.Threading.Tasks.Task StaticMethodWithNoLocalsAsync() { Console.WriteLine($"break here"); await System.Threading.Tasks.Task.CompletedTask; } public static void run() { StaticMethodWithNoLocals(); StaticMethodWithNoLocalsAsync().Wait(); } } public struct EmptyStruct { public static void StaticMethodWithNoLocals() { Console.WriteLine($"break here"); } public static async System.Threading.Tasks.Task StaticMethodWithNoLocalsAsync() { Console.WriteLine($"break here"); await System.Threading.Tasks.Task.CompletedTask; } public static void StaticMethodWithLocalEmptyStruct() { var es = new EmptyStruct(); Console.WriteLine($"break here"); } public static async System.Threading.Tasks.Task StaticMethodWithLocalEmptyStructAsync() { var es = new EmptyStruct(); Console.WriteLine($"break here"); await System.Threading.Tasks.Task.CompletedTask; } public static void run() { StaticMethodWithNoLocals(); StaticMethodWithNoLocalsAsync().Wait(); StaticMethodWithLocalEmptyStruct(); StaticMethodWithLocalEmptyStructAsync().Wait(); } } public class LoadDebuggerTest { public static void LoadLazyAssembly(string asm_base64, string pdb_base64) { byte[] asm_bytes = Convert.FromBase64String(asm_base64); byte[] pdb_bytes = null; if (pdb_base64 
!= null) pdb_bytes = Convert.FromBase64String(pdb_base64); var loadedAssembly = System.Runtime.Loader.AssemblyLoadContext.Default.LoadFromStream(new System.IO.MemoryStream(asm_bytes), new System.IO.MemoryStream(pdb_bytes)); Console.WriteLine($"Loaded - {loadedAssembly}"); } } public class HiddenSequencePointTest { public static void StepOverHiddenSP() { Console.WriteLine("first line"); #line hidden Console.WriteLine("second line"); StepOverHiddenSP2(); #line default Console.WriteLine("third line"); MethodWithHiddenLinesAtTheEnd(); } public static void StepOverHiddenSP2() { Console.WriteLine("StepOverHiddenSP2"); } public static void MethodWithHiddenLinesAtTheEnd() { Console.WriteLine ($"MethodWithHiddenLinesAtTheEnd"); #line hidden Console.WriteLine ($"debugger shouldn't be able to step here"); } #line default } public class LoadDebuggerTestALC { static System.Reflection.Assembly loadedAssembly; public static void LoadLazyAssemblyInALC(string asm_base64, string pdb_base64) { var context = new System.Runtime.Loader.AssemblyLoadContext("testContext", true); byte[] asm_bytes = Convert.FromBase64String(asm_base64); byte[] pdb_bytes = null; if (pdb_base64 != null) pdb_bytes = Convert.FromBase64String(pdb_base64); loadedAssembly = context.LoadFromStream(new System.IO.MemoryStream(asm_bytes), new System.IO.MemoryStream(pdb_bytes)); Console.WriteLine($"Loaded - {loadedAssembly}"); } public static void RunMethodInALC(string type_name, string method_name) { var myType = loadedAssembly.GetType(type_name); var myMethod = myType.GetMethod(method_name); myMethod.Invoke(null, new object[] { 5, 10 }); } } public class TestHotReload { static System.Reflection.Assembly loadedAssembly; static byte[] dmeta_data1_bytes; static byte[] dil_data1_bytes; static byte[] dpdb_data1_bytes; static byte[] dmeta_data2_bytes; static byte[] dil_data2_bytes; static byte[] dpdb_data2_bytes; public static void LoadLazyHotReload(string asm_base64, string pdb_base64, string dmeta_data1, string 
dil_data1, string dpdb_data1, string dmeta_data2, string dil_data2, string dpdb_data2) { byte[] asm_bytes = Convert.FromBase64String(asm_base64); byte[] pdb_bytes = Convert.FromBase64String(pdb_base64); dmeta_data1_bytes = Convert.FromBase64String(dmeta_data1); dil_data1_bytes = Convert.FromBase64String(dil_data1); dpdb_data1_bytes = Convert.FromBase64String(dpdb_data1); dmeta_data2_bytes = Convert.FromBase64String(dmeta_data2); dil_data2_bytes = Convert.FromBase64String(dil_data2); dpdb_data2_bytes = Convert.FromBase64String(dpdb_data2); loadedAssembly = System.Runtime.Loader.AssemblyLoadContext.Default.LoadFromStream(new System.IO.MemoryStream(asm_bytes), new System.IO.MemoryStream(pdb_bytes)); Console.WriteLine($"Loaded - {loadedAssembly}"); } public static void RunMethod(string className, string methodName) { var ty = typeof(System.Reflection.Metadata.MetadataUpdater); var mi = ty.GetMethod("GetCapabilities", System.Reflection.BindingFlags.NonPublic | System.Reflection.BindingFlags.Static, Array.Empty<Type>()); if (mi == null) return; var caps = mi.Invoke(null, null) as string; if (String.IsNullOrEmpty(caps)) return; var myType = loadedAssembly.GetType($"ApplyUpdateReferencedAssembly.{className}"); var myMethod = myType.GetMethod(methodName); myMethod.Invoke(null, null); ApplyUpdate(loadedAssembly, 1); myType = loadedAssembly.GetType($"ApplyUpdateReferencedAssembly.{className}"); myMethod = myType.GetMethod(methodName); myMethod.Invoke(null, null); ApplyUpdate(loadedAssembly, 2); myType = loadedAssembly.GetType($"ApplyUpdateReferencedAssembly.{className}"); myMethod = myType.GetMethod(methodName); myMethod.Invoke(null, null); } internal static void ApplyUpdate (System.Reflection.Assembly assm, int version) { string basename = assm.Location; if (basename == "") basename = assm.GetName().Name + ".dll"; Console.Error.WriteLine($"Apply Delta Update for {basename}, revision {version}"); if (version == 1) { System.Reflection.Metadata.MetadataUpdater.ApplyUpdate(assm, 
dmeta_data1_bytes, dil_data1_bytes, dpdb_data1_bytes); } else if (version == 2) { System.Reflection.Metadata.MetadataUpdater.ApplyUpdate(assm, dmeta_data2_bytes, dil_data2_bytes, dpdb_data2_bytes); } } } public class Something { public string Name { get; set; } public Something() => Name = "Same of something"; public override string ToString() => Name; } public class Foo { public string Bar => Stuffs.First(x => x.Name.StartsWith('S')).Name; public System.Collections.Generic.List<Something> Stuffs { get; } = Enumerable.Range(0, 10).Select(x => new Something()).ToList(); public string Lorem { get; set; } = "Safe"; public string Ipsum { get; set; } = "Side"; public Something What { get; } = new Something(); public int Bart() { int ret; if (Lorem.StartsWith('S')) ret = 0; else ret = 1; return ret; } public static void RunBart() { Foo foo = new Foo(); foo.Bart(); Console.WriteLine(foo.OtherBar()); foo.OtherBarAsync().Wait(10); } public bool OtherBar() { var a = 1; var b = 2; var x = "Stew"; var y = "00.123"; var c = a + b == 3 || b + a == 2; var d = TimeSpan.TryParseExact(y, @"ss\.fff", null, out var ts) && x.Contains('S'); var e = TimeSpan.TryParseExact(y, @"ss\.fff", null, out var ts1) && x.Contains('S'); var f = TimeSpan.TryParseExact(y, @"ss\.fff", null, out var ts2) && x.Contains('S'); var g = TimeSpan.TryParseExact(y, @"ss\.fff", null, out var ts3) && x.Contains('S'); return d && e == true; } public async System.Threading.Tasks.Task OtherBarAsync() { var a = 1; var b = 2; var x = "Stew"; var y = "00.123"; var c = a + b == 3 || b + a == 2; var d = TimeSpan.TryParseExact(y, @"ss\.fff", null, out var ts) && await AsyncMethod(); var e = TimeSpan.TryParseExact(y, @"ss\.fff", null, out var ts1) && await AsyncMethod(); var f = TimeSpan.TryParseExact(y, @"ss\.fff", null, out var ts2) && await AsyncMethod(); var g = await AsyncMethod() && await AsyncMethod(); Console.WriteLine(g); await System.Threading.Tasks.Task.CompletedTask; } public async 
System.Threading.Tasks.Task<bool> AsyncMethod() { await System.Threading.Tasks.Task.Delay(1); Console.WriteLine($"time for await"); return true; } } public class MainPage { public MainPage() { } int count = 0; private int someValue; public int SomeValue { get { return someValue; } set { someValue = value; count++; if (count == 10) { var view = 150; if (view != 50) { } System.Diagnostics.Debugger.Break(); } SomeValue = count; } } public static void CallSetValue() { var mainPage = new MainPage(); mainPage.SomeValue = 10; } } public class LoopClass { public static void LoopToBreak() { for (int i = 0; i < 10; i++) { Console.WriteLine($"should pause only on i == 3"); } Console.WriteLine("breakpoint to check"); } } public class SteppingInto { static int currentCount = 0; static MyIncrementer incrementer = new MyIncrementer(); public static void MethodToStep() { currentCount = incrementer.Increment(currentCount); } } public class MyIncrementer { private Func<DateTime> todayFunc = () => new DateTime(2061, 1, 5); // Wednesday public int Increment(int count) { var today = todayFunc(); if (today.DayOfWeek == DayOfWeek.Sunday) { return count + 2; } return count + 1; } } public class DebuggerAttribute { [System.Diagnostics.DebuggerHidden] public static void HiddenMethod() { var a = 9; } [System.Diagnostics.DebuggerHidden] public static void HiddenMethodUserBreak() { System.Diagnostics.Debugger.Break(); } public static void RunDebuggerHidden() { HiddenMethod(); HiddenMethodUserBreak(); } [System.Diagnostics.DebuggerStepThroughAttribute] public static void StepThrougBp() { var a = 0; a++; var b = 1; } [System.Diagnostics.DebuggerStepThroughAttribute] public static void StepThrougUserBp() { System.Diagnostics.Debugger.Break(); } public static void RunStepThrough() { StepThrougBp(); StepThrougUserBp(); } [System.Diagnostics.DebuggerNonUserCode] public static void NonUserCodeBp() { var a = 0; a++; var b = 1; } [System.Diagnostics.DebuggerNonUserCode] public static void 
NonUserCodeUserBp() { System.Diagnostics.Debugger.Break(); } public static void RunNonUserCode() { NonUserCodeBp(); NonUserCodeUserBp(); } [System.Diagnostics.DebuggerStepperBoundary] public static void BoundaryBp() { var a = 5; } [System.Diagnostics.DebuggerStepperBoundary] public static void BoundaryUserBp() { System.Diagnostics.Debugger.Break(); } [System.Diagnostics.DebuggerNonUserCode] public static void NonUserCodeForBoundaryEscape(Action boundaryTestFun) { boundaryTestFun(); } public static void RunNoBoundary() { NonUserCodeForBoundaryEscape(DebuggerAttribute.BoundaryBp); NonUserCodeForBoundaryEscape(DebuggerAttribute.BoundaryUserBp); } [System.Diagnostics.DebuggerStepThroughAttribute] [System.Diagnostics.DebuggerHidden] public static void StepThroughWithHiddenBp() { var a = 9; } [System.Diagnostics.DebuggerStepThroughAttribute] [System.Diagnostics.DebuggerHidden] public static void StepThroughWithHiddenUserBp() { System.Diagnostics.Debugger.Break(); } public static void RunStepThroughWithHidden() { StepThroughWithHiddenBp(); StepThroughWithHiddenUserBp(); } [System.Diagnostics.DebuggerStepThroughAttribute] [System.Diagnostics.DebuggerNonUserCode] public static void StepThroughWithNonUserCodeBp() { var a = 0; a++; var b = 1; } [System.Diagnostics.DebuggerStepThroughAttribute] [System.Diagnostics.DebuggerNonUserCode] public static void StepThroughWithNonUserCodeUserBp() { System.Diagnostics.Debugger.Break(); } public static void RunStepThroughWithNonUserCode() { StepThroughWithNonUserCodeBp(); StepThroughWithNonUserCodeUserBp(); } [System.Diagnostics.DebuggerNonUserCode] [System.Diagnostics.DebuggerHidden] public static void NonUserCodeWithHiddenBp() { var a = 9; } [System.Diagnostics.DebuggerNonUserCode] [System.Diagnostics.DebuggerHidden] public static void NonUserCodeWithHiddenUserBp() { System.Diagnostics.Debugger.Break(); } public static void RunNonUserCodeWithHidden() { NonUserCodeWithHiddenBp(); NonUserCodeWithHiddenUserBp(); } } public class 
DebugTypeFull { public static void CallToEvaluateLocal() { var asm = System.Reflection.Assembly.LoadFrom("debugger-test-with-full-debug-type.dll"); var myType = asm.GetType("DebuggerTests.ClassToInspectWithDebugTypeFull"); var myMethod = myType.GetConstructor(new Type[] { }); var a = myMethod.Invoke(new object[]{}); System.Diagnostics.Debugger.Break(); } } public class TestHotReloadUsingSDB { static System.Reflection.Assembly loadedAssembly; public static string LoadLazyHotReload(string asm_base64, string pdb_base64) { byte[] asm_bytes = Convert.FromBase64String(asm_base64); byte[] pdb_bytes = Convert.FromBase64String(pdb_base64); loadedAssembly = System.Runtime.Loader.AssemblyLoadContext.Default.LoadFromStream(new System.IO.MemoryStream(asm_bytes), new System.IO.MemoryStream(pdb_bytes)); var GUID = loadedAssembly.Modules.FirstOrDefault()?.ModuleVersionId.ToByteArray(); return Convert.ToBase64String(GUID); } public static string GetModuleGUID() { var GUID = loadedAssembly.Modules.FirstOrDefault()?.ModuleVersionId.ToByteArray(); return Convert.ToBase64String(GUID); } public static void RunMethod(string className, string methodName) { var myType = loadedAssembly.GetType($"ApplyUpdateReferencedAssembly.{className}"); var myMethod = myType.GetMethod(methodName); myMethod.Invoke(null, null); } }
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./src/native/external/brotli/common/dictionary.bin
timedownlifeleftbackcodedatashowonlysitecityopenjustlikefreeworktextyearoverbodyloveformbookplaylivelinehelphomesidemorewordlongthemviewfindpagedaysfullheadtermeachareafromtruemarkableuponhighdatelandnewsevennextcasebothpostusedmadehandherewhatnameLinkblogsizebaseheldmakemainuser') +holdendswithNewsreadweresigntakehavegameseencallpathwellplusmenufilmpartjointhislistgoodneedwayswestjobsmindalsologorichuseslastteamarmyfoodkingwilleastwardbestfirePageknowaway.pngmovethanloadgiveselfnotemuchfeedmanyrockicononcelookhidediedHomerulehostajaxinfoclublawslesshalfsomesuchzone100%onescareTimeracebluefourweekfacehopegavehardlostwhenparkkeptpassshiproomHTMLplanTypedonesavekeepflaglinksoldfivetookratetownjumpthusdarkcardfilefearstaykillthatfallautoever.comtalkshopvotedeepmoderestturnbornbandfellroseurl(skinrolecomeactsagesmeetgold.jpgitemvaryfeltthensenddropViewcopy1.0"</a>stopelseliestourpack.gifpastcss?graymean&gt;rideshotlatesaidroadvar feeljohnrickportfast'UA-dead</b>poorbilltypeU.S.woodmust2px;Inforankwidewantwalllead[0];paulwavesure$('#waitmassarmsgoesgainlangpaid!-- lockunitrootwalkfirmwifexml"songtest20pxkindrowstoolfontmailsafestarmapscorerainflowbabyspansays4px;6px;artsfootrealwikiheatsteptriporg/lakeweaktoldFormcastfansbankveryrunsjulytask1px;goalgrewslowedgeid="sets5px;.js?40pxif (soonseatnonetubezerosentreedfactintogiftharm18pxcamehillboldzoomvoideasyringfillpeakinitcost3px;jacktagsbitsrolleditknewnear<!--growJSONdutyNamesaleyou lotspainjazzcoldeyesfishwww.risktabsprev10pxrise25pxBlueding300,ballfordearnwildbox.fairlackverspairjunetechif(!pickevil$("#warmlorddoespull,000ideadrawhugespotfundburnhrefcellkeystickhourlossfuel12pxsuitdealRSS"agedgreyGET"easeaimsgirlaids8px;navygridtips#999warsladycars); }php?helltallwhomzh:*/ 100hall. 
A7px;pushchat0px;crew*/</hash75pxflatrare && tellcampontolaidmissskiptentfinemalegetsplot400, coolfeet.php<br>ericmostguidbelldeschairmathatom/img&#82luckcent000;tinygonehtmlselldrugFREEnodenick?id=losenullvastwindRSS wearrelybeensamedukenasacapewishgulfT23:hitsslotgatekickblurthey15px''););">msiewinsbirdsortbetaseekT18:ordstreemall60pxfarm’sboys[0].');"POSTbearkids);}}marytend(UK)quadzh:-siz----prop'); liftT19:viceandydebt>RSSpoolneckblowT16:doorevalT17:letsfailoralpollnovacolsgene —softrometillross<h3>pourfadepink<tr>mini)|!(minezh:barshear00);milk -->ironfreddiskwentsoilputs/js/holyT22:ISBNT20:adamsees<h2>json', 'contT21: RSSloopasiamoon</p>soulLINEfortcartT14:<h1>80px!--<9px;T04:mike:46ZniceinchYorkricezh:'));puremageparatonebond:37Z_of_']);000,zh:tankyardbowlbush:56ZJava30px |} %C3%:34ZjeffEXPIcashvisagolfsnowzh:quer.csssickmeatmin.binddellhirepicsrent:36ZHTTP-201fotowolfEND xbox:54ZBODYdick; } exit:35Zvarsbeat'});diet999;anne}}</[i].Langkm²wiretoysaddssealalex; }echonine.org005)tonyjewssandlegsroof000) 200winegeardogsbootgarycutstyletemption.xmlcockgang$('.50pxPh.Dmiscalanloandeskmileryanunixdisc);} dustclip). 
70px-200DVDs7]><tapedemoi++)wageeurophiloptsholeFAQsasin-26TlabspetsURL bulkcook;} HEAD[0])abbrjuan(198leshtwin</i>sonyguysfuckpipe|- !002)ndow[1];[]; Log salt bangtrimbath){ 00px });ko:feesad> s:// [];tollplug(){ { .js'200pdualboat.JPG); }quot); '); } 201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037201320122011201020092008200720062005200420032002200120001999199819971996199519941993199219911990198919881987198619851984198319821981198019791978197719761975197419731972197119701969196819671966196519641963196219611960195919581957195619551954195319521951195010001024139400009999comomásesteestaperotodohacecadaañobiendíaasívidacasootroforosolootracualdijosidograntipotemadebealgoquéestonadatrespococasabajotodasinoaguapuesunosantediceluisellamayozonaamorpisoobraclicellodioshoracasiзанаомрарутанепоотизнодотожеонихНаеебымыВысовывоНообПолиниРФНеМытыОнимдаЗаДаНуОбтеИзейнуммТыужفيأنمامعكلأورديافىهولملكاولهبسالإنهيأيقدهلثمبهلوليبلايبكشيامأمنتبيلنحبهممشوشfirstvideolightworldmediawhitecloseblackrightsmallbooksplacemusicfieldorderpointvalueleveltableboardhousegroupworksyearsstatetodaywaterstartstyledeathpowerphonenighterrorinputabouttermstitletoolseventlocaltimeslargewordsgamesshortspacefocusclearmodelblockguideradiosharewomenagainmoneyimagenamesyounglineslatercolorgreenfront&amp;watchforcepricerulesbeginaftervisitissueareasbelowindextotalhourslabelprintpressbuiltlinksspeedstudytradefoundsenseundershownformsrangeaddedstillmovedtakenaboveflashfixedoftenotherviewschecklegalriveritemsquickshapehumanexistgoingmoviethirdbasicpeacestagewidthloginideaswrotepagesusersdrivestorebreaksouthvoicesitesmonthwherebuildwhichearthforumthreesportpartyClicklowerlivesclasslayerentrystoryusagesoundcourtyour 
birthpopuptypesapplyImagebeinguppernoteseveryshowsmeansextramatchtrackknownearlybegansuperpapernorthlearngivennamedendedTermspartsGroupbrandusingwomanfalsereadyaudiotakeswhile.com/livedcasesdailychildgreatjudgethoseunitsneverbroadcoastcoverapplefilescyclesceneplansclickwritequeenpieceemailframeolderphotolimitcachecivilscaleenterthemetheretouchboundroyalaskedwholesincestock namefaithheartemptyofferscopeownedmightalbumthinkbloodarraymajortrustcanonunioncountvalidstoneStyleLoginhappyoccurleft:freshquitefilmsgradeneedsurbanfightbasishoverauto;route.htmlmixedfinalYour slidetopicbrownalonedrawnsplitreachRightdatesmarchquotegoodsLinksdoubtasyncthumballowchiefyouthnovel10px;serveuntilhandsCheckSpacequeryjamesequaltwice0,000Startpanelsongsroundeightshiftworthpostsleadsweeksavoidthesemilesplanesmartalphaplantmarksratesplaysclaimsalestextsstarswrong</h3>thing.org/multiheardPowerstandtokensolid(thisbringshipsstafftriedcallsfullyfactsagentThis //-->adminegyptEvent15px;Emailtrue"crossspentblogsbox">notedleavechinasizesguest</h4>robotheavytrue,sevengrandcrimesignsawaredancephase><!--en_US&#39;200px_namelatinenjoyajax.ationsmithU.S. 
holdspeterindianav">chainscorecomesdoingpriorShare1990sromanlistsjapanfallstrialowneragree</h2>abusealertopera"-//WcardshillsteamsPhototruthclean.php?saintmetallouismeantproofbriefrow">genretrucklooksValueFrame.net/--> <try { var makescostsplainadultquesttrainlaborhelpscausemagicmotortheir250pxleaststepsCountcouldglasssidesfundshotelawardmouthmovesparisgivesdutchtexasfruitnull,||[];top"> <!--POST"ocean<br/>floorspeakdepth sizebankscatchchart20px;aligndealswould50px;url="parksmouseMost ...</amongbrainbody none;basedcarrydraftreferpage_home.meterdelaydreamprovejoint</tr>drugs<!-- aprilidealallenexactforthcodeslogicView seemsblankports (200saved_linkgoalsgrantgreekhomesringsrated30px;whoseparse();" Blocklinuxjonespixel');">);if(-leftdavidhorseFocusraiseboxesTrackement</em>bar">.src=toweralt="cablehenry24px;setupitalysharpminortastewantsthis.resetwheelgirls/css/100%;clubsstuffbiblevotes 1000korea}); bandsqueue= {};80px;cking{ aheadclockirishlike ratiostatsForm"yahoo)[0];Aboutfinds</h1>debugtasksURL =cells})();12px;primetellsturns0x600.jpg"spainbeachtaxesmicroangel--></giftssteve-linkbody.}); mount (199FAQ</rogerfrankClass28px;feeds<h1><scotttests22px;drink) || lewisshall#039; for lovedwaste00px;ja:simon<fontreplymeetsuntercheaptightBrand) != dressclipsroomsonkeymobilmain.Name platefunnytreescom/"1.jpgwmodeparamSTARTleft idden, 201); } form.viruschairtransworstPagesitionpatch<!-- o-cacfirmstours,000 asiani++){adobe')[0]id=10both;menu .2.mi.png"kevincoachChildbruce2.jpgURL)+.jpg|suitesliceharry120" sweettr> name=diegopage swiss--> #fff;">Log.com"treatsheet) && 14px;sleepntentfiledja:id="cName"worseshots-box-delta &lt;bears:48Z<data-rural</a> spendbakershops= "";php">ction13px;brianhellosize=o=%2F joinmaybe<img img">, fjsimg" ")[0]MTopBType"newlyDanskczechtrailknows</h5>faq">zh-cn10); -1");type=bluestrulydavis.js';> <!steel you h2> form jesus100% menu. 
walesrisksumentddingb-likteachgif" vegasdanskeestishqipsuomisobredesdeentretodospuedeañosestátienehastaotrospartedondenuevohacerformamismomejormundoaquídíassóloayudafechatodastantomenosdatosotrassitiomuchoahoralugarmayorestoshorastenerantesfotosestaspaísnuevasaludforosmedioquienmesespoderchileserávecesdecirjoséestarventagrupohechoellostengoamigocosasnivelgentemismaairesjuliotemashaciafavorjuniolibrepuntobuenoautorabrilbuenatextomarzosaberlistaluegocómoenerojuegoperúhaberestoynuncamujervalorfueralibrogustaigualvotoscasosguíapuedosomosavisousteddebennochebuscafaltaeurosseriedichocursoclavecasasleónplazolargoobrasvistaapoyojuntotratavistocrearcampohemoscincocargopisosordenhacenáreadiscopedrocercapuedapapelmenorútilclarojorgecalleponertardenadiemarcasigueellassiglocochemotosmadreclaserestoniñoquedapasarbancohijosviajepabloéstevienereinodejarfondocanalnorteletracausatomarmanoslunesautosvillavendopesartipostengamarcollevapadreunidovamoszonasambosbandamariaabusomuchasubirriojavivirgradochicaallíjovendichaestantalessalirsuelopesosfinesllamabuscoéstalleganegroplazahumorpagarjuntadobleislasbolsabañohablaluchaÁreadicenjugarnotasvalleallácargadolorabajoestégustomentemariofirmacostofichaplatahogarartesleyesaquelmuseobasespocosmitadcielochicomiedoganarsantoetapadebesplayaredessietecortecoreadudasdeseoviejodeseaaguas&quot;domaincommonstatuseventsmastersystemactionbannerremovescrollupdateglobalmediumfilternumberchangeresultpublicscreenchoosenormaltravelissuessourcetargetspringmodulemobileswitchphotosborderregionitselfsocialactivecolumnrecordfollowtitle>eitherlengthfamilyfriendlayoutauthorcreatereviewsummerserverplayedplayerexpandpolicyformatdoublepointsseriespersonlivingdesignmonthsforcesuniqueweightpeopleenergynaturesearchfigurehavingcustomoffsetletterwindowsubmitrendergroupsuploadhealthmethodvideosschoolfutureshadowdebatevaluesObjectothersrightsleaguechromesimplenoticesharedendingseasonreportonlinesquarebuttonimagesenablemovinglatestwinterFranceperiodstrongrepeatLondondetailforme
ddemandsecurepassedtoggleplacesdevicestaticcitiesstreamyellowattackstreetflighthiddeninfo">openedusefulvalleycausesleadersecretseconddamagesportsexceptratingsignedthingseffectfieldsstatesofficevisualeditorvolumeReportmuseummoviesparentaccessmostlymother" id="marketgroundchancesurveybeforesymbolmomentspeechmotioninsidematterCenterobjectexistsmiddleEuropegrowthlegacymannerenoughcareeransweroriginportalclientselectrandomclosedtopicscomingfatheroptionsimplyraisedescapechosenchurchdefinereasoncorneroutputmemoryiframepolicemodelsNumberduringoffersstyleskilledlistedcalledsilvermargindeletebetterbrowselimitsGlobalsinglewidgetcenterbudgetnowrapcreditclaimsenginesafetychoicespirit-stylespreadmakingneededrussiapleaseextentScriptbrokenallowschargedividefactormember-basedtheoryconfigaroundworkedhelpedChurchimpactshouldalwayslogo" bottomlist">){var prefixorangeHeader.push(couplegardenbridgelaunchReviewtakingvisionlittledatingButtonbeautythemesforgotSearchanchoralmostloadedChangereturnstringreloadMobileincomesupplySourceordersviewed&nbsp;courseAbout island<html cookiename="amazonmodernadvicein</a>: The dialoghousesBEGIN MexicostartscentreheightaddingIslandassetsEmpireSchooleffortdirectnearlymanualSelect. Onejoinedmenu">PhilipawardshandleimportOfficeregardskillsnationSportsdegreeweekly (e.g.behinddoctorloggedunited</b></beginsplantsassistartistissued300px|canadaagencyschemeremainBrazilsamplelogo">beyond-scaleacceptservedmarineFootercamera</h1> _form"leavesstress" /> .gif" onloadloaderOxfordsistersurvivlistenfemaleDesignsize="appealtext">levelsthankshigherforcedanimalanyoneAfricaagreedrecentPeople<br />wonderpricesturned|| {};main">inlinesundaywrap">failedcensusminutebeaconquotes150px|estateremoteemail"linkedright;signalformal1.htmlsignupprincefloat:.png" forum.AccesspaperssoundsextendHeightsliderUTF-8"&amp; Before. 
WithstudioownersmanageprofitjQueryannualparamsboughtfamousgooglelongeri++) {israelsayingdecidehome">headerensurebranchpiecesblock;statedtop"><racingresize--&gt;pacitysexualbureau.jpg" 10,000obtaintitlesamount, Inc.comedymenu" lyricstoday.indeedcounty_logo.FamilylookedMarketlse ifPlayerturkey);var forestgivingerrorsDomain}else{insertBlog</footerlogin.fasteragents<body 10px 0pragmafridayjuniordollarplacedcoversplugin5,000 page">boston.test(avatartested_countforumsschemaindex,filledsharesreaderalert(appearSubmitline">body"> * TheThoughseeingjerseyNews</verifyexpertinjurywidth=CookieSTART across_imagethreadnativepocketbox"> System DavidcancertablesprovedApril reallydriveritem">more">boardscolorscampusfirst || [];media.guitarfinishwidth:showedOther .php" assumelayerswilsonstoresreliefswedenCustomeasily your String Whiltaylorclear:resortfrenchthough") + "<body>buyingbrandsMembername">oppingsector5px;">vspacepostermajor coffeemartinmaturehappen</nav>kansaslink">Images=falsewhile hspace0&amp; In powerPolski-colorjordanBottomStart -count2.htmlnews">01.jpgOnline-rightmillerseniorISBN 00,000 guidesvalue)ectionrepair.xml" rights.html-blockregExp:hoverwithinvirginphones</tr> using var >'); </td> </tr> 
bahasabrasilgalegomagyarpolskisrpskiردو中文简体繁體信息中国我们一个公司管理论坛可以服务时间个人产品自己企业查看工作联系没有网站所有评论中心文章用户首页作者技术问题相关下载搜索使用软件在线主题资料视频回复注册网络收藏内容推荐市场消息空间发布什么好友生活图片发展如果手机新闻最新方式北京提供关于更多这个系统知道游戏广告其他发表安全第一会员进行点击版权电子世界设计免费教育加入活动他们商品博客现在上海如何已经留言详细社区登录本站需要价格支持国际链接国家建设朋友阅读法律位置经济选择这样当前分类排行因为交易最后音乐不能通过行业科技可能设备合作大家社会研究专业全部项目这里还是开始情况电脑文件品牌帮助文化资源大学学习地址浏览投资工程要求怎么时候功能主要目前资讯城市方法电影招聘声明任何健康数据美国汽车介绍但是交流生产所以电话显示一些单位人员分析地图旅游工具学生系列网友帖子密码频道控制地区基本全国网上重要第二喜欢进入友情这些考试发现培训以上政府成为环境香港同时娱乐发送一定开发作品标准欢迎解决地方一下以及责任或者客户代表积分女人数码销售出现离线应用列表不同编辑统计查询不要有关机构很多播放组织政策直接能力来源時間看到热门关键专区非常英语百度希望美女比较知识规定建议部门意见精彩日本提高发言方面基金处理权限影片银行还有分享物品经营添加专家这种话题起来业务公告记录简介质量男人影响引用报告部分快速咨询时尚注意申请学校应该历史只是返回购买名称为了成功说明供应孩子专题程序一般會員只有其它保护而且今天窗口动态状态特别认为必须更新小说我們作为媒体包括那么一样国内是否根据电视学院具有过程由于人才出来不过正在明星故事关系标题商务输入一直基础教学了解建筑结果全球通知计划对于艺术相册发生真的建立等级类型经验实现制作来自标签以下原创无法其中個人一切指南关闭集团第三关注因此照片深圳商业广州日期高级最近综合表示专辑行为交通评价觉得精华家庭完成感觉安装得到邮件制度食品虽然转载报价记者方案行政人民用品东西提出酒店然后付款热点以前完全发帖设置领导工业医院看看经典原因平台各种增加材料新增之后职业效果今年论文我国告诉版主修改参与打印快乐机械观点存在精神获得利用继续你们这么模式语言能够雅虎操作风格一起科学体育短信条件治疗运动产业会议导航先生联盟可是問題结构作用调查資料自动负责农业访问实施接受讨论那个反馈加强女性范围服務休闲今日客服觀看参加的话一点保证图书有效测试移动才能决定股票不断需求不得办法之间采用营销投诉目标爱情摄影有些複製文学机会数字装修购物农村全面精品其实事情水平提示上市谢谢普通教师上传类别歌曲拥有创新配件只要时代資訊达到人生订阅老师展示心理贴子網站主題自然级别简单改革那些来说打开代码删除证券节目重点次數多少规划资金找到以后大全主页最佳回答天下保障现代检查投票小时沒有正常甚至代理目录公开复制金融幸福版本形成准备行情回到思想怎样协议认证最好产生按照服装广东动漫采购新手组图面板参考政治容易天地努力人们升级速度人物调整流行造成文字韩国贸易开展相關表现影视如此美容大小报道条款心情许多法规家居书店连接立即举报技巧奥运登入以来理论事件自由中华办公妈妈真正不错全文合同价值别人监督具体世纪团队创业承担增长有人保持商家维修台湾左右股份答案实际电信经理生命宣传任务正式特色下来协会只能当然重新內容指导运行日志賣家超过土地浙江支付推出站长杭州执行制造之一推广现场描述变化传统歌手保险课程医疗经过过去之前收入年度杂志美丽最高登陆未来加工免责教程版块身体重庆出售成本形式土豆出價东方邮箱南京求职取得职位相信页面分钟网页确定图例网址积极错误目的宝贝机关风险授权病毒宠物除了評論疾病及时求购站点儿童每天中央认识每个天津字体台灣维护本页个性官方常见相机战略应当律师方便校园股市房屋栏目员工导致突然道具本网结合档案劳动另外美元引起改变第四会计說明隐私宝宝规范消费共同忘记体系带来名字發表开放加盟受到二手大量成人数量共享区域女孩原则所在结束通信超级配置当时优秀性感房产遊戲出口提交就业保健程度参数事业整个山东情感特殊分類搜尋属于门户财务声音及其财经坚持干部成立利益考虑成都包装用戶比赛文明招商完整真是眼睛伙伴威望领域卫生优惠論壇公共良好充分符合附件特点不可英文资产根本明显密碼公众民族更加享受同学启动适合原来问答本文美食绿色稳定终于生物供求搜狐力量严重永远写真有限竞争对象费用不好绝对十分促进点评影音优势不少欣赏并且有点方向全新信用设施形象资格突破随着重大于是毕业智能化工完美商城统一出版打造產品概况用于保留因素中國存储贴图最愛长期口价理财基地安排武汉里面创建天空首先完善驱动下面不再诚信意义阳光英国漂亮军事玩家群众农民即可名稱家具动画想到注明小学性能考研硬件观看清楚搞笑首頁黄金适用江苏真实主管阶段註冊翻
译权利做好似乎通讯施工狀態也许环保培养概念大型机票理解匿名cuandoenviarmadridbuscariniciotiempoporquecuentaestadopuedenjuegoscontraestánnombretienenperfilmaneraamigosciudadcentroaunquepuedesdentroprimerpreciosegúnbuenosvolverpuntossemanahabíaagostonuevosunidoscarlosequiponiñosmuchosalgunacorreoimagenpartirarribamaríahombreempleoverdadcambiomuchasfueronpasadolíneaparecenuevascursosestabaquierolibroscuantoaccesomiguelvarioscuatrotienesgruposseráneuropamediosfrenteacercademásofertacochesmodeloitalialetrasalgúncompracualesexistecuerposiendoprensallegarviajesdineromurciapodrápuestodiariopuebloquieremanuelpropiocrisisciertoseguromuertefuentecerrargrandeefectopartesmedidapropiaofrecetierrae-mailvariasformasfuturoobjetoseguirriesgonormasmismosúnicocaminositiosrazóndebidopruebatoledoteníajesúsesperococinaorigentiendacientocádizhablarseríalatinafuerzaestiloguerraentraréxitolópezagendavídeoevitarpaginametrosjavierpadresfácilcabezaáreassalidaenvíojapónabusosbienestextosllevarpuedanfuertecomúnclaseshumanotenidobilbaounidadestáseditarcreadoдлячтокакилиэтовсеегопритакещеужеКакбезбылониВсеподЭтотомчемнетлетразонагдемнеДляПринаснихтемктогодвоттамСШАмаяЧтовасвамемуТакдванамэтиэтуВамтехпротутнаддняВоттринейВаснимсамтотрубОнимирнееОООлицэтаОнанемдоммойдвеоносудकेहैकीसेकाकोऔरपरनेएककिभीइसकरतोहोआपहीयहयातकथाjagranआजजोअबदोगईजागएहमइनवहयेथेथीघरजबदीकईजीवेनईनएहरउसमेकमवोलेसबमईदेओरआमबसभरबनचलमनआगसीलीعلىإلىهذاآخرعددالىهذهصورغيركانولابينعرضذلكهنايومقالعليانالكنحتىقبلوحةاخرفقطعبدركنإذاكمااحدإلافيهبعضكيفبحثومنوهوأناجدالهاسلمعندليسعبرصلىمنذبهاأنهمثلكنتالاحيثمصرشرححولوفياذالكلمرةانتالفأبوخاصأنتانهاليعضووقدابنخيربنتلكمشاءوهيابوقصصومارقمأحدنحنعدمرأياحةكتبدونيجبمنهتحتجهةسنةيتمكرةغزةنفسبيتللهلناتلكقلبلماعنهأولشيءنورأمافيكبكلذاترتببأنهمسانكبيعفقدحسنلهمشعرأهلشهرقطرطلبprofileservicedefaulthimselfdetailscontentsupportstartedmessagesuccessfashion<title>countryaccountcreatedstoriesresultsrunningprocesswritingobjectsvisiblewelcomearticleunknownnetworkcompanydynamicbrowserprivacyproblemServicerespectdisplayrequestreservewebsitehistoryfriendsopt
ionsworkingversionmillionchannelwindow.addressvisitedweathercorrectproductedirectforwardyou canremovedsubjectcontrolarchivecurrentreadinglibrarylimitedmanagerfurthersummarymachineminutesprivatecontextprogramsocietynumberswrittenenabledtriggersourcesloadingelementpartnerfinallyperfectmeaningsystemskeepingculture&quot;,journalprojectsurfaces&quot;expiresreviewsbalanceEnglishContentthroughPlease opinioncontactaverageprimaryvillageSpanishgallerydeclinemeetingmissionpopularqualitymeasuregeneralspeciessessionsectionwriterscounterinitialreportsfiguresmembersholdingdisputeearlierexpressdigitalpictureAnothermarriedtrafficleadingchangedcentralvictoryimages/reasonsstudiesfeaturelistingmust beschoolsVersionusuallyepisodeplayinggrowingobviousoverlaypresentactions</ul> wrapperalreadycertainrealitystorageanotherdesktopofferedpatternunusualDigitalcapitalWebsitefailureconnectreducedAndroiddecadesregular &amp; animalsreleaseAutomatgettingmethodsnothingPopularcaptionletterscapturesciencelicensechangesEngland=1&amp;History = new CentralupdatedSpecialNetworkrequirecommentwarningCollegetoolbarremainsbecauseelectedDeutschfinanceworkersquicklybetweenexactlysettingdiseaseSocietyweaponsexhibit&lt;!--Controlclassescoveredoutlineattacksdevices(windowpurposetitle="Mobile killingshowingItaliandroppedheavilyeffects-1']); confirmCurrentadvancesharingopeningdrawingbillionorderedGermanyrelated</form>includewhetherdefinedSciencecatalogArticlebuttonslargestuniformjourneysidebarChicagoholidayGeneralpassage,&quot;animatefeelingarrivedpassingnaturalroughly. 
The but notdensityBritainChineselack oftributeIreland" data-factorsreceivethat isLibraryhusbandin factaffairsCharlesradicalbroughtfindinglanding:lang="return leadersplannedpremiumpackageAmericaEdition]&quot;Messageneed tovalue="complexlookingstationbelievesmaller-mobilerecordswant tokind ofFirefoxyou aresimilarstudiedmaximumheadingrapidlyclimatekingdomemergedamountsfoundedpioneerformuladynastyhow to SupportrevenueeconomyResultsbrothersoldierlargelycalling.&quot;AccountEdward segmentRobert effortsPacificlearnedup withheight:we haveAngelesnations_searchappliedacquiremassivegranted: falsetreatedbiggestbenefitdrivingStudiesminimumperhapsmorningsellingis usedreversevariant role="missingachievepromotestudentsomeoneextremerestorebottom:evolvedall thesitemapenglishway to AugustsymbolsCompanymattersmusicalagainstserving})(); paymenttroubleconceptcompareparentsplayersregionsmonitor ''The winningexploreadaptedGalleryproduceabilityenhancecareers). The collectSearch ancientexistedfooter handlerprintedconsoleEasternexportswindowsChannelillegalneutralsuggest_headersigning.html">settledwesterncausing-webkitclaimedJusticechaptervictimsThomas mozillapromisepartieseditionoutside:false,hundredOlympic_buttonauthorsreachedchronicdemandssecondsprotectadoptedprepareneithergreatlygreateroverallimprovecommandspecialsearch.worshipfundingthoughthighestinsteadutilityquarterCulturetestingclearlyexposedBrowserliberal} catchProjectexamplehide();FloridaanswersallowedEmperordefenseseriousfreedomSeveral-buttonFurtherout of != nulltrainedDenmarkvoid(0)/all.jspreventRequestStephen When observe</h2> Modern provide" alt="borders. 
For Many artistspoweredperformfictiontype ofmedicalticketsopposedCouncilwitnessjusticeGeorge Belgium...</a>twitternotablywaitingwarfare Other rankingphrasesmentionsurvivescholar</p> Countryignoredloss ofjust asGeorgiastrange<head><stopped1']); islandsnotableborder:list ofcarried100,000</h3> severalbecomesselect wedding00.htmlmonarchoff theteacherhighly biologylife ofor evenrise of&raquo;plusonehunting(thoughDouglasjoiningcirclesFor theAncientVietnamvehiclesuch ascrystalvalue =Windowsenjoyeda smallassumed<a id="foreign All rihow theDisplayretiredhoweverhidden;battlesseekingcabinetwas notlook atconductget theJanuaryhappensturninga:hoverOnline French lackingtypicalextractenemieseven ifgeneratdecidedare not/searchbeliefs-image:locatedstatic.login">convertviolententeredfirst">circuitFinlandchemistshe was10px;">as suchdivided</span>will beline ofa greatmystery/index.fallingdue to railwaycollegemonsterdescentit withnuclearJewish protestBritishflowerspredictreformsbutton who waslectureinstantsuicidegenericperiodsmarketsSocial fishingcombinegraphicwinners<br /><by the NaturalPrivacycookiesoutcomeresolveSwedishbrieflyPersianso muchCenturydepictscolumnshousingscriptsnext tobearingmappingrevisedjQuery(-width:title">tooltipSectiondesignsTurkishyounger.match(})(); burningoperatedegreessource=Richardcloselyplasticentries</tr> color:#ul id="possessrollingphysicsfailingexecutecontestlink toDefault<br /> : true,chartertourismclassicproceedexplain</h1> online.?xml vehelpingdiamonduse theairlineend -->).attr(readershosting#ffffffrealizeVincentsignals src="/ProductdespitediversetellingPublic held inJoseph theatreaffects<style>a largedoesn'tlater, ElementfaviconcreatorHungaryAirportsee theso thatMichaelSystemsPrograms, and width=e&quot;tradingleft"> personsGolden Affairsgrammarformingdestroyidea ofcase ofoldest this is.src = cartoonregistrCommonsMuslimsWhat isin manymarkingrevealsIndeed,equally/show_aoutdoorescape(Austriageneticsystem,In the sittingHe alsoIslandsAcademy <!--Daniel 
bindingblock">imposedutilizeAbraham(except{width:putting).html(|| []; DATA[ *kitchenmountedactual dialectmainly _blank'installexpertsif(typeIt also&copy; ">Termsborn inOptionseasterntalkingconcerngained ongoingjustifycriticsfactoryits ownassaultinvitedlastinghis ownhref="/" rel="developconcertdiagramdollarsclusterphp?id=alcohol);})();using a><span>vesselsrevivalAddressamateurandroidallegedillnesswalkingcentersqualifymatchesunifiedextinctDefensedied in <!-- customslinkingLittle Book ofeveningmin.js?are thekontakttoday's.html" target=wearingAll Rig; })();raising Also, crucialabout">declare--> <scfirefoxas muchappliesindex, s, but type = <!--towardsRecordsPrivateForeignPremierchoicesVirtualreturnsCommentPoweredinline;povertychamberLiving volumesAnthonylogin" RelatedEconomyreachescuttinggravitylife inChapter-shadowNotable</td> returnstadiumwidgetsvaryingtravelsheld bywho arework infacultyangularwho hadairporttown of Some 'click'chargeskeywordit willcity of(this);Andrew unique checkedor more300px; return;rsion="pluginswithin herselfStationFederalventurepublishsent totensionactresscome tofingersDuke ofpeople,exploitwhat isharmonya major":"httpin his menu"> monthlyofficercouncilgainingeven inSummarydate ofloyaltyfitnessand wasemperorsupremeSecond hearingRussianlongestAlbertalateralset of small">.appenddo withfederalbank ofbeneathDespiteCapitalgrounds), and percentit fromclosingcontainInsteadfifteenas well.yahoo.respondfighterobscurereflectorganic= Math.editingonline paddinga wholeonerroryear ofend of barrierwhen itheader home ofresumedrenamedstrong>heatingretainscloudfrway of March 1knowingin partBetweenlessonsclosestvirtuallinks">crossedEND -->famous awardedLicenseHealth fairly wealthyminimalAfricancompetelabel">singingfarmersBrasil)discussreplaceGregoryfont copursuedappearsmake uproundedboth ofblockedsaw theofficescoloursif(docuwhen heenforcepush(fuAugust UTF-8">Fantasyin mostinjuredUsuallyfarmingclosureobject defenceuse of Medical<body> evidentbe 
usedkeyCodesixteenIslamic#000000entire widely active (typeofone cancolor =speakerextendsPhysicsterrain<tbody>funeralviewingmiddle cricketprophetshifteddoctorsRussell targetcompactalgebrasocial-bulk ofman and</td> he left).val()false);logicalbankinghome tonaming Arizonacredits); }); founderin turnCollinsbefore But thechargedTitle">CaptainspelledgoddessTag -->Adding:but wasRecent patientback in=false&Lincolnwe knowCounterJudaismscript altered']); has theunclearEvent',both innot all <!-- placinghard to centersort ofclientsstreetsBernardassertstend tofantasydown inharbourFreedomjewelry/about..searchlegendsis mademodern only ononly toimage" linear painterand notrarely acronymdelivershorter00&amp;as manywidth="/* <![Ctitle =of the lowest picked escapeduses ofpeoples PublicMatthewtacticsdamagedway forlaws ofeasy to windowstrong simple}catch(seventhinfoboxwent topaintedcitizenI don'tretreat. Some ww."); bombingmailto:made in. Many carries||{};wiwork ofsynonymdefeatsfavoredopticalpageTraunless sendingleft"><comScorAll thejQuery.touristClassicfalse" Wilhelmsuburbsgenuinebishops.split(global followsbody ofnominalContactsecularleft tochiefly-hidden-banner</li> . When in bothdismissExplorealways via thespañolwelfareruling arrangecaptainhis sonrule ofhe tookitself,=0&amp;(calledsamplesto makecom/pagMartin Kennedyacceptsfull ofhandledBesides//--></able totargetsessencehim to its by common.mineralto takeways tos.org/ladvisedpenaltysimple:if theyLettersa shortHerbertstrikes groups.lengthflightsoverlapslowly lesser social </p> it intoranked rate oful> attemptpair ofmake itKontaktAntoniohaving ratings activestreamstrapped").css(hostilelead tolittle groups,Picture--> rows=" objectinverse<footerCustomV><\/scrsolvingChamberslaverywoundedwhereas!= 'undfor allpartly -right:Arabianbacked centuryunit ofmobile-Europe,is homerisk ofdesiredClintoncost ofage of become none ofp&quot;Middle ead')[0Criticsstudios>&copy;group">assemblmaking pressedwidget.ps:" ? 
rebuiltby someFormer editorsdelayedCanonichad thepushingclass="but arepartialBabylonbottom carrierCommandits useAs withcoursesa thirddenotesalso inHouston20px;">accuseddouble goal ofFamous ).bind(priests Onlinein Julyst + "gconsultdecimalhelpfulrevivedis veryr'+'iptlosing femalesis alsostringsdays ofarrivalfuture <objectforcingString(" /> here isencoded. The balloondone by/commonbgcolorlaw of Indianaavoidedbut the2px 3pxjquery.after apolicy.men andfooter-= true;for usescreen.Indian image =family,http:// &nbsp;driverseternalsame asnoticedviewers})(); is moreseasonsformer the newis justconsent Searchwas thewhy theshippedbr><br>width: height=made ofcuisineis thata very Admiral fixed;normal MissionPress, ontariocharsettry to invaded="true"spacingis mosta more totallyfall of}); immensetime inset outsatisfyto finddown tolot of Playersin Junequantumnot thetime todistantFinnishsrc = (single help ofGerman law andlabeledforestscookingspace">header-well asStanleybridges/globalCroatia About [0]; it, andgroupedbeing a){throwhe madelighterethicalFFFFFF"bottom"like a employslive inas seenprintermost ofub-linkrejectsand useimage">succeedfeedingNuclearinformato helpWomen'sNeitherMexicanprotein<table by manyhealthylawsuitdevised.push({sellerssimply Through.cookie Image(older">us.js"> Since universlarger open to!-- endlies in']); marketwho is ("DOMComanagedone fortypeof Kingdomprofitsproposeto showcenter;made itdressedwere inmixtureprecisearisingsrc = 'make a securedBaptistvoting var March 2grew upClimate.removeskilledway the</head>face ofacting right">to workreduceshas haderectedshow();action=book ofan area== "htt<header <html>conformfacing cookie.rely onhosted .customhe wentbut forspread Family a meansout theforums.footage">MobilClements" id="as highintense--><!--female is seenimpliedset thea stateand hisfastestbesidesbutton_bounded"><img Infoboxevents,a youngand areNative cheaperTimeoutand hasengineswon the(mostlyright: find a -bottomPrince area ofmore 
ofsearch_nature,legallyperiod,land ofor withinducedprovingmissilelocallyAgainstthe wayk&quot;px;"> pushed abandonnumeralCertainIn thismore inor somename isand, incrownedISBN 0-createsOctobermay notcenter late inDefenceenactedwish tobroadlycoolingonload=it. TherecoverMembersheight assumes<html> people.in one =windowfooter_a good reklamaothers,to this_cookiepanel">London,definescrushedbaptismcoastalstatus title" move tolost inbetter impliesrivalryservers SystemPerhapses and contendflowinglasted rise inGenesisview ofrising seem tobut in backinghe willgiven agiving cities.flow of Later all butHighwayonly bysign ofhe doesdiffersbattery&amp;lasinglesthreatsintegertake onrefusedcalled =US&ampSee thenativesby thissystem.head of:hover,lesbiansurnameand allcommon/header__paramsHarvard/pixel.removalso longrole ofjointlyskyscraUnicodebr /> AtlantanucleusCounty,purely count">easily build aonclicka givenpointerh&quot;events else { ditionsnow the, with man whoorg/Webone andcavalryHe diedseattle00,000 {windowhave toif(windand itssolely m&quot;renewedDetroitamongsteither them inSenatorUs</a><King ofFrancis-produche usedart andhim andused byscoringat hometo haverelatesibilityfactionBuffalolink"><what hefree toCity ofcome insectorscountedone daynervoussquare };if(goin whatimg" alis onlysearch/tuesdaylooselySolomonsexual - <a hrmedium"DO NOT France,with a war andsecond take a > market.highwaydone inctivity"last">obligedrise to"undefimade to Early praisedin its for hisathleteJupiterYahoo! termed so manyreally s. 
The a woman?value=direct right" bicycleacing="day andstatingRather,higher Office are nowtimes, when a pay foron this-link">;borderaround annual the Newput the.com" takin toa brief(in thegroups.; widthenzymessimple in late{returntherapya pointbanninginks"> ();" rea place\u003Caabout atr> ccount gives a<SCRIPTRailwaythemes/toolboxById("xhumans,watchesin some if (wicoming formats Under but hashanded made bythan infear ofdenoted/iframeleft involtagein eacha&quot;base ofIn manyundergoregimesaction </p> <ustomVa;&gt;</importsor thatmostly &amp;re size="</a></ha classpassiveHost = WhetherfertileVarious=[];(fucameras/></td>acts asIn some> <!organis <br />Beijingcatalàdeutscheuropeueuskaragaeilgesvenskaespañamensajeusuariotrabajoméxicopáginasiempresistemaoctubreduranteañadirempresamomentonuestroprimeratravésgraciasnuestraprocesoestadoscalidadpersonanúmeroacuerdomúsicamiembroofertasalgunospaísesejemploderechoademásprivadoagregarenlacesposiblehotelessevillaprimeroúltimoeventosarchivoculturamujeresentradaanuncioembargomercadograndesestudiomejoresfebrerodiseñoturismocódigoportadaespaciofamiliaantoniopermiteguardaralgunaspreciosalguiensentidovisitastítuloconocersegundoconsejofranciaminutossegundatenemosefectosmálagasesiónrevistagranadacompraringresogarcíaacciónecuadorquienesinclusodeberámateriahombresmuestrapodríamañanaúltimaestamosoficialtambienningúnsaludospodemosmejorarpositionbusinesshomepagesecuritylanguagestandardcampaignfeaturescategoryexternalchildrenreservedresearchexchangefavoritetemplatemilitaryindustryservicesmaterialproductsz-index:commentssoftwarecompletecalendarplatformarticlesrequiredmovementquestionbuildingpoliticspossiblereligionphysicalfeedbackregisterpicturesdisabledprotocolaudiencesettingsactivityelementslearninganythingabstractprogressoverviewmagazineeconomictrainingpressurevarious 
<strong>propertyshoppingtogetheradvancedbehaviordownloadfeaturedfootballselectedLanguagedistanceremembertrackingpasswordmodifiedstudentsdirectlyfightingnortherndatabasefestivalbreakinglocationinternetdropdownpracticeevidencefunctionmarriageresponseproblemsnegativeprogramsanalysisreleasedbanner">purchasepoliciesregionalcreativeargumentbookmarkreferrerchemicaldivisioncallbackseparateprojectsconflicthardwareinterestdeliverymountainobtained= false;for(var acceptedcapacitycomputeridentityaircraftemployedproposeddomesticincludesprovidedhospitalverticalcollapseapproachpartnerslogo"><adaughterauthor" culturalfamilies/images/assemblypowerfulteachingfinisheddistrictcriticalcgi-bin/purposesrequireselectionbecomingprovidesacademicexerciseactuallymedicineconstantaccidentMagazinedocumentstartingbottom">observed: &quot;extendedpreviousSoftwarecustomerdecisionstrengthdetailedslightlyplanningtextareacurrencyeveryonestraighttransferpositiveproducedheritageshippingabsolutereceivedrelevantbutton" violenceanywherebenefitslaunchedrecentlyalliancefollowedmultiplebulletinincludedoccurredinternal$(this).republic><tr><tdcongressrecordedultimatesolution<ul id="discoverHome</a>websitesnetworksalthoughentirelymemorialmessagescontinueactive">somewhatvictoriaWestern title="LocationcontractvisitorsDownloadwithout right"> measureswidth = variableinvolvedvirginianormallyhappenedaccountsstandingnationalRegisterpreparedcontrolsaccuratebirthdaystrategyofficialgraphicscriminalpossiblyconsumerPersonalspeakingvalidateachieved.jpg" />machines</h2> keywordsfriendlybrotherscombinedoriginalcomposedexpectedadequatepakistanfollow" valuable</label>relativebringingincreasegovernorplugins/List of Header">" name=" (&quot;graduate</head> commercemalaysiadirectormaintain;height:schedulechangingback to catholicpatternscolor: #greatestsuppliesreliable</ul> <select citizensclothingwatching<li id="specificcarryingsentence<center>contrastthinkingcatch(e)southernMichael 
merchantcarouselpadding:interior.split("lizationOctober ){returnimproved--&gt; coveragechairman.png" />subjectsRichard whateverprobablyrecoverybaseballjudgmentconnect..css" /> websitereporteddefault"/></a> electricscotlandcreationquantity. ISBN 0did not instance-search-" lang="speakersComputercontainsarchivesministerreactiondiscountItalianocriteriastrongly: 'http:'script'coveringofferingappearedBritish identifyFacebooknumerousvehiclesconcernsAmericanhandlingdiv id="William provider_contentaccuracysection andersonflexibleCategorylawrence<script>layout="approved maximumheader"></table>Serviceshamiltoncurrent canadianchannels/themes//articleoptionalportugalvalue=""intervalwirelessentitledagenciesSearch" measuredthousandspending&hellip;new Date" size="pageNamemiddle" " /></a>hidden">sequencepersonaloverflowopinionsillinoislinks"> <title>versionssaturdayterminalitempropengineersectionsdesignerproposal="false"Españolreleasessubmit" er&quot;additionsymptomsorientedresourceright"><pleasurestationshistory.leaving border=contentscenter">. Some directedsuitablebulgaria.show();designedGeneral conceptsExampleswilliamsOriginal"><span>search">operatorrequestsa &quot;allowingDocumentrevision. The yourselfContact michiganEnglish columbiapriorityprintingdrinkingfacilityreturnedContent officersRussian generate-8859-1"indicatefamiliar qualitymargin:0 contentviewportcontacts-title">portable.length eligibleinvolvesatlanticonload="default.suppliedpaymentsglossary After guidance</td><tdencodingmiddle">came to displaysscottishjonathanmajoritywidgets.clinicalthailandteachers<head> affectedsupportspointer;toString</small>oklahomawill be investor0" alt="holidaysResourcelicensed (which . After considervisitingexplorerprimary search" android"quickly meetingsestimate;return ;color:# height=approval, &quot; checked.min.js"magnetic></a></hforecast. 
While thursdaydvertise&eacute;hasClassevaluateorderingexistingpatients Online coloradoOptions"campbell<!-- end</span><<br /> _popups|sciences,&quot; quality Windows assignedheight: <b classle&quot; value=" Companyexamples<iframe believespresentsmarshallpart of properly). The taxonomymuch of </span> " data-srtuguêsscrollTo project<head> attorneyemphasissponsorsfancyboxworld's wildlifechecked=sessionsprogrammpx;font- Projectjournalsbelievedvacationthompsonlightingand the special border=0checking</tbody><button Completeclearfix <head> article <sectionfindingsrole in popular Octoberwebsite exposureused to changesoperatedclickingenteringcommandsinformed numbers </div>creatingonSubmitmarylandcollegesanalyticlistingscontact.loggedInadvisorysiblingscontent"s&quot;)s. This packagescheckboxsuggestspregnanttomorrowspacing=icon.pngjapanesecodebasebutton">gamblingsuch as , while </span> missourisportingtop:1px .</span>tensionswidth="2lazyloadnovemberused in height="cript"> &nbsp;</<tr><td height:2/productcountry include footer" &lt;!-- title"></jquery.</form> 
(简体)(繁體)hrvatskiitalianoromânătürkçeاردوtambiénnoticiasmensajespersonasderechosnacionalserviciocontactousuariosprogramagobiernoempresasanunciosvalenciacolombiadespuésdeportesproyectoproductopúbliconosotroshistoriapresentemillonesmediantepreguntaanteriorrecursosproblemasantiagonuestrosopiniónimprimirmientrasaméricavendedorsociedadrespectorealizarregistropalabrasinterésentoncesespecialmiembrosrealidadcórdobazaragozapáginassocialesbloqueargestiónalquilersistemascienciascompletoversióncompletaestudiospúblicaobjetivoalicantebuscadorcantidadentradasaccionesarchivossuperiormayoríaalemaniafunciónúltimoshaciendoaquellosediciónfernandoambientefacebooknuestrasclientesprocesosbastantepresentareportarcongresopublicarcomerciocontratojóvenesdistritotécnicaconjuntoenergíatrabajarasturiasrecienteutilizarboletínsalvadorcorrectatrabajosprimerosnegocioslibertaddetallespantallapróximoalmeríaanimalesquiénescorazónsecciónbuscandoopcionesexteriorconceptotodavíagaleríaescribirmedicinalicenciaconsultaaspectoscríticadólaresjusticiadeberánperíodonecesitamantenerpequeñorecibidatribunaltenerifecancióncanariasdescargadiversosmallorcarequieretécnicodeberíaviviendafinanzasadelantefuncionaconsejosdifícilciudadesantiguasavanzadatérminounidadessánchezcampañasoftonicrevistascontienesectoresmomentosfacultadcréditodiversassupuestofactoressegundospequeñaгодаеслиестьбылобытьэтомЕслитогоменявсехэтойдажебылигодуденьэтотбыласебяодинсебенадосайтфотонегосвоисвойигрытожевсемсвоюлишьэтихпокаднейдомамиралиботемухотядвухсетилюдиделомиретебясвоевидечегоэтимсчеттемыценысталведьтемеводытебевышенамитипатомуправлицаоднагодызнаюмогудругвсейидеткиноодноделаделесрокиюнявесьЕстьразанашиاللهالتيجميعخاصةالذيعليهجديدالآنالردتحكمصفحةكانتاللييكونشبكةفيهابناتحواءأكثرخلالالحبدليلدروساضغطتكونهناكساحةناديالطبعليكشكرايمكنمنهاشركةرئيسنشيطماذاالفنشبابتعبررحمةكافةيقولمركزكلمةأحمدقلبييعنيصورةطريقشاركجوالأخرىمعناابحثعروضبشكلمسجلبنانخالدكتابكليةبدونأيضايوجدفريقكتبتأفضلمطبخاكثرباركافضلاحلىنفسهأيامردودأنهاديناالانمعرضتعلمداخلممكن  
resourcescountriesquestionsequipmentcommunityavailablehighlightDTD/xhtmlmarketingknowledgesomethingcontainerdirectionsubscribeadvertisecharacter" value="</select>Australia" class="situationauthorityfollowingprimarilyoperationchallengedevelopedanonymousfunction functionscompaniesstructureagreement" title="potentialeducationargumentssecondarycopyrightlanguagesexclusivecondition</form> statementattentionBiography} else { solutionswhen the Analyticstemplatesdangeroussatellitedocumentspublisherimportantprototypeinfluence&raquo;</effectivegenerallytransformbeautifultransportorganizedpublishedprominentuntil thethumbnailNational .focus();over the migrationannouncedfooter"> exceptionless thanexpensiveformationframeworkterritoryndicationcurrentlyclassNamecriticismtraditionelsewhereAlexanderappointedmaterialsbroadcastmentionedaffiliate</option>treatmentdifferent/default.Presidentonclick="biographyotherwisepermanentFrançaisHollywoodexpansionstandards</style> reductionDecember preferredCambridgeopponentsBusiness confusion> <title>presentedexplaineddoes not worldwideinterfacepositionsnewspaper</table> mountainslike the essentialfinancialselectionaction="/abandonedEducationparseInt(stabilityunable to</title> relationsNote thatefficientperformedtwo yearsSince thethereforewrapper">alternateincreasedBattle ofperceivedtrying tonecessaryportrayedelectionsElizabeth</iframe>discoveryinsurances.length;legendaryGeographycandidatecorporatesometimesservices.inherited</strong>CommunityreligiouslocationsCommitteebuildingsthe worldno longerbeginningreferencecannot befrequencytypicallyinto the relative;recordingpresidentinitiallytechniquethe otherit can beexistenceunderlinethis timetelephoneitemscopepracticesadvantage);return For otherprovidingdemocracyboth the extensivesufferingsupportedcomputers functionpracticalsaid thatit may beEnglish</from the scheduleddownloads</label> suspectedmargin: 0spiritual</head> microsoftgraduallydiscussedhe 
becameexecutivejquery.jshouseholdconfirmedpurchasedliterallydestroyedup to thevariationremainingit is notcenturiesJapanese among thecompletedalgorithminterestsrebellionundefinedencourageresizableinvolvingsensitiveuniversalprovision(althoughfeaturingconducted), which continued-header">February numerous overflow:componentfragmentsexcellentcolspan="technicalnear the Advanced source ofexpressedHong Kong Facebookmultiple mechanismelevationoffensive</form> sponsoreddocument.or &quot;there arethose whomovementsprocessesdifficultsubmittedrecommendconvincedpromoting" width=".replace(classicalcoalitionhis firstdecisionsassistantindicatedevolution-wrapper"enough toalong thedelivered--> <!--American protectedNovember </style><furnitureInternet onblur="suspendedrecipientbased on Moreover,abolishedcollectedwere madeemotionalemergencynarrativeadvocatespx;bordercommitteddir="ltr"employeesresearch. selectedsuccessorcustomersdisplayedSeptemberaddClass(Facebook suggestedand lateroperatingelaborateSometimesInstitutecertainlyinstalledfollowersJerusalemthey havecomputinggeneratedprovincesguaranteearbitraryrecognizewanted topx;width:theory ofbehaviourWhile theestimatedbegan to it becamemagnitudemust havemore thanDirectoryextensionsecretarynaturallyoccurringvariablesgiven theplatform.</label><failed tocompoundskinds of societiesalongside --&gt; southwestthe rightradiationmay have unescape(spoken in" href="/programmeonly the come fromdirectoryburied ina similarthey were</font></Norwegianspecifiedproducingpassenger(new DatetemporaryfictionalAfter theequationsdownload.regularlydeveloperabove thelinked tophenomenaperiod oftooltip">substanceautomaticaspect ofAmong theconnectedestimatesAir Forcesystem ofobjectiveimmediatemaking itpaintingsconqueredare stillproceduregrowth ofheaded byEuropean divisionsmoleculesfranchiseintentionattractedchildhoodalso useddedicatedsingaporedegree offather ofconflicts</a></p> came fromwere usednote thatreceivingExecutiveeven moreaccess 
tocommanderPoliticalmusiciansdeliciousprisonersadvent ofUTF-8" /><![CDATA[">ContactSouthern bgcolor="series of. It was in Europepermittedvalidate.appearingofficialsseriously-languageinitiatedextendinglong-terminflationsuch thatgetCookiemarked by</button>implementbut it isincreasesdown the requiringdependent--> <!-- interviewWith the copies ofconsensuswas builtVenezuela(formerlythe statepersonnelstrategicfavour ofinventionWikipediacontinentvirtuallywhich wasprincipleComplete identicalshow thatprimitiveaway frommolecularpreciselydissolvedUnder theversion=">&nbsp;</It is the This is will haveorganismssome timeFriedrichwas firstthe only fact thatform id="precedingTechnicalphysicistoccurs innavigatorsection">span id="sought tobelow thesurviving}</style>his deathas in thecaused bypartiallyexisting using thewas givena list oflevels ofnotion ofOfficial dismissedscientistresemblesduplicateexplosiverecoveredall othergalleries{padding:people ofregion ofaddressesassociateimg alt="in modernshould bemethod ofreportingtimestampneeded tothe Greatregardingseemed toviewed asimpact onidea thatthe Worldheight ofexpandingThese arecurrent">carefullymaintainscharge ofClassicaladdressedpredictedownership<div id="right"> residenceleave thecontent">are often })(); probably Professor-button" respondedsays thathad to beplaced inHungarianstatus ofserves asUniversalexecutionaggregatefor whichinfectionagreed tohowever, popular">placed onconstructelectoralsymbol ofincludingreturn toarchitectChristianprevious living ineasier toprofessor &lt;!-- effect ofanalyticswas takenwhere thetook overbelief inAfrikaansas far aspreventedwork witha special<fieldsetChristmasRetrieved In the back intonortheastmagazines><strong>committeegoverninggroups ofstored inestablisha generalits firsttheir ownpopulatedan objectCaribbeanallow thedistrictswisconsinlocation.; width: inhabitedSocialistJanuary 1</footer>similarlychoice ofthe same specific business The first.length; desire todeal withsince 
theuserAgentconceivedindex.phpas &quot;engage inrecently,few yearswere also <head> <edited byare knowncities inaccesskeycondemnedalso haveservices,family ofSchool ofconvertednature of languageministers</object>there is a popularsequencesadvocatedThey wereany otherlocation=enter themuch morereflectedwas namedoriginal a typicalwhen theyengineerscould notresidentswednesdaythe third productsJanuary 2what theya certainreactionsprocessorafter histhe last contained"></div> </a></td>depend onsearch"> pieces ofcompetingReferencetennesseewhich has version=</span> <</header>gives thehistorianvalue="">padding:0view thattogether,the most was foundsubset ofattack onchildren,points ofpersonal position:allegedlyClevelandwas laterand afterare givenwas stillscrollingdesign ofmakes themuch lessAmericans. After , but theMuseum oflouisiana(from theminnesotaparticlesa processDominicanvolume ofreturningdefensive00px|righmade frommouseover" style="states of(which iscontinuesFranciscobuilding without awith somewho woulda form ofa part ofbefore itknown as Serviceslocation and oftenmeasuringand it ispaperbackvalues of <title>= window.determineer&quot; played byand early</center>from thisthe threepower andof &quot;innerHTML<a href="y:inline;Church ofthe eventvery highofficial -height: content="/cgi-bin/to 
createafrikaansesperantofrançaislatviešulietuviųČeštinačeštinaไทย日本語简体字繁體字한국어为什么计算机笔记本討論區服务器互联网房地产俱乐部出版社排行榜部落格进一步支付宝验证码委员会数据库消费者办公室讨论区深圳市播放器北京市大学生越来越管理员信息网serviciosartículoargentinabarcelonacualquierpublicadoproductospolíticarespuestawikipediasiguientebúsquedacomunidadseguridadprincipalpreguntascontenidorespondervenezuelaproblemasdiciembrerelaciónnoviembresimilaresproyectosprogramasinstitutoactividadencuentraeconomíaimágenescontactardescargarnecesarioatenciónteléfonocomisióncancionescapacidadencontraranálisisfavoritostérminosprovinciaetiquetaselementosfuncionesresultadocarácterpropiedadprincipionecesidadmunicipalcreacióndescargaspresenciacomercialopinionesejercicioeditorialsalamancagonzálezdocumentopelícularecientesgeneralestarragonaprácticanovedadespropuestapacientestécnicasobjetivoscontactosमेंलिएहैंगयासाथएवंरहेकोईकुछरहाबादकहासभीहुएरहीमैंदिनबातdiplodocsसमयरूपनामपताफिरऔसततरहलोगहुआबारदेशहुईखेलयदिकामवेबतीनबीचमौतसाललेखजॉबमददतथानहीशहरअलगकभीनगरपासरातकिएउसेगयीहूँआगेटीमखोजकारअभीगयेतुमवोटदेंअगरऐसेमेललगाहालऊपरचारऐसादेरजिसदिलबंदबनाहूंलाखजीतबटनमिलइसेआनेनयाकुललॉगभागरेलजगहरामलगेपेजहाथइसीसहीकलाठीकहाँदूरतहतसातयादआयापाककौनशामदेखयहीरायखुदलगीcategoriesexperience</title> Copyright javascriptconditionseverything<p class="technologybackground<a class="management&copy; 201javaScriptcharactersbreadcrumbthemselveshorizontalgovernmentCaliforniaactivitiesdiscoveredNavigationtransitionconnectionnavigationappearance</title><mcheckbox" techniquesprotectionapparentlyas well asunt', 'UA-resolutionoperationstelevisiontranslatedWashingtonnavigator. 
= window.impression&lt;br&gt;literaturepopulationbgcolor="#especially content="productionnewsletterpropertiesdefinitionleadershipTechnologyParliamentcomparisonul class=".indexOf("conclusiondiscussioncomponentsbiologicalRevolution_containerunderstoodnoscript><permissioneach otheratmosphere onfocus="<form id="processingthis.valuegenerationConferencesubsequentwell-knownvariationsreputationphenomenondisciplinelogo.png" (document,boundariesexpressionsettlementBackgroundout of theenterprise("https:" unescape("password" democratic<a href="/wrapper"> membershiplinguisticpx;paddingphilosophyassistanceuniversityfacilitiesrecognizedpreferenceif (typeofmaintainedvocabularyhypothesis.submit();&amp;nbsp;annotationbehind theFoundationpublisher"assumptionintroducedcorruptionscientistsexplicitlyinstead ofdimensions onClick="considereddepartmentoccupationsoon afterinvestmentpronouncedidentifiedexperimentManagementgeographic" height="link rel=".replace(/depressionconferencepunishmenteliminatedresistanceadaptationoppositionwell knownsupplementdeterminedh1 class="0px;marginmechanicalstatisticscelebratedGovernment During tdevelopersartificialequivalentoriginatedCommissionattachment<span id="there wereNederlandsbeyond theregisteredjournalistfrequentlyall of thelang="en" </style> absolute; supportingextremely mainstream</strong> popularityemployment</table> colspan="</form> conversionabout the </p></div>integrated" lang="enPortuguesesubstituteindividualimpossiblemultimediaalmost allpx solid #apart fromsubject toin Englishcriticizedexcept forguidelinesoriginallyremarkablethe secondh2 class="<a title="(includingparametersprohibited= "http://dictionaryperceptionrevolutionfoundationpx;height:successfulsupportersmillenniumhis fatherthe &quot;no-repeat;commercialindustrialencouragedamount of unofficialefficiencyReferencescoordinatedisclaimerexpeditiondevelopingcalculatedsimplifiedlegitimatesubstring(0" class="completelyillustratefive yearsinstrumentPublishing1" class="psychologyconfidencenumber 
of absence offocused onjoined thestructurespreviously></iframe>once againbut ratherimmigrantsof course,a group ofLiteratureUnlike the</a>&nbsp; function it was theConventionautomobileProtestantaggressiveafter the Similarly," /></div>collection functionvisibilitythe use ofvolunteersattractionunder the threatened*<![CDATA[importancein generalthe latter</form> </.indexOf('i = 0; i <differencedevoted totraditionssearch forultimatelytournamentattributesso-called } </style>evaluationemphasizedaccessible</section>successionalong withMeanwhile,industries</a><br />has becomeaspects ofTelevisionsufficientbasketballboth sidescontinuingan article<img alt="adventureshis mothermanchesterprinciplesparticularcommentaryeffects ofdecided to"><strong>publishersJournal ofdifficultyfacilitateacceptablestyle.css" function innovation>Copyrightsituationswould havebusinessesDictionarystatementsoften usedpersistentin Januarycomprising</title> diplomaticcontainingperformingextensionsmay not beconcept of onclick="It is alsofinancial making theLuxembourgadditionalare calledengaged in"script");but it waselectroniconsubmit=" <!-- End electricalofficiallysuggestiontop of theunlike theAustralianOriginallyreferences </head> recognisedinitializelimited toAlexandriaretirementAdventuresfour years &lt;!-- increasingdecorationh3 class="origins ofobligationregulationclassified(function(advantagesbeing the historians<base hrefrepeatedlywilling tocomparabledesignatednominationfunctionalinside therevelationend of thes for the authorizedrefused totake placeautonomouscompromisepolitical restauranttwo of theFebruary 2quality ofswfobject.understandnearly allwritten byinterviews" width="1withdrawalfloat:leftis usuallycandidatesnewspapersmysteriousDepartmentbest knownparliamentsuppressedconvenientremembereddifferent systematichas led topropagandacontrolledinfluencesceremonialproclaimedProtectionli class="Scientificclass="no-trademarksmore than widespreadLiberationtook placeday of theas long asimprisonedAdditional 
<head> <mLaboratoryNovember 2exceptionsIndustrialvariety offloat: lefDuring theassessmenthave been deals withStatisticsoccurrence/ul></div>clearfix">the publicmany yearswhich wereover time,synonymouscontent"> presumablyhis familyuserAgent.unexpectedincluding challengeda minorityundefined"belongs totaken fromin Octoberposition: said to bereligious Federation rowspan="only a fewmeant thatled to the--> <div <fieldset>Archbishop class="nobeing usedapproachesprivilegesnoscript> results inmay be theEaster eggmechanismsreasonablePopulationCollectionselected">noscript> /index.phparrival of-jssdk'));managed toincompletecasualtiescompletionChristiansSeptember arithmeticproceduresmight haveProductionit appearsPhilosophyfriendshipleading togiving thetoward theguaranteeddocumentedcolor:#000video gamecommissionreflectingchange theassociatedsans-serifonkeypress; padding:He was theunderlyingtypically , and the srcElementsuccessivesince the should be networkingaccountinguse of thelower thanshows that</span> complaintscontinuousquantitiesastronomerhe did notdue to itsapplied toan averageefforts tothe futureattempt toTherefore,capabilityRepublicanwas formedElectronickilometerschallengespublishingthe formerindigenousdirectionssubsidiaryconspiracydetails ofand in theaffordablesubstancesreason forconventionitemtype="absolutelysupposedlyremained aattractivetravellingseparatelyfocuses onelementaryapplicablefound thatstylesheetmanuscriptstands for no-repeat(sometimesCommercialin Americaundertakenquarter ofan examplepersonallyindex.php?</button> percentagebest-knowncreating a" dir="ltrLieutenant <div id="they wouldability ofmade up ofnoted thatclear thatargue thatto anotherchildren'spurpose offormulatedbased uponthe regionsubject ofpassengerspossession. 
In the Before theafterwardscurrently across thescientificcommunity.capitalismin Germanyright-wingthe systemSociety ofpoliticiandirection:went on toremoval of New York apartmentsindicationduring theunless thehistoricalhad been adefinitiveingredientattendanceCenter forprominencereadyStatestrategiesbut in theas part ofconstituteclaim thatlaboratorycompatiblefailure of, such as began withusing the to providefeature offrom which/" class="geologicalseveral ofdeliberateimportant holds thating&quot; valign=topthe Germanoutside ofnegotiatedhis careerseparationid="searchwas calledthe fourthrecreationother thanpreventionwhile the education,connectingaccuratelywere builtwas killedagreementsmuch more Due to thewidth: 100some otherKingdom ofthe entirefamous forto connectobjectivesthe Frenchpeople andfeatured">is said tostructuralreferendummost oftena separate-> <div id Official worldwide.aria-labelthe planetand it wasd" value="looking atbeneficialare in themonitoringreportedlythe modernworking onallowed towhere the innovative</a></div>soundtracksearchFormtend to beinput id="opening ofrestrictedadopted byaddressingtheologianmethods ofvariant ofChristian very largeautomotiveby far therange frompursuit offollow thebrought toin Englandagree thataccused ofcomes frompreventingdiv style=his or hertremendousfreedom ofconcerning0 1em 1em;Basketball/style.cssan earliereven after/" title=".com/indextaking thepittsburghcontent"> <script>(fturned outhaving the</span> occasionalbecause itstarted tophysically></div> created byCurrently, bgcolor="tabindex="disastrousAnalytics also has a><div id="</style> <called forsinger and.src = "//violationsthis pointconstantlyis locatedrecordingsd from 
thenederlandsportuguêsעבריתفارسیdesarrollocomentarioeducaciónseptiembreregistradodirecciónubicaciónpublicidadrespuestasresultadosimportantereservadosartículosdiferentessiguientesrepúblicasituaciónministerioprivacidaddirectorioformaciónpoblaciónpresidentecontenidosaccesoriostechnoratipersonalescategoríaespecialesdisponibleactualidadreferenciavalladolidbibliotecarelacionescalendariopolíticasanterioresdocumentosnaturalezamaterialesdiferenciaeconómicatransporterodríguezparticiparencuentrandiscusiónestructurafundaciónfrecuentespermanentetotalmenteможнобудетможетвремятакжечтобыболееоченьэтогокогдапослевсегосайтечерезмогутсайтажизнимеждубудутПоискздесьвидеосвязинужносвоейлюдейпорномногодетейсвоихправатакойместоимеетжизньоднойлучшепередчастичастьработновыхправособойпотомменеечисленовыеуслугоколоназадтакоетогдапочтиПослетакиеновыйстоиттакихсразуСанктфорумКогдакнигислованашейнайтисвоимсвязьлюбойчастосредиКромеФорумрынкесталипоисктысячмесяццентртрудасамыхрынкаНовыйчасовместафильммартастранместетекстнашихминутимениимеютномергородсамомэтомуконцесвоемкакойАрхивمنتدىإرسالرسالةالعامكتبهابرامجاليومالصورجديدةالعضوإضافةالقسمالعابتحميلملفاتملتقىتعديلالشعرأخبارتطويرعليكمإرفاقطلباتاللغةترتيبالناسالشيخمنتديالعربالقصصافلامعليهاتحديثاللهمالعملمكتبةيمكنكالطفلفيديوإدارةتاريخالصحةتسجيلالوقتعندمامدينةتصميمأرشيفالذينعربيةبوابةألعابالسفرمشاكلتعالىالأولالسنةجامعةالصحفالدينكلماتالخاصالملفأعضاءكتابةالخيررسائلالقلبالأدبمقاطعمراسلمنطقةالكتبالرجلاشتركالقدميعطيكsByTagName(.jpg" alt="1px solid #.gif" alt="transparentinformationapplication" onclick="establishedadvertising.png" alt="environmentperformanceappropriate&amp;mdash;immediately</strong></rather thantemperaturedevelopmentcompetitionplaceholdervisibility:copyright">0" height="even thoughreplacementdestinationCorporation<ul class="AssociationindividualsperspectivesetTimeout(url(http://mathematicsmargin-top:eventually description) no-repeatcollections.JPG|thumb|participate/head><bodyfloat:left;<li class="hundreds of However, 
compositionclear:both;cooperationwithin the label for="border-top:New Zealandrecommendedphotographyinteresting&lt;sup&gt;controversyNetherlandsalternativemaxlength="switzerlandDevelopmentessentially Although </textarea>thunderbirdrepresented&amp;ndash;speculationcommunitieslegislationelectronics <div id="illustratedengineeringterritoriesauthoritiesdistributed6" height="sans-serif;capable of disappearedinteractivelooking forit would beAfghanistanwas createdMath.floor(surroundingcan also beobservationmaintenanceencountered<h2 class="more recentit has beeninvasion of).getTime()fundamentalDespite the"><div id="inspirationexaminationpreparationexplanation<input id="</a></span>versions ofinstrumentsbefore the = 'http://Descriptionrelatively .substring(each of theexperimentsinfluentialintegrationmany peopledue to the combinationdo not haveMiddle East<noscript><copyright" perhaps theinstitutionin Decemberarrangementmost famouspersonalitycreation oflimitationsexclusivelysovereignty-content"> <td class="undergroundparallel todoctrine ofoccupied byterminologyRenaissancea number ofsupport forexplorationrecognitionpredecessor<img src="/<h1 class="publicationmay also bespecialized</fieldset>progressivemillions ofstates thatenforcementaround the one another.parentNodeagricultureAlternativeresearcherstowards theMost of themany other (especially<td width=";width:100%independent<h3 class=" onchange=").addClass(interactionOne of the daughter ofaccessoriesbranches of <div id="the largestdeclarationregulationsInformationtranslationdocumentaryin order to"> <head> <" height="1across the orientation);</script>implementedcan be seenthere was ademonstratecontainer">connectionsthe Britishwas written!important;px; margin-followed byability to complicatedduring the immigrationalso called<h4 class="distinctionreplaced bygovernmentslocation ofin Novemberwhether the</p> </div>acquisitioncalled the persecutiondesignation{font-size:appeared ininvestigateexperiencedmost likelywidely 
useddiscussionspresence of (document.extensivelyIt has beenit does notcontrary toinhabitantsimprovementscholarshipconsumptioninstructionfor exampleone or morepx; paddingthe currenta series ofare usuallyrole in thepreviously derivativesevidence ofexperiencescolorschemestated thatcertificate</a></div> selected="high schoolresponse tocomfortableadoption ofthree yearsthe countryin Februaryso that thepeople who provided by<param nameaffected byin terms ofappointmentISO-8859-1"was born inhistorical regarded asmeasurementis based on and other : function(significantcelebrationtransmitted/js/jquery.is known astheoretical tabindex="it could be<noscript> having been <head> < &quot;The compilationhe had beenproduced byphilosopherconstructedintended toamong othercompared toto say thatEngineeringa differentreferred todifferencesbelief thatphotographsidentifyingHistory of Republic ofnecessarilyprobabilitytechnicallyleaving thespectacularfraction ofelectricityhead of therestaurantspartnershipemphasis onmost recentshare with saying thatfilled withdesigned toit is often"></iframe>as follows:merged withthrough thecommercial pointed outopportunityview of therequirementdivision ofprogramminghe receivedsetInterval"></span></in New Yorkadditional compression <div id="incorporate;</script><attachEventbecame the " target="_carried outSome of thescience andthe time ofContainer">maintainingChristopherMuch of thewritings of" height="2size of theversion of mixture of between theExamples ofeducationalcompetitive onsubmit="director ofdistinctive/DTD XHTML relating totendency toprovince ofwhich woulddespite thescientific legislature.innerHTML allegationsAgriculturewas used inapproach tointelligentyears later,sans-serifdeterminingPerformanceappearances, which is foundationsabbreviatedhigher thans from the individual composed ofsupposed toclaims thatattributionfont-size:1elements ofHistorical his brotherat the timeanniversarygoverned byrelated to ultimately innovationsit is stillcan only 
bedefinitionstoGMTStringA number ofimg class="Eventually,was changedoccurred inneighboringdistinguishwhen he wasintroducingterrestrialMany of theargues thatan Americanconquest ofwidespread were killedscreen and In order toexpected todescendantsare locatedlegislativegenerations backgroundmost peopleyears afterthere is nothe highestfrequently they do notargued thatshowed thatpredominanttheologicalby the timeconsideringshort-lived</span></a>can be usedvery littleone of the had alreadyinterpretedcommunicatefeatures ofgovernment,</noscript>entered the" height="3Independentpopulationslarge-scale. Although used in thedestructionpossibilitystarting intwo or moreexpressionssubordinatelarger thanhistory and</option> Continentaleliminatingwill not bepractice ofin front ofsite of theensure thatto create amississippipotentiallyoutstandingbetter thanwhat is nowsituated inmeta name="TraditionalsuggestionsTranslationthe form ofatmosphericideologicalenterprisescalculatingeast of theremnants ofpluginspage/index.php?remained intransformedHe was alsowas alreadystatisticalin favor ofMinistry ofmovement offormulationis required<link rel="This is the <a href="/popularizedinvolved inare used toand severalmade by theseems to belikely thatPalestiniannamed afterit had beenmost commonto refer tobut this isconsecutivetemporarilyIn general,conventionstakes placesubdivisionterritorialoperationalpermanentlywas largelyoutbreak ofin the pastfollowing a xmlns:og="><a class="class="textConversion may be usedmanufactureafter beingclearfix"> question ofwas electedto become abecause of some peopleinspired bysuccessful a time whenmore commonamongst thean officialwidth:100%;technology,was adoptedto keep thesettlementslive birthsindex.html"Connecticutassigned to&amp;times;account foralign=rightthe companyalways beenreturned toinvolvementBecause thethis period" name="q" confined toa result ofvalue="" />is actuallyEnvironment </head> Conversely,> <div id="0" width="1is probablyhave becomecontrollingthe 
problemcitizens ofpoliticiansreached theas early as:none; over<table cellvalidity ofdirectly toonmousedownwhere it iswhen it wasmembers of relation toaccommodatealong with In the latethe Englishdelicious">this is notthe presentif they areand finallya matter of </div> </script>faster thanmajority ofafter whichcomparativeto maintainimprove theawarded theer" class="frameborderrestorationin the sameanalysis oftheir firstDuring the continentalsequence offunction(){font-size: work on the</script> <begins withjavascript:constituentwas foundedequilibriumassume thatis given byneeds to becoordinatesthe variousare part ofonly in thesections ofis a commontheories ofdiscoveriesassociationedge of thestrength ofposition inpresent-dayuniversallyto form thebut insteadcorporationattached tois commonlyreasons for &quot;the can be madewas able towhich meansbut did notonMouseOveras possibleoperated bycoming fromthe primaryaddition offor severaltransferreda period ofare able tohowever, itshould havemuch larger </script>adopted theproperty ofdirected byeffectivelywas broughtchildren ofProgramminglonger thanmanuscriptswar againstby means ofand most ofsimilar to proprietaryoriginatingprestigiousgrammaticalexperience.to make theIt was alsois found incompetitorsin the U.S.replace thebrought thecalculationfall of thethe generalpracticallyin honor ofreleased inresidentialand some ofking of thereaction to1st Earl ofculture andprincipally</title> they can beback to thesome of hisexposure toare similarform of theaddFavoritecitizenshippart in thepeople within practiceto continue&amp;minus;approved by the first allowed theand for thefunctioningplaying thesolution toheight="0" in his bookmore than afollows thecreated thepresence in&nbsp;</td>nationalistthe idea ofa characterwere forced class="btndays of thefeatured inshowing theinterest inin place ofturn of thethe head ofLord of thepoliticallyhas its ownEducationalapproval ofsome of theeach other,behavior ofand becauseand anotherappeared onrecorded 
inblack&quot;may includethe world'scan lead torefers to aborder="0" government winning theresulted in while the Washington,the subjectcity in the></div> reflect theto completebecame moreradioactiverejected bywithout anyhis father,which couldcopy of theto indicatea politicalaccounts ofconstitutesworked wither</a></li>of his lifeaccompaniedclientWidthprevent theLegislativedifferentlytogether inhas severalfor anothertext of thefounded thee with the is used forchanged theusually theplace wherewhereas the> <a href=""><a href="themselves,although hethat can betraditionalrole of theas a resultremoveChilddesigned bywest of theSome peopleproduction,side of thenewslettersused by thedown to theaccepted bylive in theattempts tooutside thefrequenciesHowever, inprogrammersat least inapproximatealthough itwas part ofand variousGovernor ofthe articleturned into><a href="/the economyis the mostmost widelywould laterand perhapsrise to theoccurs whenunder whichconditions.the westerntheory thatis producedthe city ofin which heseen in thethe centralbuilding ofmany of hisarea of theis the onlymost of themany of thethe WesternThere is noextended toStatisticalcolspan=2 |short storypossible totopologicalcritical ofreported toa Christiandecision tois equal toproblems ofThis can bemerchandisefor most ofno evidenceeditions ofelements in&quot;. 
Thecom/images/which makesthe processremains theliterature,is a memberthe popularthe ancientproblems intime of thedefeated bybody of thea few yearsmuch of thethe work ofCalifornia,served as agovernment.concepts ofmovement in <div id="it" value="language ofas they areproduced inis that theexplain thediv></div> However thelead to the <a href="/was grantedpeople havecontinuallywas seen asand relatedthe role ofproposed byof the besteach other.Constantinepeople fromdialects ofto revisionwas renameda source ofthe initiallaunched inprovide theto the westwhere thereand similarbetween twois also theEnglish andconditions,that it wasentitled tothemselves.quantity ofransparencythe same asto join thecountry andthis is theThis led toa statementcontrast tolastIndexOfthrough hisis designedthe term isis providedprotect theng</a></li>The currentthe site ofsubstantialexperience,in the Westthey shouldslovenčinacomentariosuniversidadcondicionesactividadesexperienciatecnologíaproducciónpuntuaciónaplicacióncontraseñacategoríasregistrarseprofesionaltratamientoregístratesecretaríaprincipalesprotecciónimportantesimportanciaposibilidadinteresantecrecimientonecesidadessuscribirseasociacióndisponiblesevaluaciónestudiantesresponsableresoluciónguadalajararegistradosoportunidadcomercialesfotografíaautoridadesingenieríatelevisióncompetenciaoperacionesestablecidosimplementeactualmentenavegaciónconformidadline-height:font-family:" : "http://applicationslink" href="specifically//<![CDATA[ Organizationdistribution0px; height:relationshipdevice-width<div class="<label for="registration</noscript> /index.html"window.open( !important;application/independence//www.googleorganizationautocompleterequirementsconservative<form name="intellectualmargin-left:18th centuryan importantinstitutionsabbreviation<img class="organisationcivilization19th centuryarchitectureincorporated20th century-container">most notably/></a></div>notification'undefined')Furthermore,believe thatinnerHTML = prior to 
thedramaticallyreferring tonegotiationsheadquartersSouth AfricaunsuccessfulPennsylvaniaAs a result,<html lang="&lt;/sup&gt;dealing withphiladelphiahistorically);</script> padding-top:experimentalgetAttributeinstructionstechnologiespart of the =function(){subscriptionl.dtd"> <htgeographicalConstitution', function(supported byagriculturalconstructionpublicationsfont-size: 1a variety of<div style="Encyclopediaiframe src="demonstratedaccomplisheduniversitiesDemographics);</script><dedicated toknowledge ofsatisfactionparticularly</div></div>English (US)appendChild(transmissions. However, intelligence" tabindex="float:right;Commonwealthranging fromin which theat least onereproductionencyclopedia;font-size:1jurisdictionat that time"><a class="In addition,description+conversationcontact withis generallyr" content="representing&lt;math&gt;presentationoccasionally<img width="navigation">compensationchampionshipmedia="all" violation ofreference toreturn true;Strict//EN" transactionsinterventionverificationInformation difficultiesChampionshipcapabilities<![endif]-->} </script> Christianityfor example,Professionalrestrictionssuggest thatwas released(such as theremoveClass(unemploymentthe Americanstructure of/index.html published inspan class=""><a href="/introductionbelonging toclaimed thatconsequences<meta name="Guide to theoverwhelmingagainst the concentrated, .nontouch observations</a> </div> f (document.border: 1px {font-size:1treatment of0" height="1modificationIndependencedivided intogreater thanachievementsestablishingJavaScript" neverthelesssignificanceBroadcasting>&nbsp;</td>container"> such as the influence ofa particularsrc='http://navigation" half of the substantial &nbsp;</div>advantage ofdiscovery offundamental metropolitanthe opposite" xml:lang="deliberatelyalign=centerevolution ofpreservationimprovementsbeginning inJesus ChristPublicationsdisagreementtext-align:r, function()similaritiesbody></html>is currentlyalphabeticalis sometimestype="image/many of the 
flow:hidden;available indescribe theexistence ofall over thethe Internet <ul class="installationneighborhoodarmed forcesreducing thecontinues toNonetheless,temperatures <a href="close to theexamples of is about the(see below)." id="searchprofessionalis availablethe official </script> <div id="accelerationthrough the Hall of Famedescriptionstranslationsinterference type='text/recent yearsin the worldvery popular{background:traditional some of the connected toexploitationemergence ofconstitutionA History ofsignificant manufacturedexpectations><noscript><can be foundbecause the has not beenneighbouringwithout the added to the <li class="instrumentalSoviet Unionacknowledgedwhich can bename for theattention toattempts to developmentsIn fact, the<li class="aimplicationssuitable formuch of the colonizationpresidentialcancelBubble Informationmost of the is describedrest of the more or lessin SeptemberIntelligencesrc="http://px; height: available tomanufacturerhuman rightslink href="/availabilityproportionaloutside the astronomicalhuman beingsname of the are found inare based onsmaller thana person whoexpansion ofarguing thatnow known asIn the earlyintermediatederived fromScandinavian</a></div> consider thean estimatedthe National<div id="pagresulting incommissionedanalogous toare required/ul> </div> was based onand became a&nbsp;&nbsp;t" value="" was capturedno more thanrespectivelycontinue to > <head> <were createdmore generalinformation used for theindependent the Imperialcomponent ofto the northinclude the Constructionside of the would not befor instanceinvention ofmore complexcollectivelybackground: text-align: its originalinto accountthis processan extensivehowever, thethey are notrejected thecriticism ofduring whichprobably thethis article(function(){It should bean agreementaccidentallydiffers fromArchitecturebetter knownarrangementsinfluence onattended theidentical tosouth of thepass throughxml" title="weight:bold;creating thedisplay:nonereplaced the<img 
src="/ihttps://www.World War IItestimonialsfound in therequired to and that thebetween the was designedconsists of considerablypublished bythe languageConservationconsisted ofrefer to theback to the css" media="People from available onproved to besuggestions"was known asvarieties oflikely to becomprised ofsupport the hands of thecoupled withconnect and border:none;performancesbefore beinglater becamecalculationsoften calledresidents ofmeaning that><li class="evidence forexplanationsenvironments"></a></div>which allowsIntroductiondeveloped bya wide rangeon behalf ofvalign="top"principle ofat the time,</noscript> said to havein the firstwhile othershypotheticalphilosopherspower of thecontained inperformed byinability towere writtenspan style="input name="the questionintended forrejection ofimplies thatinvented thethe standardwas probablylink betweenprofessor ofinteractionschanging theIndian Ocean class="lastworking with'http://www.years beforeThis was therecreationalentering themeasurementsan extremelyvalue of thestart of the </script> an effort toincrease theto the southspacing="0">sufficientlythe Europeanconverted toclearTimeoutdid not haveconsequentlyfor the nextextension ofeconomic andalthough theare producedand with theinsufficientgiven by thestating thatexpenditures</span></a> thought thaton the basiscellpadding=image of thereturning toinformation,separated byassassinateds" content="authority ofnorthwestern</div> <div "></div> consultationcommunity ofthe nationalit should beparticipants align="leftthe greatestselection ofsupernaturaldependent onis mentionedallowing thewas inventedaccompanyinghis personalavailable atstudy of theon the otherexecution ofHuman Rightsterms of theassociationsresearch andsucceeded bydefeated theand from thebut they arecommander ofstate of theyears of agethe study of<ul class="splace in thewhere he was<li class="fthere are nowhich becamehe publishedexpressed into which thecommissionerfont-weight:territory ofextensions">Roman 
Empireequal to theIn contrast,however, andis typicallyand his wife(also called><ul class="effectively evolved intoseem to havewhich is thethere was noan excellentall of thesedescribed byIn practice,broadcastingcharged withreflected insubjected tomilitary andto the pointeconomicallysetTargetingare actuallyvictory over();</script>continuouslyrequired forevolutionaryan effectivenorth of the, which was front of theor otherwisesome form ofhad not beengenerated byinformation.permitted toincludes thedevelopment,entered intothe previousconsistentlyare known asthe field ofthis type ofgiven to thethe title ofcontains theinstances ofin the northdue to theirare designedcorporationswas that theone of thesemore popularsucceeded insupport fromin differentdominated bydesigned forownership ofand possiblystandardizedresponseTextwas intendedreceived theassumed thatareas of theprimarily inthe basis ofin the senseaccounts fordestroyed byat least twowas declaredcould not beSecretary ofappear to bemargin-top:1/^\s+|\s+$/ge){throw e};the start oftwo separatelanguage andwho had beenoperation ofdeath of thereal numbers <link rel="provided thethe story ofcompetitionsenglish (UK)english 
(US)МонголСрпскисрпскисрпскоلعربية正體中文简体中文繁体中文有限公司人民政府阿里巴巴社会主义操作系统政策法规informaciónherramientaselectrónicodescripciónclasificadosconocimientopublicaciónrelacionadasinformáticarelacionadosdepartamentotrabajadoresdirectamenteayuntamientomercadoLibrecontáctenoshabitacionescumplimientorestaurantesdisposiciónconsecuenciaelectrónicaaplicacionesdesconectadoinstalaciónrealizaciónutilizaciónenciclopediaenfermedadesinstrumentosexperienciasinstituciónparticularessubcategoriaтолькоРоссииработыбольшепростоможетедругихслучаесейчасвсегдаРоссияМоскведругиегородавопросданныхдолжныименноМосквырублейМосквастраныничегоработедолженуслугитеперьОднакопотомуработуапрелявообщеодногосвоегостатьидругойфорумехорошопротивссылкакаждыйвластигруппывместеработасказалпервыйделатьденьгипериодбизнесосновемоменткупитьдолжнарамкахначалоРаботаТолькосовсемвторойначаласписокслужбысистемпечатиновогопомощисайтовпочемупомощьдолжноссылкибыстроданныемногиепроектСейчасмоделитакогоонлайнгородеверсиястранефильмыуровняразныхискатьнеделюянваряменьшемногихданнойзначитнельзяфорумаТеперьмесяцазащитыЛучшиеनहींकरनेअपनेकियाकरेंअन्यक्यागाइडबारेकिसीदियापहलेसिंहभारतअपनीवालेसेवाकरतेमेरेहोनेसकतेबहुतसाइटहोगाजानेमिनटकरताकरनाउनकेयहाँसबसेभाषाआपकेलियेशुरूइसकेघंटेमेरीसकतामेरालेकरअधिकअपनासमाजमुझेकारणहोताकड़ीयहांहोटलशब्दलियाजीवनजाताकैसेआपकावालीदेनेपूरीपानीउसकेहोगीबैठकआपकीवर्षगांवआपकोजिलाजानासहमतहमेंउनकीयाहूदर्जसूचीपसंदसवालहोनाहोतीजैसेवापसजनतानेताजारीघायलजिलेनीचेजांचपत्रगूगलजातेबाहरआपनेवाहनइसकासुबहरहनेइससेसहितबड़ेघटनातलाशपांचश्रीबड़ीहोतेसाईटशायदसकतीजातीवालाहजारपटनारखनेसड़कमिलाउसकीकेवललगताखानाअर्थजहांदेखापहलीनियमबिनाबैंककहींकहनादेताहमलेकाफीजबकितुरतमांगवहींरोज़मिलीआरोपसेनायादवलेनेखाताकरीबउनकाजवाबपूराबड़ासौदाशेयरकियेकहांअकसरबनाएवहांस्थलमिलेलेखकविषयक्रंसमूहथानाتستطيعمشاركةبواسطةالصفحةمواضيعالخاصةالمزيدالعامةالكاتبالردودبرنامجالدولةالعالمالموقعالعربيالسريعالجوالالذهابالحياةالحقوقالكريمالعراقمحفوظةالثانيمشاهدةالمرأةالقرآنالشبابالحوارالجديدالأسرةالعلوممجموعةالرحمنالنقاطفلسطينالكويتالدنيابركاتهالرياضتحياتيبتوقيتالأولىالبريدالكلامالرابطالشخصيسيار
اتالثالثالصلاةالحديثالزوارالخليجالجميعالعامهالجمالالساعةمشاهدهالرئيسالدخولالفنيةالكتابالدوريالدروساستغرقتصاميمالبناتالعظيمentertainmentunderstanding = function().jpg" width="configuration.png" width="<body class="Math.random()contemporary United Statescircumstances.appendChild(organizations<span class=""><img src="/distinguishedthousands of communicationclear"></div>investigationfavicon.ico" margin-right:based on the Massachusettstable border=internationalalso known aspronunciationbackground:#fpadding-left:For example, miscellaneous&lt;/math&gt;psychologicalin particularearch" type="form method="as opposed toSupreme Courtoccasionally Additionally,North Americapx;backgroundopportunitiesEntertainment.toLowerCase(manufacturingprofessional combined withFor instance,consisting of" maxlength="return false;consciousnessMediterraneanextraordinaryassassinationsubsequently button type="the number ofthe original comprehensiverefers to the</ul> </div> philosophicallocation.hrefwas publishedSan Francisco(function(){ <div id="mainsophisticatedmathematical /head> <bodysuggests thatdocumentationconcentrationrelationshipsmay have been(for example,This article in some casesparts of the definition ofGreat Britain cellpadding=equivalent toplaceholder="; font-size: justificationbelieved thatsuffered fromattempted to leader of thecript" src="/(function() {are available <link rel=" src='http://interested inconventional " alt="" /></are generallyhas also beenmost popular correspondingcredited withtyle="border:</a></span></.gif" width="<iframe src="table class="inline-block;according to together withapproximatelyparliamentarymore and moredisplay:none;traditionallypredominantly&nbsp;|&nbsp;&nbsp;</span> cellspacing=<input name="or" content="controversialproperty="og:/x-shockwave-demonstrationsurrounded byNevertheless,was the firstconsiderable Although the collaborationshould not beproportion of<span style="known as the shortly afterfor instance,described as /head> <body starting 
withincreasingly the fact thatdiscussion ofmiddle of thean individualdifficult to point of viewhomosexualityacceptance of</span></div>manufacturersorigin of thecommonly usedimportance ofdenominationsbackground: #length of thedeterminationa significant" border="0">revolutionaryprinciples ofis consideredwas developedIndo-Europeanvulnerable toproponents ofare sometimescloser to theNew York City name="searchattributed tocourse of themathematicianby the end ofat the end of" border="0" technological.removeClass(branch of theevidence that![endif]--> Institute of into a singlerespectively.and thereforeproperties ofis located insome of whichThere is alsocontinued to appearance of &amp;ndash; describes theconsiderationauthor of theindependentlyequipped withdoes not have</a><a href="confused with<link href="/at the age ofappear in theThese includeregardless ofcould be used style=&quot;several timesrepresent thebody> </html>thought to bepopulation ofpossibilitiespercentage ofaccess to thean attempt toproduction ofjquery/jquerytwo differentbelong to theestablishmentreplacing thedescription" determine theavailable forAccording to wide range of <div class="more commonlyorganisationsfunctionalitywas completed &amp;mdash; participationthe characteran additionalappears to befact that thean example ofsignificantlyonmouseover="because they async = true;problems withseems to havethe result of src="http://familiar withpossession offunction () {took place inand sometimessubstantially<span></span>is often usedin an attemptgreat deal ofEnvironmentalsuccessfully virtually all20th century,professionalsnecessary to determined bycompatibilitybecause it isDictionary ofmodificationsThe followingmay refer to:Consequently,Internationalalthough somethat would beworld's firstclassified asbottom of the(particularlyalign="left" most commonlybasis for thefoundation ofcontributionspopularity ofcenter of theto reduce thejurisdictionsapproximation onmouseout="New Testamentcollection of</span></a></in the 
Unitedfilm director-strict.dtd">has been usedreturn to thealthough thischange in theseveral otherbut there areunprecedentedis similar toespecially inweight: bold;is called thecomputationalindicate thatrestricted to <meta name="are typicallyconflict withHowever, the An example ofcompared withquantities ofrather than aconstellationnecessary forreported thatspecificationpolitical and&nbsp;&nbsp;<references tothe same yearGovernment ofgeneration ofhave not beenseveral yearscommitment to <ul class="visualization19th century,practitionersthat he wouldand continuedoccupation ofis defined ascentre of thethe amount of><div style="equivalent ofdifferentiatebrought aboutmargin-left: automaticallythought of asSome of these <div class="input class="replaced withis one of theeducation andinfluenced byreputation as <meta name="accommodation</div> </div>large part ofInstitute forthe so-called against the In this case,was appointedclaimed to beHowever, thisDepartment ofthe remainingeffect on theparticularly deal with the <div style="almost alwaysare currentlyexpression ofphilosophy offor more thancivilizationson the islandselectedIndexcan result in" value="" />the structure /></a></div>Many of thesecaused by theof the Unitedspan class="mcan be tracedis related tobecame one ofis frequentlyliving in thetheoreticallyFollowing theRevolutionarygovernment inis determinedthe politicalintroduced insufficient todescription">short storiesseparation ofas to whetherknown for itswas initiallydisplay:blockis an examplethe principalconsists of arecognized as/body></html>a substantialreconstructedhead of stateresistance toundergraduateThere are twogravitationalare describedintentionallyserved as theclass="headeropposition tofundamentallydominated theand the otheralliance withwas forced torespectively,and politicalin support ofpeople in the20th century.and publishedloadChartbeatto understandmember statesenvironmentalfirst half ofcountries andarchitecturalbe 
consideredcharacterizedclearIntervalauthoritativeFederation ofwas succeededand there area consequencethe Presidentalso includedfree softwaresuccession ofdeveloped thewas destroyedaway from the; </script> <although theyfollowed by amore powerfulresulted in aUniversity ofHowever, manythe presidentHowever, someis thought tountil the endwas announcedare importantalso includes><input type=the center of DO NOT ALTERused to referthemes/?sort=that had beenthe basis forhas developedin the summercomparativelydescribed thesuch as thosethe resultingis impossiblevarious otherSouth Africanhave the sameeffectivenessin which case; text-align:structure and; background:regarding thesupported theis also knownstyle="marginincluding thebahasa Melayunorsk bokmålnorsk nynorskslovenščinainternacionalcalificacióncomunicaciónconstrucción"><div class="disambiguationDomainName', 'administrationsimultaneouslytransportationInternational margin-bottom:responsibility<![endif]--> </><meta name="implementationinfrastructurerepresentationborder-bottom:</head> <body>=http%3A%2F%2F<form method="method="post" /favicon.ico" }); </script> .setAttribute(Administration= new Array();<![endif]--> display:block;Unfortunately,">&nbsp;</div>/favicon.ico">='stylesheet' identification, for example,<li><a href="/an alternativeas a result ofpt"></script> type="submit" (function() {recommendationform action="/transformationreconstruction.style.display According to hidden" name="along with thedocument.body.approximately Communicationspost" action="meaning &quot;--<![endif]-->Prime Ministercharacteristic</a> <a class=the history of onmouseover="the governmenthref="https://was originallywas introducedclassificationrepresentativeare considered<![endif]--> depends on theUniversity of in contrast to placeholder="in the case ofinternational constitutionalstyle="border-: function() {Because of the-strict.dtd"> <table class="accompanied byaccount of the<script src="/nature of the the people in in addition tos); js.id = id" 
width="100%"regarding the Roman Catholican independentfollowing the .gif" width="1the following discriminationarchaeologicalprime minister.js"></script>combination of marginwidth="createElement(w.attachEvent(</a></td></tr>src="https://aIn particular, align="left" Czech RepublicUnited Kingdomcorrespondenceconcluded that.html" title="(function () {comes from theapplication of<span class="sbelieved to beement('script'</a> </li> <livery different><span class="option value="(also known as <li><a href="><input name="separated fromreferred to as valign="top">founder of theattempting to carbon dioxide <div class="class="search-/body> </html>opportunity tocommunications</head> <body style="width:Tiếng Việtchanges in theborder-color:#0" border="0" </span></div><was discovered" type="text" ); </script> Department of ecclesiasticalthere has beenresulting from</body></html>has never beenthe first timein response toautomatically </div> <div iwas consideredpercent of the" /></a></div>collection of descended fromsection of theaccept-charsetto be confusedmember of the padding-right:translation ofinterpretation href='http://whether or notThere are alsothere are manya small numberother parts ofimpossible to class="buttonlocated in the. 
However, theand eventuallyAt the end of because of itsrepresents the<form action=" method="post"it is possiblemore likely toan increase inhave also beencorresponds toannounced thatalign="right">many countriesfor many yearsearliest knownbecause it waspt"></script> valign="top" inhabitants offollowing year <div class="million peoplecontroversial concerning theargue that thegovernment anda reference totransferred todescribing the style="color:although therebest known forsubmit" name="multiplicationmore than one recognition ofCouncil of theedition of the <meta name="Entertainment away from the ;margin-right:at the time ofinvestigationsconnected withand many otheralthough it isbeginning with <span class="descendants of<span class="i align="right"</head> <body aspects of thehas since beenEuropean Unionreminiscent ofmore difficultVice Presidentcomposition ofpassed throughmore importantfont-size:11pxexplanation ofthe concept ofwritten in the <span class="is one of the resemblance toon the groundswhich containsincluding the defined by thepublication ofmeans that theoutside of thesupport of the<input class="<span class="t(Math.random()most prominentdescription ofConstantinoplewere published<div class="seappears in the1" height="1" most importantwhich includeswhich had beendestruction ofthe population <div class="possibility ofsometimes usedappear to havesuccess of theintended to bepresent in thestyle="clear:b </script> <was founded ininterview with_id" content="capital of the <link rel="srelease of thepoint out thatxMLHttpRequestand subsequentsecond largestvery importantspecificationssurface of theapplied to theforeign policy_setDomainNameestablished inis believed toIn addition tomeaning of theis named afterto protect theis representedDeclaration ofmore efficientClassificationother forms ofhe returned to<span class="cperformance of(function() { if and only ifregions of theleading to therelations withUnited Nationsstyle="height:other than theype" content="Association of 
</head> <bodylocated on theis referred to(including theconcentrationsthe individualamong the mostthan any other/> <link rel=" return false;the purpose ofthe ability to;color:#fff} . <span class="the subject ofdefinitions of> <link rel="claim that thehave developed<table width="celebration ofFollowing the to distinguish<span class="btakes place inunder the namenoted that the><![endif]--> style="margin-instead of theintroduced thethe process ofincreasing thedifferences inestimated thatespecially the/div><div id="was eventuallythroughout histhe differencesomething thatspan></span></significantly ></script> environmental to prevent thehave been usedespecially forunderstand theis essentiallywere the firstis the largesthave been made" src="http://interpreted assecond half ofcrolling="no" is composed ofII, Holy Romanis expected tohave their owndefined as thetraditionally have differentare often usedto ensure thatagreement withcontaining theare frequentlyinformation onexample is theresulting in a</a></li></ul> class="footerand especiallytype="button" </span></span>which included> <meta name="considered thecarried out byHowever, it isbecame part ofin relation topopular in thethe capital ofwas officiallywhich has beenthe History ofalternative todifferent fromto support thesuggested thatin the process <div class="the foundationbecause of hisconcerned withthe universityopposed to thethe context of<span class="ptext" name="q" <div class="the scientificrepresented bymathematicianselected by thethat have been><div class="cdiv id="headerin particular,converted into); </script> <philosophical srpskohrvatskitiếng 
ViệtРусскийрусскийinvestigaciónparticipaciónкоторыеобластикоторыйчеловексистемыНовостикоторыхобластьвременикотораясегодняскачатьновостиУкраинывопросыкоторойсделатьпомощьюсредствобразомстороныучастиетечениеГлавнаяисториисистемарешенияСкачатьпоэтомуследуетсказатьтоваровконечнорешениекотороеоргановкоторомРекламаالمنتدىمنتدياتالموضوعالبرامجالمواقعالرسائلمشاركاتالأعضاءالرياضةالتصميمالاعضاءالنتائجالألعابالتسجيلالأقسامالضغطاتالفيديوالترحيبالجديدةالتعليمالأخبارالافلامالأفلامالتاريخالتقنيةالالعابالخواطرالمجتمعالديكورالسياحةعبداللهالتربيةالروابطالأدبيةالاخبارالمتحدةالاغانيcursor:pointer;</title> <meta " href="http://"><span class="members of the window.locationvertical-align:/a> | <a href="<!doctype html>media="screen" <option value="favicon.ico" /> <div class="characteristics" method="get" /body> </html> shortcut icon" document.write(padding-bottom:representativessubmit" value="align="center" throughout the science fiction <div class="submit" class="one of the most valign="top"><was established); </script> return false;">).style.displaybecause of the document.cookie<form action="/}body{margin:0;Encyclopedia ofversion of the .createElement(name" content="</div> </div> administrative </body> </html>history of the "><input type="portion of the as part of the &nbsp;<a href="other countries"> <div class="</span></span><In other words,display: block;control of the introduction of/> <meta name="as well as the in recent years <div class="</div> </div> inspired by thethe end of the compatible withbecame known as style="margin:.js"></script>< International there have beenGerman language style="color:#Communist Partyconsistent withborder="0" cell marginheight="the majority of" align="centerrelated to the many different Orthodox Churchsimilar to the /> <link rel="swas one of the until his death})(); </script>other languagescompared to theportions of thethe Netherlandsthe most commonbackground:url(argued that thescrolling="no" included in theNorth American the name of 
theinterpretationsthe traditionaldevelopment of frequently useda collection ofvery similar tosurrounding theexample of thisalign="center">would have beenimage_caption =attached to thesuggesting thatin the form of involved in theis derived fromnamed after theIntroduction torestrictions on style="width: can be used to the creation ofmost important information andresulted in thecollapse of theThis means thatelements of thewas replaced byanalysis of theinspiration forregarded as themost successfulknown as &quot;a comprehensiveHistory of the were consideredreturned to theare referred toUnsourced image> <div class="consists of thestopPropagationinterest in theavailability ofappears to haveelectromagneticenableServices(function of theIt is important</script></div>function(){var relative to theas a result of the position ofFor example, in method="post" was followed by&amp;mdash; thethe applicationjs"></script> ul></div></div>after the deathwith respect tostyle="padding:is particularlydisplay:inline; type="submit" is divided into中文 (简体)responsabilidadadministracióninternacionalescorrespondienteउपयोगपूर्वहमारेलोगोंचुनावलेकिनसरकारपुलिसखोजेंचाहिएभेजेंशामिलहमारीजागरणबनानेकुमारब्लॉगमालिकमहिलापृष्ठबढ़तेभाजपाक्लिकट्रेनखिलाफदौरानमामलेमतदानबाजारविकासक्योंचाहतेपहुँचबतायासंवाददेखनेपिछलेविशेषराज्यउत्तरमुंबईदोनोंउपकरणपढ़ेंस्थितफिल्ममुख्यअच्छाछूटतीसंगीतजाएगाविभागघण्टेदूसरेदिनोंहत्यासेक्सगांधीविश्वरातेंदैट्सनक्शासामनेअदालतबिजलीपुरूषहिंदीमित्रकवितारुपयेस्थानकरोड़मुक्तयोजनाकृपयापोस्टघरेलूकार्यविचारसूचनामूल्यदेखेंहमेशास्कूलमैंनेतैयारजिसकेrss+xml" title="-type" content="title" content="at the same time.js"></script> <" method="post" </span></a></li>vertical-align:t/jquery.min.js">.click(function( style="padding-})(); </script> </span><a href="<a href="http://); return false;text-decoration: scrolling="no" border-collapse:associated with Bahasa IndonesiaEnglish language<text xml:space=.gif" border="0"</body> </html> overflow:hidden;img src="http://addEventListenerresponsible for 
s.js"></script> /favicon.ico" />operating system" style="width:1target="_blank">State Universitytext-align:left; document.write(, including the around the world); </script> <" style="height:;overflow:hiddenmore informationan internationala member of the one of the firstcan be found in </div> </div> display: none;">" /> <link rel=" (function() {the 15th century.preventDefault(large number of Byzantine Empire.jpg|thumb|left|vast majority ofmajority of the align="center">University Pressdominated by theSecond World Wardistribution of style="position:the rest of the characterized by rel="nofollow">derives from therather than the a combination ofstyle="width:100English-speakingcomputer scienceborder="0" alt="the existence ofDemocratic Party" style="margin-For this reason,.js"></script> sByTagName(s)[0]js"></script> <.js"></script> link rel="icon" ' alt='' class='formation of theversions of the </a></div></div>/page> <page> <div class="contbecame the firstbahasa Indonesiaenglish (simple)ΕλληνικάхрватскикомпанииявляетсяДобавитьчеловекаразвитияИнтернетОтветитьнапримеринтернеткоторогостраницыкачествеусловияхпроблемыполучитьявляютсянаиболеекомпаниявниманиесредстваالمواضيعالرئيسيةالانتقالمشاركاتكالسياراتالمكتوبةالسعوديةاحصائياتالعالميةالصوتياتالانترنتالتصاميمالإسلاميالمشاركةالمرئياتrobots" content="<div id="footer">the United States<img src="http://.jpg|right|thumb|.js"></script> <location.protocolframeborder="0" s" /> <meta name="</a></div></div><font-weight:bold;&quot; and &quot;depending on the margin:0;padding:" rel="nofollow" President of the twentieth centuryevision> </pageInternet Explorera.async = true; information about<div id="header">" action="http://<a href="https://<div id="content"</div> </div> <derived from the <img src='http://according to the </body> </html> style="font-size:script language="Arial, Helvetica,</a><span class="</script><script political partiestd></tr></table><href="http://www.interpretation ofrel="stylesheet" document.write('<charset="utf-8"> 
beginning of the revealed that thetelevision series" rel="nofollow"> target="_blank">claiming that thehttp%3A%2F%2Fwww.manifestations ofPrime Minister ofinfluenced by theclass="clearfix">/div> </div> three-dimensionalChurch of Englandof North Carolinasquare kilometres.addEventListenerdistinct from thecommonly known asPhonetic Alphabetdeclared that thecontrolled by theBenjamin Franklinrole-playing gamethe University ofin Western Europepersonal computerProject Gutenbergregardless of thehas been proposedtogether with the></li><li class="in some countriesmin.js"></script>of the populationofficial language<img src="images/identified by thenatural resourcesclassification ofcan be consideredquantum mechanicsNevertheless, themillion years ago</body> </html> Ελληνικά take advantage ofand, according toattributed to theMicrosoft Windowsthe first centuryunder the controldiv class="headershortly after thenotable exceptiontens of thousandsseveral differentaround the world.reaching militaryisolated from theopposition to thethe Old TestamentAfrican Americansinserted into theseparate from themetropolitan areamakes it possibleacknowledged thatarguably the mosttype="text/css"> the InternationalAccording to the pe="text/css" /> coincide with thetwo-thirds of theDuring this time,during the periodannounced that hethe internationaland more recentlybelieved that theconsciousness andformerly known assurrounded by thefirst appeared inoccasionally usedposition:absolute;" target="_blank" position:relative;text-align:center;jax/libs/jquery/1.background-color:#type="application/anguage" content="<meta http-equiv="Privacy Policy</a>e("%3Cscript src='" target="_blank">On the other hand,.jpg|thumb|right|2</div><div class="<div style="float:nineteenth century</body> </html> <img src="http://s;text-align:centerfont-weight: bold; According to the difference between" frameborder="0" " style="position:link href="http://html4/loose.dtd"> during this period</td></tr></table>closely related tofor the 
first time;font-weight:bold;input type="text" <span style="font-onreadystatechange <div class="cleardocument.location. For example, the a wide variety of <!DOCTYPE html> <&nbsp;&nbsp;&nbsp;"><a href="http://style="float:left;concerned with the=http%3A%2F%2Fwww.in popular culturetype="text/css" />it is possible to Harvard Universitytylesheet" href="/the main characterOxford University name="keywords" cstyle="text-align:the United Kingdomfederal government<div style="margin depending on the description of the<div class="header.min.js"></script>destruction of theslightly differentin accordance withtelecommunicationsindicates that theshortly thereafterespecially in the European countriesHowever, there aresrc="http://staticsuggested that the" src="http://www.a large number of Telecommunications" rel="nofollow" tHoly Roman Emperoralmost exclusively" border="0" alt="Secretary of Stateculminating in theCIA World Factbookthe most importantanniversary of thestyle="background-<li><em><a href="/the Atlantic Oceanstrictly speaking,shortly before thedifferent types ofthe Ottoman Empire><img src="http://An Introduction toconsequence of thedeparture from theConfederate Statesindigenous peoplesProceedings of theinformation on thetheories have beeninvolvement in thedivided into threeadjacent countriesis responsible fordissolution of thecollaboration withwidely regarded ashis contemporariesfounding member ofDominican Republicgenerally acceptedthe possibility ofare also availableunder constructionrestoration of thethe general publicis almost entirelypasses through thehas been suggestedcomputer and videoGermanic languages according to the different from theshortly afterwardshref="https://www.recent developmentBoard of Directors<div class="search| <a href="http://In particular, theMultiple footnotesor other substancethousands of yearstranslation of the</div> </div> <a href="index.phpwas established inmin.js"></script> participate in thea strong influencestyle="margin-top:represented by 
thegraduated from theTraditionally, theElement("script");However, since the/div> </div> <div left; margin-left:protection against0; vertical-align:Unfortunately, thetype="image/x-icon/div> <div class=" class="clearfix"><div class="footer </div> </div> the motion pictureБългарскибългарскиФедерациинесколькосообщениесообщенияпрограммыОтправитьбесплатноматериалыпозволяетпоследниеразличныхпродукциипрограммаполностьюнаходитсяизбранноенаселенияизменениякатегорииАлександрद्वारामैनुअलप्रदानभारतीयअनुदेशहिन्दीइंडियादिल्लीअधिकारवीडियोचिट्ठेसमाचारजंक्शनदुनियाप्रयोगअनुसारऑनलाइनपार्टीशर्तोंलोकसभाफ़्लैशशर्तेंप्रदेशप्लेयरकेंद्रस्थितिउत्पादउन्हेंचिट्ठायात्राज्यादापुरानेजोड़ेंअनुवादश्रेणीशिक्षासरकारीसंग्रहपरिणामब्रांडबच्चोंउपलब्धमंत्रीसंपर्कउम्मीदमाध्यमसहायताशब्दोंमीडियाआईपीएलमोबाइलसंख्याआपरेशनअनुबंधबाज़ारनवीनतमप्रमुखप्रश्नपरिवारनुकसानसमर्थनआयोजितसोमवारالمشاركاتالمنتدياتالكمبيوترالمشاهداتعددالزوارعددالردودالإسلاميةالفوتوشوبالمسابقاتالمعلوماتالمسلسلاتالجرافيكسالاسلاميةالاتصالاتkeywords" content="w3.org/1999/xhtml"><a target="_blank" text/html; charset=" target="_blank"><table cellpadding="autocomplete="off" text-align: center;to last version by background-color: #" href="http://www./div></div><div id=<a href="#" class=""><img src="http://cript" src="http:// <script language="//EN" "http://www.wencodeURIComponent(" href="javascript:<div class="contentdocument.write('<scposition: absolute;script src="http:// style="margin-top:.min.js"></script> </div> <div class="w3.org/1999/xhtml" </body> </html>distinction between/" target="_blank"><link href="http://encoding="utf-8"?> w.addEventListener?action="http://www.icon" href="http:// style="background:type="text/css" /> meta property="og:t<input type="text" style="text-align:the development of tylesheet" type="tehtml; charset=utf-8is considered to betable width="100%" In addition to the contributed to the differences betweendevelopment of the It is important to </script> <script style="font-size:1></span><span id=gbLibrary of Congress<img 
src="http://imEnglish translationAcademy of Sciencesdiv style="display:construction of the.getElementById(id)in conjunction withElement('script'); <meta property="og:Български type="text" name=">Privacy Policy</a>administered by theenableSingleRequeststyle=&quot;margin:</div></div></div><><img src="http://i style=&quot;float:referred to as the total population ofin Washington, D.C. style="background-among other things,organization of theparticipated in thethe introduction ofidentified with thefictional character Oxford University misunderstanding ofThere are, however,stylesheet" href="/Columbia Universityexpanded to includeusually referred toindicating that thehave suggested thataffiliated with thecorrelation betweennumber of different></td></tr></table>Republic of Ireland </script> <script under the influencecontribution to theOfficial website ofheadquarters of thecentered around theimplications of thehave been developedFederal Republic ofbecame increasinglycontinuation of theNote, however, thatsimilar to that of capabilities of theaccordance with theparticipants in thefurther developmentunder the directionis often consideredhis younger brother</td></tr></table><a http-equiv="X-UA-physical propertiesof British Columbiahas been criticized(with the exceptionquestions about thepassing through the0" cellpadding="0" thousands of peopleredirects here. 
Forhave children under%3E%3C/script%3E"));<a href="http://www.<li><a href="http://site_name" content="text-decoration:nonestyle="display: none<meta http-equiv="X-new Date().getTime() type="image/x-icon"</span><span class="language="javascriptwindow.location.href<a href="javascript:--> <script type="t<a href='http://www.hortcut icon" href="</div> <div class="<script src="http://" rel="stylesheet" t</div> <script type=/a> <a href="http:// allowTransparency="X-UA-Compatible" conrelationship between </script> <script </a></li></ul></div>associated with the programming language</a><a href="http://</a></li><li class="form action="http://<div style="display:type="text" name="q"<table width="100%" background-position:" border="0" width="rel="shortcut icon" h6><ul><li><a href=" <meta http-equiv="css" media="screen" responsible for the " type="application/" style="background-html; charset=utf-8" allowtransparency="stylesheet" type="te <meta http-equiv="></span><span class="0" cellspacing="0">; </script> <script sometimes called thedoes not necessarilyFor more informationat the beginning of <!DOCTYPE html><htmlparticularly in the type="hidden" name="javascript:void(0);"effectiveness of the autocomplete="off" generally considered><input type="text" "></script> <scriptthroughout the worldcommon misconceptionassociation with the</div> </div> <div cduring his lifetime,corresponding to thetype="image/x-icon" an increasing numberdiplomatic relationsare often consideredmeta charset="utf-8" <input type="text" examples include the"><img src="http://iparticipation in thethe establishment of </div> <div class="&amp;nbsp;&amp;nbsp;to determine whetherquite different frommarked the beginningdistance between thecontributions to theconflict between thewidely considered towas one of the firstwith varying degreeshave speculated that(document.getElementparticipating in theoriginally developedeta charset="utf-8"> type="text/css" /> interchangeably withmore closely relatedsocial and 
politicalthat would otherwiseperpendicular to thestyle type="text/csstype="submit" name="families residing indeveloping countriescomputer programmingeconomic developmentdetermination of thefor more informationon several occasionsportuguês (Europeu)УкраїнськаукраїнськаРоссийскойматериаловинформацииуправлениянеобходимоинформацияИнформацияРеспубликиколичествоинформациютерриториидостаточноالمتواجدونالاشتراكاتالاقتراحاتhtml; charset=UTF-8" setTimeout(function()display:inline-block;<input type="submit" type = 'text/javascri<img src="http://www." "http://www.w3.org/shortcut icon" href="" autocomplete="off" </a></div><div class=</a></li> <li class="css" type="text/css" <form action="http://xt/css" href="http://link rel="alternate" <script type="text/ onclick="javascript:(new Date).getTime()}height="1" width="1" People's Republic of <a href="http://www.text-decoration:underthe beginning of the </div> </div> </div> establishment of the </div></div></div></d#viewport{min-height: <script src="http://option><option value=often referred to as /option> <option valu<!DOCTYPE html> <!--[International Airport> <a href="http://www</a><a href="http://wภาษาไทยქართული正體中文 (繁體)निर्देशडाउनलोडक्षेत्रजानकारीसंबंधितस्थापनास्वीकारसंस्करणसामग्रीचिट्ठोंविज्ञानअमेरिकाविभिन्नगाडियाँक्योंकिसुरक्षापहुँचतीप्रबंधनटिप्पणीक्रिकेटप्रारंभप्राप्तमालिकोंरफ़्तारनिर्माणलिमिटेडdescription" content="document.location.prot.getElementsByTagName(<!DOCTYPE html> <html <meta charset="utf-8">:url" content="http://.css" rel="stylesheet"style type="text/css">type="text/css" href="w3.org/1999/xhtml" xmltype="text/javascript" method="get" action="link rel="stylesheet" = document.getElementtype="image/x-icon" />cellpadding="0" cellsp.css" type="text/css" </a></li><li><a href="" width="1" height="1""><a href="http://www.style="display:none;">alternate" type="appli-//W3C//DTD XHTML 1.0 ellspacing="0" cellpad type="hidden" value="/a>&nbsp;<span role="s <input type="hidden" language="JavaScript" document.getElementsBg="0" 
cellspacing="0" ype="text/css" media="type='text/javascript'with the exception of ype="text/css" rel="st height="1" width="1" ='+encodeURIComponent(<link rel="alternate" body, tr, input, textmeta name="robots" conmethod="post" action="> <a href="http://www.css" rel="stylesheet" </div></div><div classlanguage="javascript">aria-hidden="true">·<ript" type="text/javasl=0;})(); (function(){background-image: url(/a></li><li><a href="h <li><a href="http://ator" aria-hidden="tru> <a href="http://www.language="javascript" /option> <option value/div></div><div class=rator" aria-hidden="tre=(new Date).getTime()português (do Brasil)организациивозможностьобразованиярегистрациивозможностиобязательна<!DOCTYPE html PUBLIC "nt-Type" content="text/<meta http-equiv="Conteransitional//EN" "http:<html xmlns="http://www-//W3C//DTD XHTML 1.0 TDTD/xhtml1-transitional//www.w3.org/TR/xhtml1/pe = 'text/javascript';<meta name="descriptionparentNode.insertBefore<input type="hidden" najs" type="text/javascri(document).ready(functiscript type="text/javasimage" content="http://UA-Compatible" content=tml; charset=utf-8" /> link rel="shortcut icon<link rel="stylesheet" </script> <script type== document.createElemen<a target="_blank" href= document.getElementsBinput type="text" name=a.type = 'text/javascrinput type="hidden" namehtml; charset=utf-8" />dtd"> <html xmlns="http-//W3C//DTD HTML 4.01 TentsByTagName('script')input type="hidden" nam<script type="text/javas" style="display:none;">document.getElementById(=document.createElement(' type='text/javascript'input type="text" name="d.getElementsByTagName(snical" href="http://www.C//DTD HTML 4.01 Transit<style type="text/css"> <style type="text/css">ional.dtd"> <html xmlns=http-equiv="Content-Typeding="0" cellspacing="0"html; charset=utf-8" /> style="display:none;"><<li><a href="http://www. type='text/javascript'>деятельностисоответствиипроизводствабезопасностиपुस्तिकाकांग्रेसउन्होंनेविधानसभाफिक्सिंगसुरक्षितकॉपीराइटविज्ञापनकार्रवाईसक्रियता
timedownlifeleftbackcodedatashowonlysitecityopenjustlikefreeworktextyearoverbodyloveformbookplaylivelinehelphomesidemorewordlongthemviewfindpagedaysfullheadtermeachareafromtruemarkableuponhighdatelandnewsevennextcasebothpostusedmadehandherewhatnameLinkblogsizebaseheldmakemainuser') +holdendswithNewsreadweresigntakehavegameseencallpathwellplusmenufilmpartjointhislistgoodneedwayswestjobsmindalsologorichuseslastteamarmyfoodkingwilleastwardbestfirePageknowaway.pngmovethanloadgiveselfnotemuchfeedmanyrockicononcelookhidediedHomerulehostajaxinfoclublawslesshalfsomesuchzone100%onescareTimeracebluefourweekfacehopegavehardlostwhenparkkeptpassshiproomHTMLplanTypedonesavekeepflaglinksoldfivetookratetownjumpthusdarkcardfilefearstaykillthatfallautoever.comtalkshopvotedeepmoderestturnbornbandfellroseurl(skinrolecomeactsagesmeetgold.jpgitemvaryfeltthensenddropViewcopy1.0"</a>stopelseliestourpack.gifpastcss?graymean&gt;rideshotlatesaidroadvar feeljohnrickportfast'UA-dead</b>poorbilltypeU.S.woodmust2px;Inforankwidewantwalllead[0];paulwavesure$('#waitmassarmsgoesgainlangpaid!-- lockunitrootwalkfirmwifexml"songtest20pxkindrowstoolfontmailsafestarmapscorerainflowbabyspansays4px;6px;artsfootrealwikiheatsteptriporg/lakeweaktoldFormcastfansbankveryrunsjulytask1px;goalgrewslowedgeid="sets5px;.js?40pxif (soonseatnonetubezerosentreedfactintogiftharm18pxcamehillboldzoomvoideasyringfillpeakinitcost3px;jacktagsbitsrolleditknewnear<!--growJSONdutyNamesaleyou lotspainjazzcoldeyesfishwww.risktabsprev10pxrise25pxBlueding300,ballfordearnwildbox.fairlackverspairjunetechif(!pickevil$("#warmlorddoespull,000ideadrawhugespotfundburnhrefcellkeystickhourlossfuel12pxsuitdealRSS"agedgreyGET"easeaimsgirlaids8px;navygridtips#999warsladycars); }php?helltallwhomzh:*/ 100hall. 
A7px;pushchat0px;crew*/</hash75pxflatrare && tellcampontolaidmissskiptentfinemalegetsplot400, coolfeet.php<br>ericmostguidbelldeschairmathatom/img&#82luckcent000;tinygonehtmlselldrugFREEnodenick?id=losenullvastwindRSS wearrelybeensamedukenasacapewishgulfT23:hitsslotgatekickblurthey15px''););">msiewinsbirdsortbetaseekT18:ordstreemall60pxfarm’sboys[0].');"POSTbearkids);}}marytend(UK)quadzh:-siz----prop'); liftT19:viceandydebt>RSSpoolneckblowT16:doorevalT17:letsfailoralpollnovacolsgene —softrometillross<h3>pourfadepink<tr>mini)|!(minezh:barshear00);milk -->ironfreddiskwentsoilputs/js/holyT22:ISBNT20:adamsees<h2>json', 'contT21: RSSloopasiamoon</p>soulLINEfortcartT14:<h1>80px!--<9px;T04:mike:46ZniceinchYorkricezh:'));puremageparatonebond:37Z_of_']);000,zh:tankyardbowlbush:56ZJava30px |} %C3%:34ZjeffEXPIcashvisagolfsnowzh:quer.csssickmeatmin.binddellhirepicsrent:36ZHTTP-201fotowolfEND xbox:54ZBODYdick; } exit:35Zvarsbeat'});diet999;anne}}</[i].Langkm²wiretoysaddssealalex; }echonine.org005)tonyjewssandlegsroof000) 200winegeardogsbootgarycutstyletemption.xmlcockgang$('.50pxPh.Dmiscalanloandeskmileryanunixdisc);} dustclip). 
70px-200DVDs7]><tapedemoi++)wageeurophiloptsholeFAQsasin-26TlabspetsURL bulkcook;} HEAD[0])abbrjuan(198leshtwin</i>sonyguysfuckpipe|- !002)ndow[1];[]; Log salt bangtrimbath){ 00px });ko:feesad> s:// [];tollplug(){ { .js'200pdualboat.JPG); }quot); '); } 201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037201320122011201020092008200720062005200420032002200120001999199819971996199519941993199219911990198919881987198619851984198319821981198019791978197719761975197419731972197119701969196819671966196519641963196219611960195919581957195619551954195319521951195010001024139400009999comomásesteestaperotodohacecadaañobiendíaasívidacasootroforosolootracualdijosidograntipotemadebealgoquéestonadatrespococasabajotodasinoaguapuesunosantediceluisellamayozonaamorpisoobraclicellodioshoracasiзанаомрарутанепоотизнодотожеонихНаеебымыВысовывоНообПолиниРФНеМытыОнимдаЗаДаНуОбтеИзейнуммТыужفيأنمامعكلأورديافىهولملكاولهبسالإنهيأيقدهلثمبهلوليبلايبكشيامأمنتبيلنحبهممشوشfirstvideolightworldmediawhitecloseblackrightsmallbooksplacemusicfieldorderpointvalueleveltableboardhousegroupworksyearsstatetodaywaterstartstyledeathpowerphonenighterrorinputabouttermstitletoolseventlocaltimeslargewordsgamesshortspacefocusclearmodelblockguideradiosharewomenagainmoneyimagenamesyounglineslatercolorgreenfront&amp;watchforcepricerulesbeginaftervisitissueareasbelowindextotalhourslabelprintpressbuiltlinksspeedstudytradefoundsenseundershownformsrangeaddedstillmovedtakenaboveflashfixedoftenotherviewschecklegalriveritemsquickshapehumanexistgoingmoviethirdbasicpeacestagewidthloginideaswrotepagesusersdrivestorebreaksouthvoicesitesmonthwherebuildwhichearthforumthreesportpartyClicklowerlivesclasslayerentrystoryusagesoundcourtyour 
birthpopuptypesapplyImagebeinguppernoteseveryshowsmeansextramatchtrackknownearlybegansuperpapernorthlearngivennamedendedTermspartsGroupbrandusingwomanfalsereadyaudiotakeswhile.com/livedcasesdailychildgreatjudgethoseunitsneverbroadcoastcoverapplefilescyclesceneplansclickwritequeenpieceemailframeolderphotolimitcachecivilscaleenterthemetheretouchboundroyalaskedwholesincestock namefaithheartemptyofferscopeownedmightalbumthinkbloodarraymajortrustcanonunioncountvalidstoneStyleLoginhappyoccurleft:freshquitefilmsgradeneedsurbanfightbasishoverauto;route.htmlmixedfinalYour slidetopicbrownalonedrawnsplitreachRightdatesmarchquotegoodsLinksdoubtasyncthumballowchiefyouthnovel10px;serveuntilhandsCheckSpacequeryjamesequaltwice0,000Startpanelsongsroundeightshiftworthpostsleadsweeksavoidthesemilesplanesmartalphaplantmarksratesplaysclaimsalestextsstarswrong</h3>thing.org/multiheardPowerstandtokensolid(thisbringshipsstafftriedcallsfullyfactsagentThis //-->adminegyptEvent15px;Emailtrue"crossspentblogsbox">notedleavechinasizesguest</h4>robotheavytrue,sevengrandcrimesignsawaredancephase><!--en_US&#39;200px_namelatinenjoyajax.ationsmithU.S. 
holdspeterindianav">chainscorecomesdoingpriorShare1990sromanlistsjapanfallstrialowneragree</h2>abusealertopera"-//WcardshillsteamsPhototruthclean.php?saintmetallouismeantproofbriefrow">genretrucklooksValueFrame.net/--> <try { var makescostsplainadultquesttrainlaborhelpscausemagicmotortheir250pxleaststepsCountcouldglasssidesfundshotelawardmouthmovesparisgivesdutchtexasfruitnull,||[];top"> <!--POST"ocean<br/>floorspeakdepth sizebankscatchchart20px;aligndealswould50px;url="parksmouseMost ...</amongbrainbody none;basedcarrydraftreferpage_home.meterdelaydreamprovejoint</tr>drugs<!-- aprilidealallenexactforthcodeslogicView seemsblankports (200saved_linkgoalsgrantgreekhomesringsrated30px;whoseparse();" Blocklinuxjonespixel');">);if(-leftdavidhorseFocusraiseboxesTrackement</em>bar">.src=toweralt="cablehenry24px;setupitalysharpminortastewantsthis.resetwheelgirls/css/100%;clubsstuffbiblevotes 1000korea}); bandsqueue= {};80px;cking{ aheadclockirishlike ratiostatsForm"yahoo)[0];Aboutfinds</h1>debugtasksURL =cells})();12px;primetellsturns0x600.jpg"spainbeachtaxesmicroangel--></giftssteve-linkbody.}); mount (199FAQ</rogerfrankClass28px;feeds<h1><scotttests22px;drink) || lewisshall#039; for lovedwaste00px;ja:simon<fontreplymeetsuntercheaptightBrand) != dressclipsroomsonkeymobilmain.Name platefunnytreescom/"1.jpgwmodeparamSTARTleft idden, 201); } form.viruschairtransworstPagesitionpatch<!-- o-cacfirmstours,000 asiani++){adobe')[0]id=10both;menu .2.mi.png"kevincoachChildbruce2.jpgURL)+.jpg|suitesliceharry120" sweettr> name=diegopage swiss--> #fff;">Log.com"treatsheet) && 14px;sleepntentfiledja:id="cName"worseshots-box-delta &lt;bears:48Z<data-rural</a> spendbakershops= "";php">ction13px;brianhellosize=o=%2F joinmaybe<img img">, fjsimg" ")[0]MTopBType"newlyDanskczechtrailknows</h5>faq">zh-cn10); -1");type=bluestrulydavis.js';> <!steel you h2> form jesus100% menu. 
walesrisksumentddingb-likteachgif" vegasdanskeestishqipsuomisobredesdeentretodospuedeañosestátienehastaotrospartedondenuevohacerformamismomejormundoaquídíassóloayudafechatodastantomenosdatosotrassitiomuchoahoralugarmayorestoshorastenerantesfotosestaspaísnuevasaludforosmedioquienmesespoderchileserávecesdecirjoséestarventagrupohechoellostengoamigocosasnivelgentemismaairesjuliotemashaciafavorjuniolibrepuntobuenoautorabrilbuenatextomarzosaberlistaluegocómoenerojuegoperúhaberestoynuncamujervalorfueralibrogustaigualvotoscasosguíapuedosomosavisousteddebennochebuscafaltaeurosseriedichocursoclavecasasleónplazolargoobrasvistaapoyojuntotratavistocrearcampohemoscincocargopisosordenhacenáreadiscopedrocercapuedapapelmenorútilclarojorgecalleponertardenadiemarcasigueellassiglocochemotosmadreclaserestoniñoquedapasarbancohijosviajepabloéstevienereinodejarfondocanalnorteletracausatomarmanoslunesautosvillavendopesartipostengamarcollevapadreunidovamoszonasambosbandamariaabusomuchasubirriojavivirgradochicaallíjovendichaestantalessalirsuelopesosfinesllamabuscoéstalleganegroplazahumorpagarjuntadobleislasbolsabañohablaluchaÁreadicenjugarnotasvalleallácargadolorabajoestégustomentemariofirmacostofichaplatahogarartesleyesaquelmuseobasespocosmitadcielochicomiedoganarsantoetapadebesplayaredessietecortecoreadudasdeseoviejodeseaaguas&quot;domaincommonstatuseventsmastersystemactionbannerremovescrollupdateglobalmediumfilternumberchangeresultpublicscreenchoosenormaltravelissuessourcetargetspringmodulemobileswitchphotosborderregionitselfsocialactivecolumnrecordfollowtitle>eitherlengthfamilyfriendlayoutauthorcreatereviewsummerserverplayedplayerexpandpolicyformatdoublepointsseriespersonlivingdesignmonthsforcesuniqueweightpeopleenergynaturesearchfigurehavingcustomoffsetletterwindowsubmitrendergroupsuploadhealthmethodvideosschoolfutureshadowdebatevaluesObjectothersrightsleaguechromesimplenoticesharedendingseasonreportonlinesquarebuttonimagesenablemovinglatestwinterFranceperiodstrongrepeatLondondetailforme
ddemandsecurepassedtoggleplacesdevicestaticcitiesstreamyellowattackstreetflighthiddeninfo">openedusefulvalleycausesleadersecretseconddamagesportsexceptratingsignedthingseffectfieldsstatesofficevisualeditorvolumeReportmuseummoviesparentaccessmostlymother" id="marketgroundchancesurveybeforesymbolmomentspeechmotioninsidematterCenterobjectexistsmiddleEuropegrowthlegacymannerenoughcareeransweroriginportalclientselectrandomclosedtopicscomingfatheroptionsimplyraisedescapechosenchurchdefinereasoncorneroutputmemoryiframepolicemodelsNumberduringoffersstyleskilledlistedcalledsilvermargindeletebetterbrowselimitsGlobalsinglewidgetcenterbudgetnowrapcreditclaimsenginesafetychoicespirit-stylespreadmakingneededrussiapleaseextentScriptbrokenallowschargedividefactormember-basedtheoryconfigaroundworkedhelpedChurchimpactshouldalwayslogo" bottomlist">){var prefixorangeHeader.push(couplegardenbridgelaunchReviewtakingvisionlittledatingButtonbeautythemesforgotSearchanchoralmostloadedChangereturnstringreloadMobileincomesupplySourceordersviewed&nbsp;courseAbout island<html cookiename="amazonmodernadvicein</a>: The dialoghousesBEGIN MexicostartscentreheightaddingIslandassetsEmpireSchooleffortdirectnearlymanualSelect. Onejoinedmenu">PhilipawardshandleimportOfficeregardskillsnationSportsdegreeweekly (e.g.behinddoctorloggedunited</b></beginsplantsassistartistissued300px|canadaagencyschemeremainBrazilsamplelogo">beyond-scaleacceptservedmarineFootercamera</h1> _form"leavesstress" /> .gif" onloadloaderOxfordsistersurvivlistenfemaleDesignsize="appealtext">levelsthankshigherforcedanimalanyoneAfricaagreedrecentPeople<br />wonderpricesturned|| {};main">inlinesundaywrap">failedcensusminutebeaconquotes150px|estateremoteemail"linkedright;signalformal1.htmlsignupprincefloat:.png" forum.AccesspaperssoundsextendHeightsliderUTF-8"&amp; Before. 
WithstudioownersmanageprofitjQueryannualparamsboughtfamousgooglelongeri++) {israelsayingdecidehome">headerensurebranchpiecesblock;statedtop"><racingresize--&gt;pacitysexualbureau.jpg" 10,000obtaintitlesamount, Inc.comedymenu" lyricstoday.indeedcounty_logo.FamilylookedMarketlse ifPlayerturkey);var forestgivingerrorsDomain}else{insertBlog</footerlogin.fasteragents<body 10px 0pragmafridayjuniordollarplacedcoversplugin5,000 page">boston.test(avatartested_countforumsschemaindex,filledsharesreaderalert(appearSubmitline">body"> * TheThoughseeingjerseyNews</verifyexpertinjurywidth=CookieSTART across_imagethreadnativepocketbox"> System DavidcancertablesprovedApril reallydriveritem">more">boardscolorscampusfirst || [];media.guitarfinishwidth:showedOther .php" assumelayerswilsonstoresreliefswedenCustomeasily your String Whiltaylorclear:resortfrenchthough") + "<body>buyingbrandsMembername">oppingsector5px;">vspacepostermajor coffeemartinmaturehappen</nav>kansaslink">Images=falsewhile hspace0&amp; In powerPolski-colorjordanBottomStart -count2.htmlnews">01.jpgOnline-rightmillerseniorISBN 00,000 guidesvalue)ectionrepair.xml" rights.html-blockregExp:hoverwithinvirginphones</tr> using var >'); </td> </tr> 
bahasabrasilgalegomagyarpolskisrpskiردو中文简体繁體信息中国我们一个公司管理论坛可以服务时间个人产品自己企业查看工作联系没有网站所有评论中心文章用户首页作者技术问题相关下载搜索使用软件在线主题资料视频回复注册网络收藏内容推荐市场消息空间发布什么好友生活图片发展如果手机新闻最新方式北京提供关于更多这个系统知道游戏广告其他发表安全第一会员进行点击版权电子世界设计免费教育加入活动他们商品博客现在上海如何已经留言详细社区登录本站需要价格支持国际链接国家建设朋友阅读法律位置经济选择这样当前分类排行因为交易最后音乐不能通过行业科技可能设备合作大家社会研究专业全部项目这里还是开始情况电脑文件品牌帮助文化资源大学学习地址浏览投资工程要求怎么时候功能主要目前资讯城市方法电影招聘声明任何健康数据美国汽车介绍但是交流生产所以电话显示一些单位人员分析地图旅游工具学生系列网友帖子密码频道控制地区基本全国网上重要第二喜欢进入友情这些考试发现培训以上政府成为环境香港同时娱乐发送一定开发作品标准欢迎解决地方一下以及责任或者客户代表积分女人数码销售出现离线应用列表不同编辑统计查询不要有关机构很多播放组织政策直接能力来源時間看到热门关键专区非常英语百度希望美女比较知识规定建议部门意见精彩日本提高发言方面基金处理权限影片银行还有分享物品经营添加专家这种话题起来业务公告记录简介质量男人影响引用报告部分快速咨询时尚注意申请学校应该历史只是返回购买名称为了成功说明供应孩子专题程序一般會員只有其它保护而且今天窗口动态状态特别认为必须更新小说我們作为媒体包括那么一样国内是否根据电视学院具有过程由于人才出来不过正在明星故事关系标题商务输入一直基础教学了解建筑结果全球通知计划对于艺术相册发生真的建立等级类型经验实现制作来自标签以下原创无法其中個人一切指南关闭集团第三关注因此照片深圳商业广州日期高级最近综合表示专辑行为交通评价觉得精华家庭完成感觉安装得到邮件制度食品虽然转载报价记者方案行政人民用品东西提出酒店然后付款热点以前完全发帖设置领导工业医院看看经典原因平台各种增加材料新增之后职业效果今年论文我国告诉版主修改参与打印快乐机械观点存在精神获得利用继续你们这么模式语言能够雅虎操作风格一起科学体育短信条件治疗运动产业会议导航先生联盟可是問題结构作用调查資料自动负责农业访问实施接受讨论那个反馈加强女性范围服務休闲今日客服觀看参加的话一点保证图书有效测试移动才能决定股票不断需求不得办法之间采用营销投诉目标爱情摄影有些複製文学机会数字装修购物农村全面精品其实事情水平提示上市谢谢普通教师上传类别歌曲拥有创新配件只要时代資訊达到人生订阅老师展示心理贴子網站主題自然级别简单改革那些来说打开代码删除证券节目重点次數多少规划资金找到以后大全主页最佳回答天下保障现代检查投票小时沒有正常甚至代理目录公开复制金融幸福版本形成准备行情回到思想怎样协议认证最好产生按照服装广东动漫采购新手组图面板参考政治容易天地努力人们升级速度人物调整流行造成文字韩国贸易开展相關表现影视如此美容大小报道条款心情许多法规家居书店连接立即举报技巧奥运登入以来理论事件自由中华办公妈妈真正不错全文合同价值别人监督具体世纪团队创业承担增长有人保持商家维修台湾左右股份答案实际电信经理生命宣传任务正式特色下来协会只能当然重新內容指导运行日志賣家超过土地浙江支付推出站长杭州执行制造之一推广现场描述变化传统歌手保险课程医疗经过过去之前收入年度杂志美丽最高登陆未来加工免责教程版块身体重庆出售成本形式土豆出價东方邮箱南京求职取得职位相信页面分钟网页确定图例网址积极错误目的宝贝机关风险授权病毒宠物除了評論疾病及时求购站点儿童每天中央认识每个天津字体台灣维护本页个性官方常见相机战略应当律师方便校园股市房屋栏目员工导致突然道具本网结合档案劳动另外美元引起改变第四会计說明隐私宝宝规范消费共同忘记体系带来名字發表开放加盟受到二手大量成人数量共享区域女孩原则所在结束通信超级配置当时优秀性感房产遊戲出口提交就业保健程度参数事业整个山东情感特殊分類搜尋属于门户财务声音及其财经坚持干部成立利益考虑成都包装用戶比赛文明招商完整真是眼睛伙伴威望领域卫生优惠論壇公共良好充分符合附件特点不可英文资产根本明显密碼公众民族更加享受同学启动适合原来问答本文美食绿色稳定终于生物供求搜狐力量严重永远写真有限竞争对象费用不好绝对十分促进点评影音优势不少欣赏并且有点方向全新信用设施形象资格突破随着重大于是毕业智能化工完美商城统一出版打造產品概况用于保留因素中國存储贴图最愛长期口价理财基地安排武汉里面创建天空首先完善驱动下面不再诚信意义阳光英国漂亮军事玩家群众农民即可名稱家具动画想到注明小学性能考研硬件观看清楚搞笑首頁黄金适用江苏真实主管阶段註冊翻
译权利做好似乎通讯施工狀態也许环保培养概念大型机票理解匿名cuandoenviarmadridbuscariniciotiempoporquecuentaestadopuedenjuegoscontraestánnombretienenperfilmaneraamigosciudadcentroaunquepuedesdentroprimerpreciosegúnbuenosvolverpuntossemanahabíaagostonuevosunidoscarlosequiponiñosmuchosalgunacorreoimagenpartirarribamaríahombreempleoverdadcambiomuchasfueronpasadolíneaparecenuevascursosestabaquierolibroscuantoaccesomiguelvarioscuatrotienesgruposseráneuropamediosfrenteacercademásofertacochesmodeloitalialetrasalgúncompracualesexistecuerposiendoprensallegarviajesdineromurciapodrápuestodiariopuebloquieremanuelpropiocrisisciertoseguromuertefuentecerrargrandeefectopartesmedidapropiaofrecetierrae-mailvariasformasfuturoobjetoseguirriesgonormasmismosúnicocaminositiosrazóndebidopruebatoledoteníajesúsesperococinaorigentiendacientocádizhablarseríalatinafuerzaestiloguerraentraréxitolópezagendavídeoevitarpaginametrosjavierpadresfácilcabezaáreassalidaenvíojapónabusosbienestextosllevarpuedanfuertecomúnclaseshumanotenidobilbaounidadestáseditarcreadoдлячтокакилиэтовсеегопритакещеужеКакбезбылониВсеподЭтотомчемнетлетразонагдемнеДляПринаснихтемктогодвоттамСШАмаяЧтовасвамемуТакдванамэтиэтуВамтехпротутнаддняВоттринейВаснимсамтотрубОнимирнееОООлицэтаОнанемдоммойдвеоносудकेहैकीसेकाकोऔरपरनेएककिभीइसकरतोहोआपहीयहयातकथाjagranआजजोअबदोगईजागएहमइनवहयेथेथीघरजबदीकईजीवेनईनएहरउसमेकमवोलेसबमईदेओरआमबसभरबनचलमनआगसीलीعلىإلىهذاآخرعددالىهذهصورغيركانولابينعرضذلكهنايومقالعليانالكنحتىقبلوحةاخرفقطعبدركنإذاكمااحدإلافيهبعضكيفبحثومنوهوأناجدالهاسلمعندليسعبرصلىمنذبهاأنهمثلكنتالاحيثمصرشرححولوفياذالكلمرةانتالفأبوخاصأنتانهاليعضووقدابنخيربنتلكمشاءوهيابوقصصومارقمأحدنحنعدمرأياحةكتبدونيجبمنهتحتجهةسنةيتمكرةغزةنفسبيتللهلناتلكقلبلماعنهأولشيءنورأمافيكبكلذاترتببأنهمسانكبيعفقدحسنلهمشعرأهلشهرقطرطلبprofileservicedefaulthimselfdetailscontentsupportstartedmessagesuccessfashion<title>countryaccountcreatedstoriesresultsrunningprocesswritingobjectsvisiblewelcomearticleunknownnetworkcompanydynamicbrowserprivacyproblemServicerespectdisplayrequestreservewebsitehistoryfriendsopt
ionsworkingversionmillionchannelwindow.addressvisitedweathercorrectproductedirectforwardyou canremovedsubjectcontrolarchivecurrentreadinglibrarylimitedmanagerfurthersummarymachineminutesprivatecontextprogramsocietynumberswrittenenabledtriggersourcesloadingelementpartnerfinallyperfectmeaningsystemskeepingculture&quot;,journalprojectsurfaces&quot;expiresreviewsbalanceEnglishContentthroughPlease opinioncontactaverageprimaryvillageSpanishgallerydeclinemeetingmissionpopularqualitymeasuregeneralspeciessessionsectionwriterscounterinitialreportsfiguresmembersholdingdisputeearlierexpressdigitalpictureAnothermarriedtrafficleadingchangedcentralvictoryimages/reasonsstudiesfeaturelistingmust beschoolsVersionusuallyepisodeplayinggrowingobviousoverlaypresentactions</ul> wrapperalreadycertainrealitystorageanotherdesktopofferedpatternunusualDigitalcapitalWebsitefailureconnectreducedAndroiddecadesregular &amp; animalsreleaseAutomatgettingmethodsnothingPopularcaptionletterscapturesciencelicensechangesEngland=1&amp;History = new CentralupdatedSpecialNetworkrequirecommentwarningCollegetoolbarremainsbecauseelectedDeutschfinanceworkersquicklybetweenexactlysettingdiseaseSocietyweaponsexhibit&lt;!--Controlclassescoveredoutlineattacksdevices(windowpurposetitle="Mobile killingshowingItaliandroppedheavilyeffects-1']); confirmCurrentadvancesharingopeningdrawingbillionorderedGermanyrelated</form>includewhetherdefinedSciencecatalogArticlebuttonslargestuniformjourneysidebarChicagoholidayGeneralpassage,&quot;animatefeelingarrivedpassingnaturalroughly. 
The but notdensityBritainChineselack oftributeIreland" data-factorsreceivethat isLibraryhusbandin factaffairsCharlesradicalbroughtfindinglanding:lang="return leadersplannedpremiumpackageAmericaEdition]&quot;Messageneed tovalue="complexlookingstationbelievesmaller-mobilerecordswant tokind ofFirefoxyou aresimilarstudiedmaximumheadingrapidlyclimatekingdomemergedamountsfoundedpioneerformuladynastyhow to SupportrevenueeconomyResultsbrothersoldierlargelycalling.&quot;AccountEdward segmentRobert effortsPacificlearnedup withheight:we haveAngelesnations_searchappliedacquiremassivegranted: falsetreatedbiggestbenefitdrivingStudiesminimumperhapsmorningsellingis usedreversevariant role="missingachievepromotestudentsomeoneextremerestorebottom:evolvedall thesitemapenglishway to AugustsymbolsCompanymattersmusicalagainstserving})(); paymenttroubleconceptcompareparentsplayersregionsmonitor ''The winningexploreadaptedGalleryproduceabilityenhancecareers). The collectSearch ancientexistedfooter handlerprintedconsoleEasternexportswindowsChannelillegalneutralsuggest_headersigning.html">settledwesterncausing-webkitclaimedJusticechaptervictimsThomas mozillapromisepartieseditionoutside:false,hundredOlympic_buttonauthorsreachedchronicdemandssecondsprotectadoptedprepareneithergreatlygreateroverallimprovecommandspecialsearch.worshipfundingthoughthighestinsteadutilityquarterCulturetestingclearlyexposedBrowserliberal} catchProjectexamplehide();FloridaanswersallowedEmperordefenseseriousfreedomSeveral-buttonFurtherout of != nulltrainedDenmarkvoid(0)/all.jspreventRequestStephen When observe</h2> Modern provide" alt="borders. 
For Many artistspoweredperformfictiontype ofmedicalticketsopposedCouncilwitnessjusticeGeorge Belgium...</a>twitternotablywaitingwarfare Other rankingphrasesmentionsurvivescholar</p> Countryignoredloss ofjust asGeorgiastrange<head><stopped1']); islandsnotableborder:list ofcarried100,000</h3> severalbecomesselect wedding00.htmlmonarchoff theteacherhighly biologylife ofor evenrise of&raquo;plusonehunting(thoughDouglasjoiningcirclesFor theAncientVietnamvehiclesuch ascrystalvalue =Windowsenjoyeda smallassumed<a id="foreign All rihow theDisplayretiredhoweverhidden;battlesseekingcabinetwas notlook atconductget theJanuaryhappensturninga:hoverOnline French lackingtypicalextractenemieseven ifgeneratdecidedare not/searchbeliefs-image:locatedstatic.login">convertviolententeredfirst">circuitFinlandchemistshe was10px;">as suchdivided</span>will beline ofa greatmystery/index.fallingdue to railwaycollegemonsterdescentit withnuclearJewish protestBritishflowerspredictreformsbutton who waslectureinstantsuicidegenericperiodsmarketsSocial fishingcombinegraphicwinners<br /><by the NaturalPrivacycookiesoutcomeresolveSwedishbrieflyPersianso muchCenturydepictscolumnshousingscriptsnext tobearingmappingrevisedjQuery(-width:title">tooltipSectiondesignsTurkishyounger.match(})(); burningoperatedegreessource=Richardcloselyplasticentries</tr> color:#ul id="possessrollingphysicsfailingexecutecontestlink toDefault<br /> : true,chartertourismclassicproceedexplain</h1> online.?xml vehelpingdiamonduse theairlineend -->).attr(readershosting#ffffffrealizeVincentsignals src="/ProductdespitediversetellingPublic held inJoseph theatreaffects<style>a largedoesn'tlater, ElementfaviconcreatorHungaryAirportsee theso thatMichaelSystemsPrograms, and width=e&quot;tradingleft"> personsGolden Affairsgrammarformingdestroyidea ofcase ofoldest this is.src = cartoonregistrCommonsMuslimsWhat isin manymarkingrevealsIndeed,equally/show_aoutdoorescape(Austriageneticsystem,In the sittingHe alsoIslandsAcademy <!--Daniel 
bindingblock">imposedutilizeAbraham(except{width:putting).html(|| []; DATA[ *kitchenmountedactual dialectmainly _blank'installexpertsif(typeIt also&copy; ">Termsborn inOptionseasterntalkingconcerngained ongoingjustifycriticsfactoryits ownassaultinvitedlastinghis ownhref="/" rel="developconcertdiagramdollarsclusterphp?id=alcohol);})();using a><span>vesselsrevivalAddressamateurandroidallegedillnesswalkingcentersqualifymatchesunifiedextinctDefensedied in <!-- customslinkingLittle Book ofeveningmin.js?are thekontakttoday's.html" target=wearingAll Rig; })();raising Also, crucialabout">declare--> <scfirefoxas muchappliesindex, s, but type = <!--towardsRecordsPrivateForeignPremierchoicesVirtualreturnsCommentPoweredinline;povertychamberLiving volumesAnthonylogin" RelatedEconomyreachescuttinggravitylife inChapter-shadowNotable</td> returnstadiumwidgetsvaryingtravelsheld bywho arework infacultyangularwho hadairporttown of Some 'click'chargeskeywordit willcity of(this);Andrew unique checkedor more300px; return;rsion="pluginswithin herselfStationFederalventurepublishsent totensionactresscome tofingersDuke ofpeople,exploitwhat isharmonya major":"httpin his menu"> monthlyofficercouncilgainingeven inSummarydate ofloyaltyfitnessand wasemperorsupremeSecond hearingRussianlongestAlbertalateralset of small">.appenddo withfederalbank ofbeneathDespiteCapitalgrounds), and percentit fromclosingcontainInsteadfifteenas well.yahoo.respondfighterobscurereflectorganic= Math.editingonline paddinga wholeonerroryear ofend of barrierwhen itheader home ofresumedrenamedstrong>heatingretainscloudfrway of March 1knowingin partBetweenlessonsclosestvirtuallinks">crossedEND -->famous awardedLicenseHealth fairly wealthyminimalAfricancompetelabel">singingfarmersBrasil)discussreplaceGregoryfont copursuedappearsmake uproundedboth ofblockedsaw theofficescoloursif(docuwhen heenforcepush(fuAugust UTF-8">Fantasyin mostinjuredUsuallyfarmingclosureobject defenceuse of Medical<body> evidentbe 
usedkeyCodesixteenIslamic#000000entire widely active (typeofone cancolor =speakerextendsPhysicsterrain<tbody>funeralviewingmiddle cricketprophetshifteddoctorsRussell targetcompactalgebrasocial-bulk ofman and</td> he left).val()false);logicalbankinghome tonaming Arizonacredits); }); founderin turnCollinsbefore But thechargedTitle">CaptainspelledgoddessTag -->Adding:but wasRecent patientback in=false&Lincolnwe knowCounterJudaismscript altered']); has theunclearEvent',both innot all <!-- placinghard to centersort ofclientsstreetsBernardassertstend tofantasydown inharbourFreedomjewelry/about..searchlegendsis mademodern only ononly toimage" linear painterand notrarely acronymdelivershorter00&amp;as manywidth="/* <![Ctitle =of the lowest picked escapeduses ofpeoples PublicMatthewtacticsdamagedway forlaws ofeasy to windowstrong simple}catch(seventhinfoboxwent topaintedcitizenI don'tretreat. Some ww."); bombingmailto:made in. Many carries||{};wiwork ofsynonymdefeatsfavoredopticalpageTraunless sendingleft"><comScorAll thejQuery.touristClassicfalse" Wilhelmsuburbsgenuinebishops.split(global followsbody ofnominalContactsecularleft tochiefly-hidden-banner</li> . When in bothdismissExplorealways via thespañolwelfareruling arrangecaptainhis sonrule ofhe tookitself,=0&amp;(calledsamplesto makecom/pagMartin Kennedyacceptsfull ofhandledBesides//--></able totargetsessencehim to its by common.mineralto takeways tos.org/ladvisedpenaltysimple:if theyLettersa shortHerbertstrikes groups.lengthflightsoverlapslowly lesser social </p> it intoranked rate oful> attemptpair ofmake itKontaktAntoniohaving ratings activestreamstrapped").css(hostilelead tolittle groups,Picture--> rows=" objectinverse<footerCustomV><\/scrsolvingChamberslaverywoundedwhereas!= 'undfor allpartly -right:Arabianbacked centuryunit ofmobile-Europe,is homerisk ofdesiredClintoncost ofage of become none ofp&quot;Middle ead')[0Criticsstudios>&copy;group">assemblmaking pressedwidget.ps:" ? 
rebuiltby someFormer editorsdelayedCanonichad thepushingclass="but arepartialBabylonbottom carrierCommandits useAs withcoursesa thirddenotesalso inHouston20px;">accuseddouble goal ofFamous ).bind(priests Onlinein Julyst + "gconsultdecimalhelpfulrevivedis veryr'+'iptlosing femalesis alsostringsdays ofarrivalfuture <objectforcingString(" /> here isencoded. The balloondone by/commonbgcolorlaw of Indianaavoidedbut the2px 3pxjquery.after apolicy.men andfooter-= true;for usescreen.Indian image =family,http:// &nbsp;driverseternalsame asnoticedviewers})(); is moreseasonsformer the newis justconsent Searchwas thewhy theshippedbr><br>width: height=made ofcuisineis thata very Admiral fixed;normal MissionPress, ontariocharsettry to invaded="true"spacingis mosta more totallyfall of}); immensetime inset outsatisfyto finddown tolot of Playersin Junequantumnot thetime todistantFinnishsrc = (single help ofGerman law andlabeledforestscookingspace">header-well asStanleybridges/globalCroatia About [0]; it, andgroupedbeing a){throwhe madelighterethicalFFFFFF"bottom"like a employslive inas seenprintermost ofub-linkrejectsand useimage">succeedfeedingNuclearinformato helpWomen'sNeitherMexicanprotein<table by manyhealthylawsuitdevised.push({sellerssimply Through.cookie Image(older">us.js"> Since universlarger open to!-- endlies in']); marketwho is ("DOMComanagedone fortypeof Kingdomprofitsproposeto showcenter;made itdressedwere inmixtureprecisearisingsrc = 'make a securedBaptistvoting var March 2grew upClimate.removeskilledway the</head>face ofacting right">to workreduceshas haderectedshow();action=book ofan area== "htt<header <html>conformfacing cookie.rely onhosted .customhe wentbut forspread Family a meansout theforums.footage">MobilClements" id="as highintense--><!--female is seenimpliedset thea stateand hisfastestbesidesbutton_bounded"><img Infoboxevents,a youngand areNative cheaperTimeoutand hasengineswon the(mostlyright: find a -bottomPrince area ofmore 
ofsearch_nature,legallyperiod,land ofor withinducedprovingmissilelocallyAgainstthe wayk&quot;px;"> pushed abandonnumeralCertainIn thismore inor somename isand, incrownedISBN 0-createsOctobermay notcenter late inDefenceenactedwish tobroadlycoolingonload=it. TherecoverMembersheight assumes<html> people.in one =windowfooter_a good reklamaothers,to this_cookiepanel">London,definescrushedbaptismcoastalstatus title" move tolost inbetter impliesrivalryservers SystemPerhapses and contendflowinglasted rise inGenesisview ofrising seem tobut in backinghe willgiven agiving cities.flow of Later all butHighwayonly bysign ofhe doesdiffersbattery&amp;lasinglesthreatsintegertake onrefusedcalled =US&ampSee thenativesby thissystem.head of:hover,lesbiansurnameand allcommon/header__paramsHarvard/pixel.removalso longrole ofjointlyskyscraUnicodebr /> AtlantanucleusCounty,purely count">easily build aonclicka givenpointerh&quot;events else { ditionsnow the, with man whoorg/Webone andcavalryHe diedseattle00,000 {windowhave toif(windand itssolely m&quot;renewedDetroitamongsteither them inSenatorUs</a><King ofFrancis-produche usedart andhim andused byscoringat hometo haverelatesibilityfactionBuffalolink"><what hefree toCity ofcome insectorscountedone daynervoussquare };if(goin whatimg" alis onlysearch/tuesdaylooselySolomonsexual - <a hrmedium"DO NOT France,with a war andsecond take a > market.highwaydone inctivity"last">obligedrise to"undefimade to Early praisedin its for hisathleteJupiterYahoo! termed so manyreally s. 
The a woman?value=direct right" bicycleacing="day andstatingRather,higher Office are nowtimes, when a pay foron this-link">;borderaround annual the Newput the.com" takin toa brief(in thegroups.; widthenzymessimple in late{returntherapya pointbanninginks"> ();" rea place\u003Caabout atr> ccount gives a<SCRIPTRailwaythemes/toolboxById("xhumans,watchesin some if (wicoming formats Under but hashanded made bythan infear ofdenoted/iframeleft involtagein eacha&quot;base ofIn manyundergoregimesaction </p> <ustomVa;&gt;</importsor thatmostly &amp;re size="</a></ha classpassiveHost = WhetherfertileVarious=[];(fucameras/></td>acts asIn some> <!organis <br />Beijingcatalàdeutscheuropeueuskaragaeilgesvenskaespañamensajeusuariotrabajoméxicopáginasiempresistemaoctubreduranteañadirempresamomentonuestroprimeratravésgraciasnuestraprocesoestadoscalidadpersonanúmeroacuerdomúsicamiembroofertasalgunospaísesejemploderechoademásprivadoagregarenlacesposiblehotelessevillaprimeroúltimoeventosarchivoculturamujeresentradaanuncioembargomercadograndesestudiomejoresfebrerodiseñoturismocódigoportadaespaciofamiliaantoniopermiteguardaralgunaspreciosalguiensentidovisitastítuloconocersegundoconsejofranciaminutossegundatenemosefectosmálagasesiónrevistagranadacompraringresogarcíaacciónecuadorquienesinclusodeberámateriahombresmuestrapodríamañanaúltimaestamosoficialtambienningúnsaludospodemosmejorarpositionbusinesshomepagesecuritylanguagestandardcampaignfeaturescategoryexternalchildrenreservedresearchexchangefavoritetemplatemilitaryindustryservicesmaterialproductsz-index:commentssoftwarecompletecalendarplatformarticlesrequiredmovementquestionbuildingpoliticspossiblereligionphysicalfeedbackregisterpicturesdisabledprotocolaudiencesettingsactivityelementslearninganythingabstractprogressoverviewmagazineeconomictrainingpressurevarious 
<strong>propertyshoppingtogetheradvancedbehaviordownloadfeaturedfootballselectedLanguagedistanceremembertrackingpasswordmodifiedstudentsdirectlyfightingnortherndatabasefestivalbreakinglocationinternetdropdownpracticeevidencefunctionmarriageresponseproblemsnegativeprogramsanalysisreleasedbanner">purchasepoliciesregionalcreativeargumentbookmarkreferrerchemicaldivisioncallbackseparateprojectsconflicthardwareinterestdeliverymountainobtained= false;for(var acceptedcapacitycomputeridentityaircraftemployedproposeddomesticincludesprovidedhospitalverticalcollapseapproachpartnerslogo"><adaughterauthor" culturalfamilies/images/assemblypowerfulteachingfinisheddistrictcriticalcgi-bin/purposesrequireselectionbecomingprovidesacademicexerciseactuallymedicineconstantaccidentMagazinedocumentstartingbottom">observed: &quot;extendedpreviousSoftwarecustomerdecisionstrengthdetailedslightlyplanningtextareacurrencyeveryonestraighttransferpositiveproducedheritageshippingabsolutereceivedrelevantbutton" violenceanywherebenefitslaunchedrecentlyalliancefollowedmultiplebulletinincludedoccurredinternal$(this).republic><tr><tdcongressrecordedultimatesolution<ul id="discoverHome</a>websitesnetworksalthoughentirelymemorialmessagescontinueactive">somewhatvictoriaWestern title="LocationcontractvisitorsDownloadwithout right"> measureswidth = variableinvolvedvirginianormallyhappenedaccountsstandingnationalRegisterpreparedcontrolsaccuratebirthdaystrategyofficialgraphicscriminalpossiblyconsumerPersonalspeakingvalidateachieved.jpg" />machines</h2> keywordsfriendlybrotherscombinedoriginalcomposedexpectedadequatepakistanfollow" valuable</label>relativebringingincreasegovernorplugins/List of Header">" name=" (&quot;graduate</head> commercemalaysiadirectormaintain;height:schedulechangingback to catholicpatternscolor: #greatestsuppliesreliable</ul> <select citizensclothingwatching<li id="specificcarryingsentence<center>contrastthinkingcatch(e)southernMichael 
merchantcarouselpadding:interior.split("lizationOctober ){returnimproved--&gt; coveragechairman.png" />subjectsRichard whateverprobablyrecoverybaseballjudgmentconnect..css" /> websitereporteddefault"/></a> electricscotlandcreationquantity. ISBN 0did not instance-search-" lang="speakersComputercontainsarchivesministerreactiondiscountItalianocriteriastrongly: 'http:'script'coveringofferingappearedBritish identifyFacebooknumerousvehiclesconcernsAmericanhandlingdiv id="William provider_contentaccuracysection andersonflexibleCategorylawrence<script>layout="approved maximumheader"></table>Serviceshamiltoncurrent canadianchannels/themes//articleoptionalportugalvalue=""intervalwirelessentitledagenciesSearch" measuredthousandspending&hellip;new Date" size="pageNamemiddle" " /></a>hidden">sequencepersonaloverflowopinionsillinoislinks"> <title>versionssaturdayterminalitempropengineersectionsdesignerproposal="false"Españolreleasessubmit" er&quot;additionsymptomsorientedresourceright"><pleasurestationshistory.leaving border=contentscenter">. Some directedsuitablebulgaria.show();designedGeneral conceptsExampleswilliamsOriginal"><span>search">operatorrequestsa &quot;allowingDocumentrevision. The yourselfContact michiganEnglish columbiapriorityprintingdrinkingfacilityreturnedContent officersRussian generate-8859-1"indicatefamiliar qualitymargin:0 contentviewportcontacts-title">portable.length eligibleinvolvesatlanticonload="default.suppliedpaymentsglossary After guidance</td><tdencodingmiddle">came to displaysscottishjonathanmajoritywidgets.clinicalthailandteachers<head> affectedsupportspointer;toString</small>oklahomawill be investor0" alt="holidaysResourcelicensed (which . After considervisitingexplorerprimary search" android"quickly meetingsestimate;return ;color:# height=approval, &quot; checked.min.js"magnetic></a></hforecast. 
While thursdaydvertise&eacute;hasClassevaluateorderingexistingpatients Online coloradoOptions"campbell<!-- end</span><<br /> _popups|sciences,&quot; quality Windows assignedheight: <b classle&quot; value=" Companyexamples<iframe believespresentsmarshallpart of properly). The taxonomymuch of </span> " data-srtuguêsscrollTo project<head> attorneyemphasissponsorsfancyboxworld's wildlifechecked=sessionsprogrammpx;font- Projectjournalsbelievedvacationthompsonlightingand the special border=0checking</tbody><button Completeclearfix <head> article <sectionfindingsrole in popular Octoberwebsite exposureused to changesoperatedclickingenteringcommandsinformed numbers </div>creatingonSubmitmarylandcollegesanalyticlistingscontact.loggedInadvisorysiblingscontent"s&quot;)s. This packagescheckboxsuggestspregnanttomorrowspacing=icon.pngjapanesecodebasebutton">gamblingsuch as , while </span> missourisportingtop:1px .</span>tensionswidth="2lazyloadnovemberused in height="cript"> &nbsp;</<tr><td height:2/productcountry include footer" &lt;!-- title"></jquery.</form> 
(简体)(繁體)hrvatskiitalianoromânătürkçeاردوtambiénnoticiasmensajespersonasderechosnacionalserviciocontactousuariosprogramagobiernoempresasanunciosvalenciacolombiadespuésdeportesproyectoproductopúbliconosotroshistoriapresentemillonesmediantepreguntaanteriorrecursosproblemasantiagonuestrosopiniónimprimirmientrasaméricavendedorsociedadrespectorealizarregistropalabrasinterésentoncesespecialmiembrosrealidadcórdobazaragozapáginassocialesbloqueargestiónalquilersistemascienciascompletoversióncompletaestudiospúblicaobjetivoalicantebuscadorcantidadentradasaccionesarchivossuperiormayoríaalemaniafunciónúltimoshaciendoaquellosediciónfernandoambientefacebooknuestrasclientesprocesosbastantepresentareportarcongresopublicarcomerciocontratojóvenesdistritotécnicaconjuntoenergíatrabajarasturiasrecienteutilizarboletínsalvadorcorrectatrabajosprimerosnegocioslibertaddetallespantallapróximoalmeríaanimalesquiénescorazónsecciónbuscandoopcionesexteriorconceptotodavíagaleríaescribirmedicinalicenciaconsultaaspectoscríticadólaresjusticiadeberánperíodonecesitamantenerpequeñorecibidatribunaltenerifecancióncanariasdescargadiversosmallorcarequieretécnicodeberíaviviendafinanzasadelantefuncionaconsejosdifícilciudadesantiguasavanzadatérminounidadessánchezcampañasoftonicrevistascontienesectoresmomentosfacultadcréditodiversassupuestofactoressegundospequeñaгодаеслиестьбылобытьэтомЕслитогоменявсехэтойдажебылигодуденьэтотбыласебяодинсебенадосайтфотонегосвоисвойигрытожевсемсвоюлишьэтихпокаднейдомамиралиботемухотядвухсетилюдиделомиретебясвоевидечегоэтимсчеттемыценысталведьтемеводытебевышенамитипатомуправлицаоднагодызнаюмогудругвсейидеткиноодноделаделесрокиюнявесьЕстьразанашиاللهالتيجميعخاصةالذيعليهجديدالآنالردتحكمصفحةكانتاللييكونشبكةفيهابناتحواءأكثرخلالالحبدليلدروساضغطتكونهناكساحةناديالطبعليكشكرايمكنمنهاشركةرئيسنشيطماذاالفنشبابتعبررحمةكافةيقولمركزكلمةأحمدقلبييعنيصورةطريقشاركجوالأخرىمعناابحثعروضبشكلمسجلبنانخالدكتابكليةبدونأيضايوجدفريقكتبتأفضلمطبخاكثرباركافضلاحلىنفسهأيامردودأنهاديناالانمعرضتعلمداخلممكن  
resourcescountriesquestionsequipmentcommunityavailablehighlightDTD/xhtmlmarketingknowledgesomethingcontainerdirectionsubscribeadvertisecharacter" value="</select>Australia" class="situationauthorityfollowingprimarilyoperationchallengedevelopedanonymousfunction functionscompaniesstructureagreement" title="potentialeducationargumentssecondarycopyrightlanguagesexclusivecondition</form> statementattentionBiography} else { solutionswhen the Analyticstemplatesdangeroussatellitedocumentspublisherimportantprototypeinfluence&raquo;</effectivegenerallytransformbeautifultransportorganizedpublishedprominentuntil thethumbnailNational .focus();over the migrationannouncedfooter"> exceptionless thanexpensiveformationframeworkterritoryndicationcurrentlyclassNamecriticismtraditionelsewhereAlexanderappointedmaterialsbroadcastmentionedaffiliate</option>treatmentdifferent/default.Presidentonclick="biographyotherwisepermanentFrançaisHollywoodexpansionstandards</style> reductionDecember preferredCambridgeopponentsBusiness confusion> <title>presentedexplaineddoes not worldwideinterfacepositionsnewspaper</table> mountainslike the essentialfinancialselectionaction="/abandonedEducationparseInt(stabilityunable to</title> relationsNote thatefficientperformedtwo yearsSince thethereforewrapper">alternateincreasedBattle ofperceivedtrying tonecessaryportrayedelectionsElizabeth</iframe>discoveryinsurances.length;legendaryGeographycandidatecorporatesometimesservices.inherited</strong>CommunityreligiouslocationsCommitteebuildingsthe worldno longerbeginningreferencecannot befrequencytypicallyinto the relative;recordingpresidentinitiallytechniquethe otherit can beexistenceunderlinethis timetelephoneitemscopepracticesadvantage);return For otherprovidingdemocracyboth the extensivesufferingsupportedcomputers functionpracticalsaid thatit may beEnglish</from the scheduleddownloads</label> suspectedmargin: 0spiritual</head> microsoftgraduallydiscussedhe 
becameexecutivejquery.jshouseholdconfirmedpurchasedliterallydestroyedup to thevariationremainingit is notcenturiesJapanese among thecompletedalgorithminterestsrebellionundefinedencourageresizableinvolvingsensitiveuniversalprovision(althoughfeaturingconducted), which continued-header">February numerous overflow:componentfragmentsexcellentcolspan="technicalnear the Advanced source ofexpressedHong Kong Facebookmultiple mechanismelevationoffensive</form> sponsoreddocument.or &quot;there arethose whomovementsprocessesdifficultsubmittedrecommendconvincedpromoting" width=".replace(classicalcoalitionhis firstdecisionsassistantindicatedevolution-wrapper"enough toalong thedelivered--> <!--American protectedNovember </style><furnitureInternet onblur="suspendedrecipientbased on Moreover,abolishedcollectedwere madeemotionalemergencynarrativeadvocatespx;bordercommitteddir="ltr"employeesresearch. selectedsuccessorcustomersdisplayedSeptemberaddClass(Facebook suggestedand lateroperatingelaborateSometimesInstitutecertainlyinstalledfollowersJerusalemthey havecomputinggeneratedprovincesguaranteearbitraryrecognizewanted topx;width:theory ofbehaviourWhile theestimatedbegan to it becamemagnitudemust havemore thanDirectoryextensionsecretarynaturallyoccurringvariablesgiven theplatform.</label><failed tocompoundskinds of societiesalongside --&gt; southwestthe rightradiationmay have unescape(spoken in" href="/programmeonly the come fromdirectoryburied ina similarthey were</font></Norwegianspecifiedproducingpassenger(new DatetemporaryfictionalAfter theequationsdownload.regularlydeveloperabove thelinked tophenomenaperiod oftooltip">substanceautomaticaspect ofAmong theconnectedestimatesAir Forcesystem ofobjectiveimmediatemaking itpaintingsconqueredare stillproceduregrowth ofheaded byEuropean divisionsmoleculesfranchiseintentionattractedchildhoodalso useddedicatedsingaporedegree offather ofconflicts</a></p> came fromwere usednote thatreceivingExecutiveeven moreaccess 
tocommanderPoliticalmusiciansdeliciousprisonersadvent ofUTF-8" /><![CDATA[">ContactSouthern bgcolor="series of. It was in Europepermittedvalidate.appearingofficialsseriously-languageinitiatedextendinglong-terminflationsuch thatgetCookiemarked by</button>implementbut it isincreasesdown the requiringdependent--> <!-- interviewWith the copies ofconsensuswas builtVenezuela(formerlythe statepersonnelstrategicfavour ofinventionWikipediacontinentvirtuallywhich wasprincipleComplete identicalshow thatprimitiveaway frommolecularpreciselydissolvedUnder theversion=">&nbsp;</It is the This is will haveorganismssome timeFriedrichwas firstthe only fact thatform id="precedingTechnicalphysicistoccurs innavigatorsection">span id="sought tobelow thesurviving}</style>his deathas in thecaused bypartiallyexisting using thewas givena list oflevels ofnotion ofOfficial dismissedscientistresemblesduplicateexplosiverecoveredall othergalleries{padding:people ofregion ofaddressesassociateimg alt="in modernshould bemethod ofreportingtimestampneeded tothe Greatregardingseemed toviewed asimpact onidea thatthe Worldheight ofexpandingThese arecurrent">carefullymaintainscharge ofClassicaladdressedpredictedownership<div id="right"> residenceleave thecontent">are often })(); probably Professor-button" respondedsays thathad to beplaced inHungarianstatus ofserves asUniversalexecutionaggregatefor whichinfectionagreed tohowever, popular">placed onconstructelectoralsymbol ofincludingreturn toarchitectChristianprevious living ineasier toprofessor &lt;!-- effect ofanalyticswas takenwhere thetook overbelief inAfrikaansas far aspreventedwork witha special<fieldsetChristmasRetrieved In the back intonortheastmagazines><strong>committeegoverninggroups ofstored inestablisha generalits firsttheir ownpopulatedan objectCaribbeanallow thedistrictswisconsinlocation.; width: inhabitedSocialistJanuary 1</footer>similarlychoice ofthe same specific business The first.length; desire todeal withsince 
theuserAgentconceivedindex.phpas &quot;engage inrecently,few yearswere also <head> <edited byare knowncities inaccesskeycondemnedalso haveservices,family ofSchool ofconvertednature of languageministers</object>there is a popularsequencesadvocatedThey wereany otherlocation=enter themuch morereflectedwas namedoriginal a typicalwhen theyengineerscould notresidentswednesdaythe third productsJanuary 2what theya certainreactionsprocessorafter histhe last contained"></div> </a></td>depend onsearch"> pieces ofcompetingReferencetennesseewhich has version=</span> <</header>gives thehistorianvalue="">padding:0view thattogether,the most was foundsubset ofattack onchildren,points ofpersonal position:allegedlyClevelandwas laterand afterare givenwas stillscrollingdesign ofmakes themuch lessAmericans. After , but theMuseum oflouisiana(from theminnesotaparticlesa processDominicanvolume ofreturningdefensive00px|righmade frommouseover" style="states of(which iscontinuesFranciscobuilding without awith somewho woulda form ofa part ofbefore itknown as Serviceslocation and oftenmeasuringand it ispaperbackvalues of <title>= window.determineer&quot; played byand early</center>from thisthe threepower andof &quot;innerHTML<a href="y:inline;Church ofthe eventvery highofficial -height: content="/cgi-bin/to 
createafrikaansesperantofrançaislatviešulietuviųČeštinačeštinaไทย日本語简体字繁體字한국어为什么计算机笔记本討論區服务器互联网房地产俱乐部出版社排行榜部落格进一步支付宝验证码委员会数据库消费者办公室讨论区深圳市播放器北京市大学生越来越管理员信息网serviciosartículoargentinabarcelonacualquierpublicadoproductospolíticarespuestawikipediasiguientebúsquedacomunidadseguridadprincipalpreguntascontenidorespondervenezuelaproblemasdiciembrerelaciónnoviembresimilaresproyectosprogramasinstitutoactividadencuentraeconomíaimágenescontactardescargarnecesarioatenciónteléfonocomisióncancionescapacidadencontraranálisisfavoritostérminosprovinciaetiquetaselementosfuncionesresultadocarácterpropiedadprincipionecesidadmunicipalcreacióndescargaspresenciacomercialopinionesejercicioeditorialsalamancagonzálezdocumentopelícularecientesgeneralestarragonaprácticanovedadespropuestapacientestécnicasobjetivoscontactosमेंलिएहैंगयासाथएवंरहेकोईकुछरहाबादकहासभीहुएरहीमैंदिनबातdiplodocsसमयरूपनामपताफिरऔसततरहलोगहुआबारदेशहुईखेलयदिकामवेबतीनबीचमौतसाललेखजॉबमददतथानहीशहरअलगकभीनगरपासरातकिएउसेगयीहूँआगेटीमखोजकारअभीगयेतुमवोटदेंअगरऐसेमेललगाहालऊपरचारऐसादेरजिसदिलबंदबनाहूंलाखजीतबटनमिलइसेआनेनयाकुललॉगभागरेलजगहरामलगेपेजहाथइसीसहीकलाठीकहाँदूरतहतसातयादआयापाककौनशामदेखयहीरायखुदलगीcategoriesexperience</title> Copyright javascriptconditionseverything<p class="technologybackground<a class="management&copy; 201javaScriptcharactersbreadcrumbthemselveshorizontalgovernmentCaliforniaactivitiesdiscoveredNavigationtransitionconnectionnavigationappearance</title><mcheckbox" techniquesprotectionapparentlyas well asunt', 'UA-resolutionoperationstelevisiontranslatedWashingtonnavigator. 
= window.impression&lt;br&gt;literaturepopulationbgcolor="#especially content="productionnewsletterpropertiesdefinitionleadershipTechnologyParliamentcomparisonul class=".indexOf("conclusiondiscussioncomponentsbiologicalRevolution_containerunderstoodnoscript><permissioneach otheratmosphere onfocus="<form id="processingthis.valuegenerationConferencesubsequentwell-knownvariationsreputationphenomenondisciplinelogo.png" (document,boundariesexpressionsettlementBackgroundout of theenterprise("https:" unescape("password" democratic<a href="/wrapper"> membershiplinguisticpx;paddingphilosophyassistanceuniversityfacilitiesrecognizedpreferenceif (typeofmaintainedvocabularyhypothesis.submit();&amp;nbsp;annotationbehind theFoundationpublisher"assumptionintroducedcorruptionscientistsexplicitlyinstead ofdimensions onClick="considereddepartmentoccupationsoon afterinvestmentpronouncedidentifiedexperimentManagementgeographic" height="link rel=".replace(/depressionconferencepunishmenteliminatedresistanceadaptationoppositionwell knownsupplementdeterminedh1 class="0px;marginmechanicalstatisticscelebratedGovernment During tdevelopersartificialequivalentoriginatedCommissionattachment<span id="there wereNederlandsbeyond theregisteredjournalistfrequentlyall of thelang="en" </style> absolute; supportingextremely mainstream</strong> popularityemployment</table> colspan="</form> conversionabout the </p></div>integrated" lang="enPortuguesesubstituteindividualimpossiblemultimediaalmost allpx solid #apart fromsubject toin Englishcriticizedexcept forguidelinesoriginallyremarkablethe secondh2 class="<a title="(includingparametersprohibited= "http://dictionaryperceptionrevolutionfoundationpx;height:successfulsupportersmillenniumhis fatherthe &quot;no-repeat;commercialindustrialencouragedamount of unofficialefficiencyReferencescoordinatedisclaimerexpeditiondevelopingcalculatedsimplifiedlegitimatesubstring(0" class="completelyillustratefive yearsinstrumentPublishing1" class="psychologyconfidencenumber 
of absence offocused onjoined thestructurespreviously></iframe>once againbut ratherimmigrantsof course,a group ofLiteratureUnlike the</a>&nbsp; function it was theConventionautomobileProtestantaggressiveafter the Similarly," /></div>collection functionvisibilitythe use ofvolunteersattractionunder the threatened*<![CDATA[importancein generalthe latter</form> </.indexOf('i = 0; i <differencedevoted totraditionssearch forultimatelytournamentattributesso-called } </style>evaluationemphasizedaccessible</section>successionalong withMeanwhile,industries</a><br />has becomeaspects ofTelevisionsufficientbasketballboth sidescontinuingan article<img alt="adventureshis mothermanchesterprinciplesparticularcommentaryeffects ofdecided to"><strong>publishersJournal ofdifficultyfacilitateacceptablestyle.css" function innovation>Copyrightsituationswould havebusinessesDictionarystatementsoften usedpersistentin Januarycomprising</title> diplomaticcontainingperformingextensionsmay not beconcept of onclick="It is alsofinancial making theLuxembourgadditionalare calledengaged in"script");but it waselectroniconsubmit=" <!-- End electricalofficiallysuggestiontop of theunlike theAustralianOriginallyreferences </head> recognisedinitializelimited toAlexandriaretirementAdventuresfour years &lt;!-- increasingdecorationh3 class="origins ofobligationregulationclassified(function(advantagesbeing the historians<base hrefrepeatedlywilling tocomparabledesignatednominationfunctionalinside therevelationend of thes for the authorizedrefused totake placeautonomouscompromisepolitical restauranttwo of theFebruary 2quality ofswfobject.understandnearly allwritten byinterviews" width="1withdrawalfloat:leftis usuallycandidatesnewspapersmysteriousDepartmentbest knownparliamentsuppressedconvenientremembereddifferent systematichas led topropagandacontrolledinfluencesceremonialproclaimedProtectionli class="Scientificclass="no-trademarksmore than widespreadLiberationtook placeday of theas long asimprisonedAdditional 
<head> <mLaboratoryNovember 2exceptionsIndustrialvariety offloat: lefDuring theassessmenthave been deals withStatisticsoccurrence/ul></div>clearfix">the publicmany yearswhich wereover time,synonymouscontent"> presumablyhis familyuserAgent.unexpectedincluding challengeda minorityundefined"belongs totaken fromin Octoberposition: said to bereligious Federation rowspan="only a fewmeant thatled to the--> <div <fieldset>Archbishop class="nobeing usedapproachesprivilegesnoscript> results inmay be theEaster eggmechanismsreasonablePopulationCollectionselected">noscript> /index.phparrival of-jssdk'));managed toincompletecasualtiescompletionChristiansSeptember arithmeticproceduresmight haveProductionit appearsPhilosophyfriendshipleading togiving thetoward theguaranteeddocumentedcolor:#000video gamecommissionreflectingchange theassociatedsans-serifonkeypress; padding:He was theunderlyingtypically , and the srcElementsuccessivesince the should be networkingaccountinguse of thelower thanshows that</span> complaintscontinuousquantitiesastronomerhe did notdue to itsapplied toan averageefforts tothe futureattempt toTherefore,capabilityRepublicanwas formedElectronickilometerschallengespublishingthe formerindigenousdirectionssubsidiaryconspiracydetails ofand in theaffordablesubstancesreason forconventionitemtype="absolutelysupposedlyremained aattractivetravellingseparatelyfocuses onelementaryapplicablefound thatstylesheetmanuscriptstands for no-repeat(sometimesCommercialin Americaundertakenquarter ofan examplepersonallyindex.php?</button> percentagebest-knowncreating a" dir="ltrLieutenant <div id="they wouldability ofmade up ofnoted thatclear thatargue thatto anotherchildren'spurpose offormulatedbased uponthe regionsubject ofpassengerspossession. 
In the Before theafterwardscurrently across thescientificcommunity.capitalismin Germanyright-wingthe systemSociety ofpoliticiandirection:went on toremoval of New York apartmentsindicationduring theunless thehistoricalhad been adefinitiveingredientattendanceCenter forprominencereadyStatestrategiesbut in theas part ofconstituteclaim thatlaboratorycompatiblefailure of, such as began withusing the to providefeature offrom which/" class="geologicalseveral ofdeliberateimportant holds thating&quot; valign=topthe Germanoutside ofnegotiatedhis careerseparationid="searchwas calledthe fourthrecreationother thanpreventionwhile the education,connectingaccuratelywere builtwas killedagreementsmuch more Due to thewidth: 100some otherKingdom ofthe entirefamous forto connectobjectivesthe Frenchpeople andfeatured">is said tostructuralreferendummost oftena separate-> <div id Official worldwide.aria-labelthe planetand it wasd" value="looking atbeneficialare in themonitoringreportedlythe modernworking onallowed towhere the innovative</a></div>soundtracksearchFormtend to beinput id="opening ofrestrictedadopted byaddressingtheologianmethods ofvariant ofChristian very largeautomotiveby far therange frompursuit offollow thebrought toin Englandagree thataccused ofcomes frompreventingdiv style=his or hertremendousfreedom ofconcerning0 1em 1em;Basketball/style.cssan earliereven after/" title=".com/indextaking thepittsburghcontent"> <script>(fturned outhaving the</span> occasionalbecause itstarted tophysically></div> created byCurrently, bgcolor="tabindex="disastrousAnalytics also has a><div id="</style> <called forsinger and.src = "//violationsthis pointconstantlyis locatedrecordingsd from 
thenederlandsportuguêsעבריתفارسیdesarrollocomentarioeducaciónseptiembreregistradodirecciónubicaciónpublicidadrespuestasresultadosimportantereservadosartículosdiferentessiguientesrepúblicasituaciónministerioprivacidaddirectorioformaciónpoblaciónpresidentecontenidosaccesoriostechnoratipersonalescategoríaespecialesdisponibleactualidadreferenciavalladolidbibliotecarelacionescalendariopolíticasanterioresdocumentosnaturalezamaterialesdiferenciaeconómicatransporterodríguezparticiparencuentrandiscusiónestructurafundaciónfrecuentespermanentetotalmenteможнобудетможетвремятакжечтобыболееоченьэтогокогдапослевсегосайтечерезмогутсайтажизнимеждубудутПоискздесьвидеосвязинужносвоейлюдейпорномногодетейсвоихправатакойместоимеетжизньоднойлучшепередчастичастьработновыхправособойпотомменеечисленовыеуслугоколоназадтакоетогдапочтиПослетакиеновыйстоиттакихсразуСанктфорумКогдакнигислованашейнайтисвоимсвязьлюбойчастосредиКромеФорумрынкесталипоисктысячмесяццентртрудасамыхрынкаНовыйчасовместафильммартастранместетекстнашихминутимениимеютномергородсамомэтомуконцесвоемкакойАрхивمنتدىإرسالرسالةالعامكتبهابرامجاليومالصورجديدةالعضوإضافةالقسمالعابتحميلملفاتملتقىتعديلالشعرأخبارتطويرعليكمإرفاقطلباتاللغةترتيبالناسالشيخمنتديالعربالقصصافلامعليهاتحديثاللهمالعملمكتبةيمكنكالطفلفيديوإدارةتاريخالصحةتسجيلالوقتعندمامدينةتصميمأرشيفالذينعربيةبوابةألعابالسفرمشاكلتعالىالأولالسنةجامعةالصحفالدينكلماتالخاصالملفأعضاءكتابةالخيررسائلالقلبالأدبمقاطعمراسلمنطقةالكتبالرجلاشتركالقدميعطيكsByTagName(.jpg" alt="1px solid #.gif" alt="transparentinformationapplication" onclick="establishedadvertising.png" alt="environmentperformanceappropriate&amp;mdash;immediately</strong></rather thantemperaturedevelopmentcompetitionplaceholdervisibility:copyright">0" height="even thoughreplacementdestinationCorporation<ul class="AssociationindividualsperspectivesetTimeout(url(http://mathematicsmargin-top:eventually description) no-repeatcollections.JPG|thumb|participate/head><bodyfloat:left;<li class="hundreds of However, 
compositionclear:both;cooperationwithin the label for="border-top:New Zealandrecommendedphotographyinteresting&lt;sup&gt;controversyNetherlandsalternativemaxlength="switzerlandDevelopmentessentially Although </textarea>thunderbirdrepresented&amp;ndash;speculationcommunitieslegislationelectronics <div id="illustratedengineeringterritoriesauthoritiesdistributed6" height="sans-serif;capable of disappearedinteractivelooking forit would beAfghanistanwas createdMath.floor(surroundingcan also beobservationmaintenanceencountered<h2 class="more recentit has beeninvasion of).getTime()fundamentalDespite the"><div id="inspirationexaminationpreparationexplanation<input id="</a></span>versions ofinstrumentsbefore the = 'http://Descriptionrelatively .substring(each of theexperimentsinfluentialintegrationmany peopledue to the combinationdo not haveMiddle East<noscript><copyright" perhaps theinstitutionin Decemberarrangementmost famouspersonalitycreation oflimitationsexclusivelysovereignty-content"> <td class="undergroundparallel todoctrine ofoccupied byterminologyRenaissancea number ofsupport forexplorationrecognitionpredecessor<img src="/<h1 class="publicationmay also bespecialized</fieldset>progressivemillions ofstates thatenforcementaround the one another.parentNodeagricultureAlternativeresearcherstowards theMost of themany other (especially<td width=";width:100%independent<h3 class=" onchange=").addClass(interactionOne of the daughter ofaccessoriesbranches of <div id="the largestdeclarationregulationsInformationtranslationdocumentaryin order to"> <head> <" height="1across the orientation);</script>implementedcan be seenthere was ademonstratecontainer">connectionsthe Britishwas written!important;px; margin-followed byability to complicatedduring the immigrationalso called<h4 class="distinctionreplaced bygovernmentslocation ofin Novemberwhether the</p> </div>acquisitioncalled the persecutiondesignation{font-size:appeared ininvestigateexperiencedmost likelywidely 
useddiscussionspresence of (document.extensivelyIt has beenit does notcontrary toinhabitantsimprovementscholarshipconsumptioninstructionfor exampleone or morepx; paddingthe currenta series ofare usuallyrole in thepreviously derivativesevidence ofexperiencescolorschemestated thatcertificate</a></div> selected="high schoolresponse tocomfortableadoption ofthree yearsthe countryin Februaryso that thepeople who provided by<param nameaffected byin terms ofappointmentISO-8859-1"was born inhistorical regarded asmeasurementis based on and other : function(significantcelebrationtransmitted/js/jquery.is known astheoretical tabindex="it could be<noscript> having been <head> < &quot;The compilationhe had beenproduced byphilosopherconstructedintended toamong othercompared toto say thatEngineeringa differentreferred todifferencesbelief thatphotographsidentifyingHistory of Republic ofnecessarilyprobabilitytechnicallyleaving thespectacularfraction ofelectricityhead of therestaurantspartnershipemphasis onmost recentshare with saying thatfilled withdesigned toit is often"></iframe>as follows:merged withthrough thecommercial pointed outopportunityview of therequirementdivision ofprogramminghe receivedsetInterval"></span></in New Yorkadditional compression <div id="incorporate;</script><attachEventbecame the " target="_carried outSome of thescience andthe time ofContainer">maintainingChristopherMuch of thewritings of" height="2size of theversion of mixture of between theExamples ofeducationalcompetitive onsubmit="director ofdistinctive/DTD XHTML relating totendency toprovince ofwhich woulddespite thescientific legislature.innerHTML allegationsAgriculturewas used inapproach tointelligentyears later,sans-serifdeterminingPerformanceappearances, which is foundationsabbreviatedhigher thans from the individual composed ofsupposed toclaims thatattributionfont-size:1elements ofHistorical his brotherat the timeanniversarygoverned byrelated to ultimately innovationsit is stillcan only 
bedefinitionstoGMTStringA number ofimg class="Eventually,was changedoccurred inneighboringdistinguishwhen he wasintroducingterrestrialMany of theargues thatan Americanconquest ofwidespread were killedscreen and In order toexpected todescendantsare locatedlegislativegenerations backgroundmost peopleyears afterthere is nothe highestfrequently they do notargued thatshowed thatpredominanttheologicalby the timeconsideringshort-lived</span></a>can be usedvery littleone of the had alreadyinterpretedcommunicatefeatures ofgovernment,</noscript>entered the" height="3Independentpopulationslarge-scale. Although used in thedestructionpossibilitystarting intwo or moreexpressionssubordinatelarger thanhistory and</option> Continentaleliminatingwill not bepractice ofin front ofsite of theensure thatto create amississippipotentiallyoutstandingbetter thanwhat is nowsituated inmeta name="TraditionalsuggestionsTranslationthe form ofatmosphericideologicalenterprisescalculatingeast of theremnants ofpluginspage/index.php?remained intransformedHe was alsowas alreadystatisticalin favor ofMinistry ofmovement offormulationis required<link rel="This is the <a href="/popularizedinvolved inare used toand severalmade by theseems to belikely thatPalestiniannamed afterit had beenmost commonto refer tobut this isconsecutivetemporarilyIn general,conventionstakes placesubdivisionterritorialoperationalpermanentlywas largelyoutbreak ofin the pastfollowing a xmlns:og="><a class="class="textConversion may be usedmanufactureafter beingclearfix"> question ofwas electedto become abecause of some peopleinspired bysuccessful a time whenmore commonamongst thean officialwidth:100%;technology,was adoptedto keep thesettlementslive birthsindex.html"Connecticutassigned to&amp;times;account foralign=rightthe companyalways beenreturned toinvolvementBecause thethis period" name="q" confined toa result ofvalue="" />is actuallyEnvironment </head> Conversely,> <div id="0" width="1is probablyhave becomecontrollingthe 
problemcitizens ofpoliticiansreached theas early as:none; over<table cellvalidity ofdirectly toonmousedownwhere it iswhen it wasmembers of relation toaccommodatealong with In the latethe Englishdelicious">this is notthe presentif they areand finallya matter of </div> </script>faster thanmajority ofafter whichcomparativeto maintainimprove theawarded theer" class="frameborderrestorationin the sameanalysis oftheir firstDuring the continentalsequence offunction(){font-size: work on the</script> <begins withjavascript:constituentwas foundedequilibriumassume thatis given byneeds to becoordinatesthe variousare part ofonly in thesections ofis a commontheories ofdiscoveriesassociationedge of thestrength ofposition inpresent-dayuniversallyto form thebut insteadcorporationattached tois commonlyreasons for &quot;the can be madewas able towhich meansbut did notonMouseOveras possibleoperated bycoming fromthe primaryaddition offor severaltransferreda period ofare able tohowever, itshould havemuch larger </script>adopted theproperty ofdirected byeffectivelywas broughtchildren ofProgramminglonger thanmanuscriptswar againstby means ofand most ofsimilar to proprietaryoriginatingprestigiousgrammaticalexperience.to make theIt was alsois found incompetitorsin the U.S.replace thebrought thecalculationfall of thethe generalpracticallyin honor ofreleased inresidentialand some ofking of thereaction to1st Earl ofculture andprincipally</title> they can beback to thesome of hisexposure toare similarform of theaddFavoritecitizenshippart in thepeople within practiceto continue&amp;minus;approved by the first allowed theand for thefunctioningplaying thesolution toheight="0" in his bookmore than afollows thecreated thepresence in&nbsp;</td>nationalistthe idea ofa characterwere forced class="btndays of thefeatured inshowing theinterest inin place ofturn of thethe head ofLord of thepoliticallyhas its ownEducationalapproval ofsome of theeach other,behavior ofand becauseand anotherappeared onrecorded 
inblack&quot;may includethe world'scan lead torefers to aborder="0" government winning theresulted in while the Washington,the subjectcity in the></div> reflect theto completebecame moreradioactiverejected bywithout anyhis father,which couldcopy of theto indicatea politicalaccounts ofconstitutesworked wither</a></li>of his lifeaccompaniedclientWidthprevent theLegislativedifferentlytogether inhas severalfor anothertext of thefounded thee with the is used forchanged theusually theplace wherewhereas the> <a href=""><a href="themselves,although hethat can betraditionalrole of theas a resultremoveChilddesigned bywest of theSome peopleproduction,side of thenewslettersused by thedown to theaccepted bylive in theattempts tooutside thefrequenciesHowever, inprogrammersat least inapproximatealthough itwas part ofand variousGovernor ofthe articleturned into><a href="/the economyis the mostmost widelywould laterand perhapsrise to theoccurs whenunder whichconditions.the westerntheory thatis producedthe city ofin which heseen in thethe centralbuilding ofmany of hisarea of theis the onlymost of themany of thethe WesternThere is noextended toStatisticalcolspan=2 |short storypossible totopologicalcritical ofreported toa Christiandecision tois equal toproblems ofThis can bemerchandisefor most ofno evidenceeditions ofelements in&quot;. 
Thecom/images/which makesthe processremains theliterature,is a memberthe popularthe ancientproblems intime of thedefeated bybody of thea few yearsmuch of thethe work ofCalifornia,served as agovernment.concepts ofmovement in <div id="it" value="language ofas they areproduced inis that theexplain thediv></div> However thelead to the <a href="/was grantedpeople havecontinuallywas seen asand relatedthe role ofproposed byof the besteach other.Constantinepeople fromdialects ofto revisionwas renameda source ofthe initiallaunched inprovide theto the westwhere thereand similarbetween twois also theEnglish andconditions,that it wasentitled tothemselves.quantity ofransparencythe same asto join thecountry andthis is theThis led toa statementcontrast tolastIndexOfthrough hisis designedthe term isis providedprotect theng</a></li>The currentthe site ofsubstantialexperience,in the Westthey shouldslovenčinacomentariosuniversidadcondicionesactividadesexperienciatecnologíaproducciónpuntuaciónaplicacióncontraseñacategoríasregistrarseprofesionaltratamientoregístratesecretaríaprincipalesprotecciónimportantesimportanciaposibilidadinteresantecrecimientonecesidadessuscribirseasociacióndisponiblesevaluaciónestudiantesresponsableresoluciónguadalajararegistradosoportunidadcomercialesfotografíaautoridadesingenieríatelevisióncompetenciaoperacionesestablecidosimplementeactualmentenavegaciónconformidadline-height:font-family:" : "http://applicationslink" href="specifically//<![CDATA[ Organizationdistribution0px; height:relationshipdevice-width<div class="<label for="registration</noscript> /index.html"window.open( !important;application/independence//www.googleorganizationautocompleterequirementsconservative<form name="intellectualmargin-left:18th centuryan importantinstitutionsabbreviation<img class="organisationcivilization19th centuryarchitectureincorporated20th century-container">most notably/></a></div>notification'undefined')Furthermore,believe thatinnerHTML = prior to 
thedramaticallyreferring tonegotiationsheadquartersSouth AfricaunsuccessfulPennsylvaniaAs a result,<html lang="&lt;/sup&gt;dealing withphiladelphiahistorically);</script> padding-top:experimentalgetAttributeinstructionstechnologiespart of the =function(){subscriptionl.dtd"> <htgeographicalConstitution', function(supported byagriculturalconstructionpublicationsfont-size: 1a variety of<div style="Encyclopediaiframe src="demonstratedaccomplisheduniversitiesDemographics);</script><dedicated toknowledge ofsatisfactionparticularly</div></div>English (US)appendChild(transmissions. However, intelligence" tabindex="float:right;Commonwealthranging fromin which theat least onereproductionencyclopedia;font-size:1jurisdictionat that time"><a class="In addition,description+conversationcontact withis generallyr" content="representing&lt;math&gt;presentationoccasionally<img width="navigation">compensationchampionshipmedia="all" violation ofreference toreturn true;Strict//EN" transactionsinterventionverificationInformation difficultiesChampionshipcapabilities<![endif]-->} </script> Christianityfor example,Professionalrestrictionssuggest thatwas released(such as theremoveClass(unemploymentthe Americanstructure of/index.html published inspan class=""><a href="/introductionbelonging toclaimed thatconsequences<meta name="Guide to theoverwhelmingagainst the concentrated, .nontouch observations</a> </div> f (document.border: 1px {font-size:1treatment of0" height="1modificationIndependencedivided intogreater thanachievementsestablishingJavaScript" neverthelesssignificanceBroadcasting>&nbsp;</td>container"> such as the influence ofa particularsrc='http://navigation" half of the substantial &nbsp;</div>advantage ofdiscovery offundamental metropolitanthe opposite" xml:lang="deliberatelyalign=centerevolution ofpreservationimprovementsbeginning inJesus ChristPublicationsdisagreementtext-align:r, function()similaritiesbody></html>is currentlyalphabeticalis sometimestype="image/many of the 
flow:hidden;available indescribe theexistence ofall over thethe Internet <ul class="installationneighborhoodarmed forcesreducing thecontinues toNonetheless,temperatures <a href="close to theexamples of is about the(see below)." id="searchprofessionalis availablethe official </script> <div id="accelerationthrough the Hall of Famedescriptionstranslationsinterference type='text/recent yearsin the worldvery popular{background:traditional some of the connected toexploitationemergence ofconstitutionA History ofsignificant manufacturedexpectations><noscript><can be foundbecause the has not beenneighbouringwithout the added to the <li class="instrumentalSoviet Unionacknowledgedwhich can bename for theattention toattempts to developmentsIn fact, the<li class="aimplicationssuitable formuch of the colonizationpresidentialcancelBubble Informationmost of the is describedrest of the more or lessin SeptemberIntelligencesrc="http://px; height: available tomanufacturerhuman rightslink href="/availabilityproportionaloutside the astronomicalhuman beingsname of the are found inare based onsmaller thana person whoexpansion ofarguing thatnow known asIn the earlyintermediatederived fromScandinavian</a></div> consider thean estimatedthe National<div id="pagresulting incommissionedanalogous toare required/ul> </div> was based onand became a&nbsp;&nbsp;t" value="" was capturedno more thanrespectivelycontinue to > <head> <were createdmore generalinformation used for theindependent the Imperialcomponent ofto the northinclude the Constructionside of the would not befor instanceinvention ofmore complexcollectivelybackground: text-align: its originalinto accountthis processan extensivehowever, thethey are notrejected thecriticism ofduring whichprobably thethis article(function(){It should bean agreementaccidentallydiffers fromArchitecturebetter knownarrangementsinfluence onattended theidentical tosouth of thepass throughxml" title="weight:bold;creating thedisplay:nonereplaced the<img 
src="/ihttps://www.World War IItestimonialsfound in therequired to and that thebetween the was designedconsists of considerablypublished bythe languageConservationconsisted ofrefer to theback to the css" media="People from available onproved to besuggestions"was known asvarieties oflikely to becomprised ofsupport the hands of thecoupled withconnect and border:none;performancesbefore beinglater becamecalculationsoften calledresidents ofmeaning that><li class="evidence forexplanationsenvironments"></a></div>which allowsIntroductiondeveloped bya wide rangeon behalf ofvalign="top"principle ofat the time,</noscript> said to havein the firstwhile othershypotheticalphilosopherspower of thecontained inperformed byinability towere writtenspan style="input name="the questionintended forrejection ofimplies thatinvented thethe standardwas probablylink betweenprofessor ofinteractionschanging theIndian Ocean class="lastworking with'http://www.years beforeThis was therecreationalentering themeasurementsan extremelyvalue of thestart of the </script> an effort toincrease theto the southspacing="0">sufficientlythe Europeanconverted toclearTimeoutdid not haveconsequentlyfor the nextextension ofeconomic andalthough theare producedand with theinsufficientgiven by thestating thatexpenditures</span></a> thought thaton the basiscellpadding=image of thereturning toinformation,separated byassassinateds" content="authority ofnorthwestern</div> <div "></div> consultationcommunity ofthe nationalit should beparticipants align="leftthe greatestselection ofsupernaturaldependent onis mentionedallowing thewas inventedaccompanyinghis personalavailable atstudy of theon the otherexecution ofHuman Rightsterms of theassociationsresearch andsucceeded bydefeated theand from thebut they arecommander ofstate of theyears of agethe study of<ul class="splace in thewhere he was<li class="fthere are nowhich becamehe publishedexpressed into which thecommissionerfont-weight:territory ofextensions">Roman 
Empireequal to theIn contrast,however, andis typicallyand his wife(also called><ul class="effectively evolved intoseem to havewhich is thethere was noan excellentall of thesedescribed byIn practice,broadcastingcharged withreflected insubjected tomilitary andto the pointeconomicallysetTargetingare actuallyvictory over();</script>continuouslyrequired forevolutionaryan effectivenorth of the, which was front of theor otherwisesome form ofhad not beengenerated byinformation.permitted toincludes thedevelopment,entered intothe previousconsistentlyare known asthe field ofthis type ofgiven to thethe title ofcontains theinstances ofin the northdue to theirare designedcorporationswas that theone of thesemore popularsucceeded insupport fromin differentdominated bydesigned forownership ofand possiblystandardizedresponseTextwas intendedreceived theassumed thatareas of theprimarily inthe basis ofin the senseaccounts fordestroyed byat least twowas declaredcould not beSecretary ofappear to bemargin-top:1/^\s+|\s+$/ge){throw e};the start oftwo separatelanguage andwho had beenoperation ofdeath of thereal numbers <link rel="provided thethe story ofcompetitionsenglish (UK)english 
(US)МонголСрпскисрпскисрпскоلعربية正體中文简体中文繁体中文有限公司人民政府阿里巴巴社会主义操作系统政策法规informaciónherramientaselectrónicodescripciónclasificadosconocimientopublicaciónrelacionadasinformáticarelacionadosdepartamentotrabajadoresdirectamenteayuntamientomercadoLibrecontáctenoshabitacionescumplimientorestaurantesdisposiciónconsecuenciaelectrónicaaplicacionesdesconectadoinstalaciónrealizaciónutilizaciónenciclopediaenfermedadesinstrumentosexperienciasinstituciónparticularessubcategoriaтолькоРоссииработыбольшепростоможетедругихслучаесейчасвсегдаРоссияМоскведругиегородавопросданныхдолжныименноМосквырублейМосквастраныничегоработедолженуслугитеперьОднакопотомуработуапрелявообщеодногосвоегостатьидругойфорумехорошопротивссылкакаждыйвластигруппывместеработасказалпервыйделатьденьгипериодбизнесосновемоменткупитьдолжнарамкахначалоРаботаТолькосовсемвторойначаласписокслужбысистемпечатиновогопомощисайтовпочемупомощьдолжноссылкибыстроданныемногиепроектСейчасмоделитакогоонлайнгородеверсиястранефильмыуровняразныхискатьнеделюянваряменьшемногихданнойзначитнельзяфорумаТеперьмесяцазащитыЛучшиеनहींकरनेअपनेकियाकरेंअन्यक्यागाइडबारेकिसीदियापहलेसिंहभारतअपनीवालेसेवाकरतेमेरेहोनेसकतेबहुतसाइटहोगाजानेमिनटकरताकरनाउनकेयहाँसबसेभाषाआपकेलियेशुरूइसकेघंटेमेरीसकतामेरालेकरअधिकअपनासमाजमुझेकारणहोताकड़ीयहांहोटलशब्दलियाजीवनजाताकैसेआपकावालीदेनेपूरीपानीउसकेहोगीबैठकआपकीवर्षगांवआपकोजिलाजानासहमतहमेंउनकीयाहूदर्जसूचीपसंदसवालहोनाहोतीजैसेवापसजनतानेताजारीघायलजिलेनीचेजांचपत्रगूगलजातेबाहरआपनेवाहनइसकासुबहरहनेइससेसहितबड़ेघटनातलाशपांचश्रीबड़ीहोतेसाईटशायदसकतीजातीवालाहजारपटनारखनेसड़कमिलाउसकीकेवललगताखानाअर्थजहांदेखापहलीनियमबिनाबैंककहींकहनादेताहमलेकाफीजबकितुरतमांगवहींरोज़मिलीआरोपसेनायादवलेनेखाताकरीबउनकाजवाबपूराबड़ासौदाशेयरकियेकहांअकसरबनाएवहांस्थलमिलेलेखकविषयक्रंसमूहथानाتستطيعمشاركةبواسطةالصفحةمواضيعالخاصةالمزيدالعامةالكاتبالردودبرنامجالدولةالعالمالموقعالعربيالسريعالجوالالذهابالحياةالحقوقالكريمالعراقمحفوظةالثانيمشاهدةالمرأةالقرآنالشبابالحوارالجديدالأسرةالعلوممجموعةالرحمنالنقاطفلسطينالكويتالدنيابركاتهالرياضتحياتيبتوقيتالأولىالبريدالكلامالرابطالشخصيسيار
اتالثالثالصلاةالحديثالزوارالخليجالجميعالعامهالجمالالساعةمشاهدهالرئيسالدخولالفنيةالكتابالدوريالدروساستغرقتصاميمالبناتالعظيمentertainmentunderstanding = function().jpg" width="configuration.png" width="<body class="Math.random()contemporary United Statescircumstances.appendChild(organizations<span class=""><img src="/distinguishedthousands of communicationclear"></div>investigationfavicon.ico" margin-right:based on the Massachusettstable border=internationalalso known aspronunciationbackground:#fpadding-left:For example, miscellaneous&lt;/math&gt;psychologicalin particularearch" type="form method="as opposed toSupreme Courtoccasionally Additionally,North Americapx;backgroundopportunitiesEntertainment.toLowerCase(manufacturingprofessional combined withFor instance,consisting of" maxlength="return false;consciousnessMediterraneanextraordinaryassassinationsubsequently button type="the number ofthe original comprehensiverefers to the</ul> </div> philosophicallocation.hrefwas publishedSan Francisco(function(){ <div id="mainsophisticatedmathematical /head> <bodysuggests thatdocumentationconcentrationrelationshipsmay have been(for example,This article in some casesparts of the definition ofGreat Britain cellpadding=equivalent toplaceholder="; font-size: justificationbelieved thatsuffered fromattempted to leader of thecript" src="/(function() {are available <link rel=" src='http://interested inconventional " alt="" /></are generallyhas also beenmost popular correspondingcredited withtyle="border:</a></span></.gif" width="<iframe src="table class="inline-block;according to together withapproximatelyparliamentarymore and moredisplay:none;traditionallypredominantly&nbsp;|&nbsp;&nbsp;</span> cellspacing=<input name="or" content="controversialproperty="og:/x-shockwave-demonstrationsurrounded byNevertheless,was the firstconsiderable Although the collaborationshould not beproportion of<span style="known as the shortly afterfor instance,described as /head> <body starting 
withincreasingly the fact thatdiscussion ofmiddle of thean individualdifficult to point of viewhomosexualityacceptance of</span></div>manufacturersorigin of thecommonly usedimportance ofdenominationsbackground: #length of thedeterminationa significant" border="0">revolutionaryprinciples ofis consideredwas developedIndo-Europeanvulnerable toproponents ofare sometimescloser to theNew York City name="searchattributed tocourse of themathematicianby the end ofat the end of" border="0" technological.removeClass(branch of theevidence that![endif]--> Institute of into a singlerespectively.and thereforeproperties ofis located insome of whichThere is alsocontinued to appearance of &amp;ndash; describes theconsiderationauthor of theindependentlyequipped withdoes not have</a><a href="confused with<link href="/at the age ofappear in theThese includeregardless ofcould be used style=&quot;several timesrepresent thebody> </html>thought to bepopulation ofpossibilitiespercentage ofaccess to thean attempt toproduction ofjquery/jquerytwo differentbelong to theestablishmentreplacing thedescription" determine theavailable forAccording to wide range of <div class="more commonlyorganisationsfunctionalitywas completed &amp;mdash; participationthe characteran additionalappears to befact that thean example ofsignificantlyonmouseover="because they async = true;problems withseems to havethe result of src="http://familiar withpossession offunction () {took place inand sometimessubstantially<span></span>is often usedin an attemptgreat deal ofEnvironmentalsuccessfully virtually all20th century,professionalsnecessary to determined bycompatibilitybecause it isDictionary ofmodificationsThe followingmay refer to:Consequently,Internationalalthough somethat would beworld's firstclassified asbottom of the(particularlyalign="left" most commonlybasis for thefoundation ofcontributionspopularity ofcenter of theto reduce thejurisdictionsapproximation onmouseout="New Testamentcollection of</span></a></in the 
Unitedfilm director-strict.dtd">has been usedreturn to thealthough thischange in theseveral otherbut there areunprecedentedis similar toespecially inweight: bold;is called thecomputationalindicate thatrestricted to <meta name="are typicallyconflict withHowever, the An example ofcompared withquantities ofrather than aconstellationnecessary forreported thatspecificationpolitical and&nbsp;&nbsp;<references tothe same yearGovernment ofgeneration ofhave not beenseveral yearscommitment to <ul class="visualization19th century,practitionersthat he wouldand continuedoccupation ofis defined ascentre of thethe amount of><div style="equivalent ofdifferentiatebrought aboutmargin-left: automaticallythought of asSome of these <div class="input class="replaced withis one of theeducation andinfluenced byreputation as <meta name="accommodation</div> </div>large part ofInstitute forthe so-called against the In this case,was appointedclaimed to beHowever, thisDepartment ofthe remainingeffect on theparticularly deal with the <div style="almost alwaysare currentlyexpression ofphilosophy offor more thancivilizationson the islandselectedIndexcan result in" value="" />the structure /></a></div>Many of thesecaused by theof the Unitedspan class="mcan be tracedis related tobecame one ofis frequentlyliving in thetheoreticallyFollowing theRevolutionarygovernment inis determinedthe politicalintroduced insufficient todescription">short storiesseparation ofas to whetherknown for itswas initiallydisplay:blockis an examplethe principalconsists of arecognized as/body></html>a substantialreconstructedhead of stateresistance toundergraduateThere are twogravitationalare describedintentionallyserved as theclass="headeropposition tofundamentallydominated theand the otheralliance withwas forced torespectively,and politicalin support ofpeople in the20th century.and publishedloadChartbeatto understandmember statesenvironmentalfirst half ofcountries andarchitecturalbe 
consideredcharacterizedclearIntervalauthoritativeFederation ofwas succeededand there area consequencethe Presidentalso includedfree softwaresuccession ofdeveloped thewas destroyedaway from the; </script> <although theyfollowed by amore powerfulresulted in aUniversity ofHowever, manythe presidentHowever, someis thought tountil the endwas announcedare importantalso includes><input type=the center of DO NOT ALTERused to referthemes/?sort=that had beenthe basis forhas developedin the summercomparativelydescribed thesuch as thosethe resultingis impossiblevarious otherSouth Africanhave the sameeffectivenessin which case; text-align:structure and; background:regarding thesupported theis also knownstyle="marginincluding thebahasa Melayunorsk bokmålnorsk nynorskslovenščinainternacionalcalificacióncomunicaciónconstrucción"><div class="disambiguationDomainName', 'administrationsimultaneouslytransportationInternational margin-bottom:responsibility<![endif]--> </><meta name="implementationinfrastructurerepresentationborder-bottom:</head> <body>=http%3A%2F%2F<form method="method="post" /favicon.ico" }); </script> .setAttribute(Administration= new Array();<![endif]--> display:block;Unfortunately,">&nbsp;</div>/favicon.ico">='stylesheet' identification, for example,<li><a href="/an alternativeas a result ofpt"></script> type="submit" (function() {recommendationform action="/transformationreconstruction.style.display According to hidden" name="along with thedocument.body.approximately Communicationspost" action="meaning &quot;--<![endif]-->Prime Ministercharacteristic</a> <a class=the history of onmouseover="the governmenthref="https://was originallywas introducedclassificationrepresentativeare considered<![endif]--> depends on theUniversity of in contrast to placeholder="in the case ofinternational constitutionalstyle="border-: function() {Because of the-strict.dtd"> <table class="accompanied byaccount of the<script src="/nature of the the people in in addition tos); js.id = id" 
width="100%"regarding the Roman Catholican independentfollowing the .gif" width="1the following discriminationarchaeologicalprime minister.js"></script>combination of marginwidth="createElement(w.attachEvent(</a></td></tr>src="https://aIn particular, align="left" Czech RepublicUnited Kingdomcorrespondenceconcluded that.html" title="(function () {comes from theapplication of<span class="sbelieved to beement('script'</a> </li> <livery different><span class="option value="(also known as <li><a href="><input name="separated fromreferred to as valign="top">founder of theattempting to carbon dioxide <div class="class="search-/body> </html>opportunity tocommunications</head> <body style="width:Tiếng Việtchanges in theborder-color:#0" border="0" </span></div><was discovered" type="text" ); </script> Department of ecclesiasticalthere has beenresulting from</body></html>has never beenthe first timein response toautomatically </div> <div iwas consideredpercent of the" /></a></div>collection of descended fromsection of theaccept-charsetto be confusedmember of the padding-right:translation ofinterpretation href='http://whether or notThere are alsothere are manya small numberother parts ofimpossible to class="buttonlocated in the. 
However, theand eventuallyAt the end of because of itsrepresents the<form action=" method="post"it is possiblemore likely toan increase inhave also beencorresponds toannounced thatalign="right">many countriesfor many yearsearliest knownbecause it waspt"></script> valign="top" inhabitants offollowing year <div class="million peoplecontroversial concerning theargue that thegovernment anda reference totransferred todescribing the style="color:although therebest known forsubmit" name="multiplicationmore than one recognition ofCouncil of theedition of the <meta name="Entertainment away from the ;margin-right:at the time ofinvestigationsconnected withand many otheralthough it isbeginning with <span class="descendants of<span class="i align="right"</head> <body aspects of thehas since beenEuropean Unionreminiscent ofmore difficultVice Presidentcomposition ofpassed throughmore importantfont-size:11pxexplanation ofthe concept ofwritten in the <span class="is one of the resemblance toon the groundswhich containsincluding the defined by thepublication ofmeans that theoutside of thesupport of the<input class="<span class="t(Math.random()most prominentdescription ofConstantinoplewere published<div class="seappears in the1" height="1" most importantwhich includeswhich had beendestruction ofthe population <div class="possibility ofsometimes usedappear to havesuccess of theintended to bepresent in thestyle="clear:b </script> <was founded ininterview with_id" content="capital of the <link rel="srelease of thepoint out thatxMLHttpRequestand subsequentsecond largestvery importantspecificationssurface of theapplied to theforeign policy_setDomainNameestablished inis believed toIn addition tomeaning of theis named afterto protect theis representedDeclaration ofmore efficientClassificationother forms ofhe returned to<span class="cperformance of(function() { if and only ifregions of theleading to therelations withUnited Nationsstyle="height:other than theype" content="Association of 
</head> <bodylocated on theis referred to(including theconcentrationsthe individualamong the mostthan any other/> <link rel=" return false;the purpose ofthe ability to;color:#fff} . <span class="the subject ofdefinitions of> <link rel="claim that thehave developed<table width="celebration ofFollowing the to distinguish<span class="btakes place inunder the namenoted that the><![endif]--> style="margin-instead of theintroduced thethe process ofincreasing thedifferences inestimated thatespecially the/div><div id="was eventuallythroughout histhe differencesomething thatspan></span></significantly ></script> environmental to prevent thehave been usedespecially forunderstand theis essentiallywere the firstis the largesthave been made" src="http://interpreted assecond half ofcrolling="no" is composed ofII, Holy Romanis expected tohave their owndefined as thetraditionally have differentare often usedto ensure thatagreement withcontaining theare frequentlyinformation onexample is theresulting in a</a></li></ul> class="footerand especiallytype="button" </span></span>which included> <meta name="considered thecarried out byHowever, it isbecame part ofin relation topopular in thethe capital ofwas officiallywhich has beenthe History ofalternative todifferent fromto support thesuggested thatin the process <div class="the foundationbecause of hisconcerned withthe universityopposed to thethe context of<span class="ptext" name="q" <div class="the scientificrepresented bymathematicianselected by thethat have been><div class="cdiv id="headerin particular,converted into); </script> <philosophical srpskohrvatskitiếng 
ViệtРусскийрусскийinvestigaciónparticipaciónкоторыеобластикоторыйчеловексистемыНовостикоторыхобластьвременикотораясегодняскачатьновостиУкраинывопросыкоторойсделатьпомощьюсредствобразомстороныучастиетечениеГлавнаяисториисистемарешенияСкачатьпоэтомуследуетсказатьтоваровконечнорешениекотороеоргановкоторомРекламаالمنتدىمنتدياتالموضوعالبرامجالمواقعالرسائلمشاركاتالأعضاءالرياضةالتصميمالاعضاءالنتائجالألعابالتسجيلالأقسامالضغطاتالفيديوالترحيبالجديدةالتعليمالأخبارالافلامالأفلامالتاريخالتقنيةالالعابالخواطرالمجتمعالديكورالسياحةعبداللهالتربيةالروابطالأدبيةالاخبارالمتحدةالاغانيcursor:pointer;</title> <meta " href="http://"><span class="members of the window.locationvertical-align:/a> | <a href="<!doctype html>media="screen" <option value="favicon.ico" /> <div class="characteristics" method="get" /body> </html> shortcut icon" document.write(padding-bottom:representativessubmit" value="align="center" throughout the science fiction <div class="submit" class="one of the most valign="top"><was established); </script> return false;">).style.displaybecause of the document.cookie<form action="/}body{margin:0;Encyclopedia ofversion of the .createElement(name" content="</div> </div> administrative </body> </html>history of the "><input type="portion of the as part of the &nbsp;<a href="other countries"> <div class="</span></span><In other words,display: block;control of the introduction of/> <meta name="as well as the in recent years <div class="</div> </div> inspired by thethe end of the compatible withbecame known as style="margin:.js"></script>< International there have beenGerman language style="color:#Communist Partyconsistent withborder="0" cell marginheight="the majority of" align="centerrelated to the many different Orthodox Churchsimilar to the /> <link rel="swas one of the until his death})(); </script>other languagescompared to theportions of thethe Netherlandsthe most commonbackground:url(argued that thescrolling="no" included in theNorth American the name of 
theinterpretationsthe traditionaldevelopment of frequently useda collection ofvery similar tosurrounding theexample of thisalign="center">would have beenimage_caption =attached to thesuggesting thatin the form of involved in theis derived fromnamed after theIntroduction torestrictions on style="width: can be used to the creation ofmost important information andresulted in thecollapse of theThis means thatelements of thewas replaced byanalysis of theinspiration forregarded as themost successfulknown as &quot;a comprehensiveHistory of the were consideredreturned to theare referred toUnsourced image> <div class="consists of thestopPropagationinterest in theavailability ofappears to haveelectromagneticenableServices(function of theIt is important</script></div>function(){var relative to theas a result of the position ofFor example, in method="post" was followed by&amp;mdash; thethe applicationjs"></script> ul></div></div>after the deathwith respect tostyle="padding:is particularlydisplay:inline; type="submit" is divided into中文 (简体)responsabilidadadministracióninternacionalescorrespondienteउपयोगपूर्वहमारेलोगोंचुनावलेकिनसरकारपुलिसखोजेंचाहिएभेजेंशामिलहमारीजागरणबनानेकुमारब्लॉगमालिकमहिलापृष्ठबढ़तेभाजपाक्लिकट्रेनखिलाफदौरानमामलेमतदानबाजारविकासक्योंचाहतेपहुँचबतायासंवाददेखनेपिछलेविशेषराज्यउत्तरमुंबईदोनोंउपकरणपढ़ेंस्थितफिल्ममुख्यअच्छाछूटतीसंगीतजाएगाविभागघण्टेदूसरेदिनोंहत्यासेक्सगांधीविश्वरातेंदैट्सनक्शासामनेअदालतबिजलीपुरूषहिंदीमित्रकवितारुपयेस्थानकरोड़मुक्तयोजनाकृपयापोस्टघरेलूकार्यविचारसूचनामूल्यदेखेंहमेशास्कूलमैंनेतैयारजिसकेrss+xml" title="-type" content="title" content="at the same time.js"></script> <" method="post" </span></a></li>vertical-align:t/jquery.min.js">.click(function( style="padding-})(); </script> </span><a href="<a href="http://); return false;text-decoration: scrolling="no" border-collapse:associated with Bahasa IndonesiaEnglish language<text xml:space=.gif" border="0"</body> </html> overflow:hidden;img src="http://addEventListenerresponsible for 
s.js"></script> /favicon.ico" />operating system" style="width:1target="_blank">State Universitytext-align:left; document.write(, including the around the world); </script> <" style="height:;overflow:hiddenmore informationan internationala member of the one of the firstcan be found in </div> </div> display: none;">" /> <link rel=" (function() {the 15th century.preventDefault(large number of Byzantine Empire.jpg|thumb|left|vast majority ofmajority of the align="center">University Pressdominated by theSecond World Wardistribution of style="position:the rest of the characterized by rel="nofollow">derives from therather than the a combination ofstyle="width:100English-speakingcomputer scienceborder="0" alt="the existence ofDemocratic Party" style="margin-For this reason,.js"></script> sByTagName(s)[0]js"></script> <.js"></script> link rel="icon" ' alt='' class='formation of theversions of the </a></div></div>/page> <page> <div class="contbecame the firstbahasa Indonesiaenglish (simple)ΕλληνικάхрватскикомпанииявляетсяДобавитьчеловекаразвитияИнтернетОтветитьнапримеринтернеткоторогостраницыкачествеусловияхпроблемыполучитьявляютсянаиболеекомпаниявниманиесредстваالمواضيعالرئيسيةالانتقالمشاركاتكالسياراتالمكتوبةالسعوديةاحصائياتالعالميةالصوتياتالانترنتالتصاميمالإسلاميالمشاركةالمرئياتrobots" content="<div id="footer">the United States<img src="http://.jpg|right|thumb|.js"></script> <location.protocolframeborder="0" s" /> <meta name="</a></div></div><font-weight:bold;&quot; and &quot;depending on the margin:0;padding:" rel="nofollow" President of the twentieth centuryevision> </pageInternet Explorera.async = true; information about<div id="header">" action="http://<a href="https://<div id="content"</div> </div> <derived from the <img src='http://according to the </body> </html> style="font-size:script language="Arial, Helvetica,</a><span class="</script><script political partiestd></tr></table><href="http://www.interpretation ofrel="stylesheet" document.write('<charset="utf-8"> 
beginning of the revealed that thetelevision series" rel="nofollow"> target="_blank">claiming that thehttp%3A%2F%2Fwww.manifestations ofPrime Minister ofinfluenced by theclass="clearfix">/div> </div> three-dimensionalChurch of Englandof North Carolinasquare kilometres.addEventListenerdistinct from thecommonly known asPhonetic Alphabetdeclared that thecontrolled by theBenjamin Franklinrole-playing gamethe University ofin Western Europepersonal computerProject Gutenbergregardless of thehas been proposedtogether with the></li><li class="in some countriesmin.js"></script>of the populationofficial language<img src="images/identified by thenatural resourcesclassification ofcan be consideredquantum mechanicsNevertheless, themillion years ago</body> </html> Ελληνικά take advantage ofand, according toattributed to theMicrosoft Windowsthe first centuryunder the controldiv class="headershortly after thenotable exceptiontens of thousandsseveral differentaround the world.reaching militaryisolated from theopposition to thethe Old TestamentAfrican Americansinserted into theseparate from themetropolitan areamakes it possibleacknowledged thatarguably the mosttype="text/css"> the InternationalAccording to the pe="text/css" /> coincide with thetwo-thirds of theDuring this time,during the periodannounced that hethe internationaland more recentlybelieved that theconsciousness andformerly known assurrounded by thefirst appeared inoccasionally usedposition:absolute;" target="_blank" position:relative;text-align:center;jax/libs/jquery/1.background-color:#type="application/anguage" content="<meta http-equiv="Privacy Policy</a>e("%3Cscript src='" target="_blank">On the other hand,.jpg|thumb|right|2</div><div class="<div style="float:nineteenth century</body> </html> <img src="http://s;text-align:centerfont-weight: bold; According to the difference between" frameborder="0" " style="position:link href="http://html4/loose.dtd"> during this period</td></tr></table>closely related tofor the 
first time;font-weight:bold;input type="text" <span style="font-onreadystatechange <div class="cleardocument.location. For example, the a wide variety of <!DOCTYPE html> <&nbsp;&nbsp;&nbsp;"><a href="http://style="float:left;concerned with the=http%3A%2F%2Fwww.in popular culturetype="text/css" />it is possible to Harvard Universitytylesheet" href="/the main characterOxford University name="keywords" cstyle="text-align:the United Kingdomfederal government<div style="margin depending on the description of the<div class="header.min.js"></script>destruction of theslightly differentin accordance withtelecommunicationsindicates that theshortly thereafterespecially in the European countriesHowever, there aresrc="http://staticsuggested that the" src="http://www.a large number of Telecommunications" rel="nofollow" tHoly Roman Emperoralmost exclusively" border="0" alt="Secretary of Stateculminating in theCIA World Factbookthe most importantanniversary of thestyle="background-<li><em><a href="/the Atlantic Oceanstrictly speaking,shortly before thedifferent types ofthe Ottoman Empire><img src="http://An Introduction toconsequence of thedeparture from theConfederate Statesindigenous peoplesProceedings of theinformation on thetheories have beeninvolvement in thedivided into threeadjacent countriesis responsible fordissolution of thecollaboration withwidely regarded ashis contemporariesfounding member ofDominican Republicgenerally acceptedthe possibility ofare also availableunder constructionrestoration of thethe general publicis almost entirelypasses through thehas been suggestedcomputer and videoGermanic languages according to the different from theshortly afterwardshref="https://www.recent developmentBoard of Directors<div class="search| <a href="http://In particular, theMultiple footnotesor other substancethousands of yearstranslation of the</div> </div> <a href="index.phpwas established inmin.js"></script> participate in thea strong influencestyle="margin-top:represented by 
thegraduated from theTraditionally, theElement("script");However, since the/div> </div> <div left; margin-left:protection against0; vertical-align:Unfortunately, thetype="image/x-icon/div> <div class=" class="clearfix"><div class="footer </div> </div> the motion pictureБългарскибългарскиФедерациинесколькосообщениесообщенияпрограммыОтправитьбесплатноматериалыпозволяетпоследниеразличныхпродукциипрограммаполностьюнаходитсяизбранноенаселенияизменениякатегорииАлександрद्वारामैनुअलप्रदानभारतीयअनुदेशहिन्दीइंडियादिल्लीअधिकारवीडियोचिट्ठेसमाचारजंक्शनदुनियाप्रयोगअनुसारऑनलाइनपार्टीशर्तोंलोकसभाफ़्लैशशर्तेंप्रदेशप्लेयरकेंद्रस्थितिउत्पादउन्हेंचिट्ठायात्राज्यादापुरानेजोड़ेंअनुवादश्रेणीशिक्षासरकारीसंग्रहपरिणामब्रांडबच्चोंउपलब्धमंत्रीसंपर्कउम्मीदमाध्यमसहायताशब्दोंमीडियाआईपीएलमोबाइलसंख्याआपरेशनअनुबंधबाज़ारनवीनतमप्रमुखप्रश्नपरिवारनुकसानसमर्थनआयोजितसोमवारالمشاركاتالمنتدياتالكمبيوترالمشاهداتعددالزوارعددالردودالإسلاميةالفوتوشوبالمسابقاتالمعلوماتالمسلسلاتالجرافيكسالاسلاميةالاتصالاتkeywords" content="w3.org/1999/xhtml"><a target="_blank" text/html; charset=" target="_blank"><table cellpadding="autocomplete="off" text-align: center;to last version by background-color: #" href="http://www./div></div><div id=<a href="#" class=""><img src="http://cript" src="http:// <script language="//EN" "http://www.wencodeURIComponent(" href="javascript:<div class="contentdocument.write('<scposition: absolute;script src="http:// style="margin-top:.min.js"></script> </div> <div class="w3.org/1999/xhtml" </body> </html>distinction between/" target="_blank"><link href="http://encoding="utf-8"?> w.addEventListener?action="http://www.icon" href="http:// style="background:type="text/css" /> meta property="og:t<input type="text" style="text-align:the development of tylesheet" type="tehtml; charset=utf-8is considered to betable width="100%" In addition to the contributed to the differences betweendevelopment of the It is important to </script> <script style="font-size:1></span><span id=gbLibrary of Congress<img 
src="http://imEnglish translationAcademy of Sciencesdiv style="display:construction of the.getElementById(id)in conjunction withElement('script'); <meta property="og:Български type="text" name=">Privacy Policy</a>administered by theenableSingleRequeststyle=&quot;margin:</div></div></div><><img src="http://i style=&quot;float:referred to as the total population ofin Washington, D.C. style="background-among other things,organization of theparticipated in thethe introduction ofidentified with thefictional character Oxford University misunderstanding ofThere are, however,stylesheet" href="/Columbia Universityexpanded to includeusually referred toindicating that thehave suggested thataffiliated with thecorrelation betweennumber of different></td></tr></table>Republic of Ireland </script> <script under the influencecontribution to theOfficial website ofheadquarters of thecentered around theimplications of thehave been developedFederal Republic ofbecame increasinglycontinuation of theNote, however, thatsimilar to that of capabilities of theaccordance with theparticipants in thefurther developmentunder the directionis often consideredhis younger brother</td></tr></table><a http-equiv="X-UA-physical propertiesof British Columbiahas been criticized(with the exceptionquestions about thepassing through the0" cellpadding="0" thousands of peopleredirects here. 
Forhave children under%3E%3C/script%3E"));<a href="http://www.<li><a href="http://site_name" content="text-decoration:nonestyle="display: none<meta http-equiv="X-new Date().getTime() type="image/x-icon"</span><span class="language="javascriptwindow.location.href<a href="javascript:--> <script type="t<a href='http://www.hortcut icon" href="</div> <div class="<script src="http://" rel="stylesheet" t</div> <script type=/a> <a href="http:// allowTransparency="X-UA-Compatible" conrelationship between </script> <script </a></li></ul></div>associated with the programming language</a><a href="http://</a></li><li class="form action="http://<div style="display:type="text" name="q"<table width="100%" background-position:" border="0" width="rel="shortcut icon" h6><ul><li><a href=" <meta http-equiv="css" media="screen" responsible for the " type="application/" style="background-html; charset=utf-8" allowtransparency="stylesheet" type="te <meta http-equiv="></span><span class="0" cellspacing="0">; </script> <script sometimes called thedoes not necessarilyFor more informationat the beginning of <!DOCTYPE html><htmlparticularly in the type="hidden" name="javascript:void(0);"effectiveness of the autocomplete="off" generally considered><input type="text" "></script> <scriptthroughout the worldcommon misconceptionassociation with the</div> </div> <div cduring his lifetime,corresponding to thetype="image/x-icon" an increasing numberdiplomatic relationsare often consideredmeta charset="utf-8" <input type="text" examples include the"><img src="http://iparticipation in thethe establishment of </div> <div class="&amp;nbsp;&amp;nbsp;to determine whetherquite different frommarked the beginningdistance between thecontributions to theconflict between thewidely considered towas one of the firstwith varying degreeshave speculated that(document.getElementparticipating in theoriginally developedeta charset="utf-8"> type="text/css" /> interchangeably withmore closely relatedsocial and 
politicalthat would otherwiseperpendicular to thestyle type="text/csstype="submit" name="families residing indeveloping countriescomputer programmingeconomic developmentdetermination of thefor more informationon several occasionsportuguês (Europeu)УкраїнськаукраїнськаРоссийскойматериаловинформацииуправлениянеобходимоинформацияИнформацияРеспубликиколичествоинформациютерриториидостаточноالمتواجدونالاشتراكاتالاقتراحاتhtml; charset=UTF-8" setTimeout(function()display:inline-block;<input type="submit" type = 'text/javascri<img src="http://www." "http://www.w3.org/shortcut icon" href="" autocomplete="off" </a></div><div class=</a></li> <li class="css" type="text/css" <form action="http://xt/css" href="http://link rel="alternate" <script type="text/ onclick="javascript:(new Date).getTime()}height="1" width="1" People's Republic of <a href="http://www.text-decoration:underthe beginning of the </div> </div> </div> establishment of the </div></div></div></d#viewport{min-height: <script src="http://option><option value=often referred to as /option> <option valu<!DOCTYPE html> <!--[International Airport> <a href="http://www</a><a href="http://wภาษาไทยქართული正體中文 (繁體)निर्देशडाउनलोडक्षेत्रजानकारीसंबंधितस्थापनास्वीकारसंस्करणसामग्रीचिट्ठोंविज्ञानअमेरिकाविभिन्नगाडियाँक्योंकिसुरक्षापहुँचतीप्रबंधनटिप्पणीक्रिकेटप्रारंभप्राप्तमालिकोंरफ़्तारनिर्माणलिमिटेडdescription" content="document.location.prot.getElementsByTagName(<!DOCTYPE html> <html <meta charset="utf-8">:url" content="http://.css" rel="stylesheet"style type="text/css">type="text/css" href="w3.org/1999/xhtml" xmltype="text/javascript" method="get" action="link rel="stylesheet" = document.getElementtype="image/x-icon" />cellpadding="0" cellsp.css" type="text/css" </a></li><li><a href="" width="1" height="1""><a href="http://www.style="display:none;">alternate" type="appli-//W3C//DTD XHTML 1.0 ellspacing="0" cellpad type="hidden" value="/a>&nbsp;<span role="s <input type="hidden" language="JavaScript" document.getElementsBg="0" 
cellspacing="0" ype="text/css" media="type='text/javascript'with the exception of ype="text/css" rel="st height="1" width="1" ='+encodeURIComponent(<link rel="alternate" body, tr, input, textmeta name="robots" conmethod="post" action="> <a href="http://www.css" rel="stylesheet" </div></div><div classlanguage="javascript">aria-hidden="true">·<ript" type="text/javasl=0;})(); (function(){background-image: url(/a></li><li><a href="h <li><a href="http://ator" aria-hidden="tru> <a href="http://www.language="javascript" /option> <option value/div></div><div class=rator" aria-hidden="tre=(new Date).getTime()português (do Brasil)организациивозможностьобразованиярегистрациивозможностиобязательна<!DOCTYPE html PUBLIC "nt-Type" content="text/<meta http-equiv="Conteransitional//EN" "http:<html xmlns="http://www-//W3C//DTD XHTML 1.0 TDTD/xhtml1-transitional//www.w3.org/TR/xhtml1/pe = 'text/javascript';<meta name="descriptionparentNode.insertBefore<input type="hidden" najs" type="text/javascri(document).ready(functiscript type="text/javasimage" content="http://UA-Compatible" content=tml; charset=utf-8" /> link rel="shortcut icon<link rel="stylesheet" </script> <script type== document.createElemen<a target="_blank" href= document.getElementsBinput type="text" name=a.type = 'text/javascrinput type="hidden" namehtml; charset=utf-8" />dtd"> <html xmlns="http-//W3C//DTD HTML 4.01 TentsByTagName('script')input type="hidden" nam<script type="text/javas" style="display:none;">document.getElementById(=document.createElement(' type='text/javascript'input type="text" name="d.getElementsByTagName(snical" href="http://www.C//DTD HTML 4.01 Transit<style type="text/css"> <style type="text/css">ional.dtd"> <html xmlns=http-equiv="Content-Typeding="0" cellspacing="0"html; charset=utf-8" /> style="display:none;"><<li><a href="http://www. type='text/javascript'>деятельностисоответствиипроизводствабезопасностиपुस्तिकाकांग्रेसउन्होंनेविधानसभाफिक्सिंगसुरक्षितकॉपीराइटविज्ञापनकार्रवाईसक्रियता
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./src/tests/JIT/Regression/CLR-x86-JIT/V1-M09.5-PDC/b14070/b14070.ilproj
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType>PdbOnly</DebugType> <Optimize>True</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="$(MSBuildProjectName).il" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType>PdbOnly</DebugType> <Optimize>True</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="$(MSBuildProjectName).il" /> </ItemGroup> </Project>
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./src/coreclr/jit/importer_vectorization.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif //------------------------------------------------------------------------ // importer_vectorization.cpp // // This file is responsible for various (partial) vectorizations during import phase, // e.g. the following APIs are currently supported: // // 1) String.Equals(string, string) // 2) String.Equals(string, string, StringComparison.Ordinal) // 3) str.Equals(string) // 4) str.Equals(String, StringComparison.Ordinal) // 5) str.StartsWith(string, StringComparison.Ordinal) // 6) MemoryExtensions.SequenceEqual<char>(ROS<char>, ROS<char>) // 7) MemoryExtensions.Equals(ROS<char>, ROS<char>, StringComparison.Ordinal) // 8) MemoryExtensions.StartsWith<char>(ROS<char>, ROS<char>) // 9) MemoryExtensions.StartsWith(ROS<char>, ROS<char>, StringComparison.Ordinal) // // When one of the arguments is a constant string of a [0..32] size so we can inline // a vectorized comparison against it using SWAR or SIMD techniques (e.g. via two V256 vectors) // // We might add these in future: // 1) OrdinalIgnoreCase for everything above // 2) Span.CopyTo // 3) Spans/Arrays of bytes (e.g. UTF8) against a constant RVA data // //------------------------------------------------------------------------ // impExpandHalfConstEqualsSIMD: Attempts to unroll and vectorize // Equals against a constant WCHAR data for Length in [8..32] range // using SIMD instructions. 
C# equivalent of what this function emits: // // bool IsTestString(ReadOnlySpan<char> span) // { // // Length and Null checks are not handled here // ref char s = ref MemoryMarshal.GetReference(span); // var v1 = Vector128.LoadUnsafe(ref s); // var v1 = Vector128.LoadUnsafe(ref s, span.Length - Vector128<ushort>.Count); // var cns1 = Vector128.Create('T', 'e', 's', 't', 'S', 't', 'r', 'i'); // var cns2 = Vector128.Create('s', 't', 'S', 't', 'r', 'i', 'n', 'g'); // return ((v1 ^ cns1) | (v2 ^ cns2)) == Vector<ushort>.Zero; // // // for: // // return span.SequenceEqual("TestString"); // } // // Arguments: // data - Pointer to a data to vectorize // cns - Constant data (array of 2-byte chars) // len - Number of chars in the cns // dataOffset - Offset for data // // Return Value: // A pointer to the newly created SIMD node or nullptr if unrolling is not // possible or not profitable // // Notes: // This function doesn't check obj for null or its Length, it's just an internal helper // for impExpandHalfConstEquals // GenTree* Compiler::impExpandHalfConstEqualsSIMD(GenTreeLclVar* data, WCHAR* cns, int len, int dataOffset) { assert(len >= 8 && len <= 32); #if defined(FEATURE_HW_INTRINSICS) && defined(TARGET_64BIT) if (!compOpportunisticallyDependsOn(InstructionSet_Vector128)) { // We need SSE2 or ADVSIMD at least return nullptr; } CorInfoType baseType = CORINFO_TYPE_ULONG; int simdSize; var_types simdType; NamedIntrinsic niZero; NamedIntrinsic niEquals; NamedIntrinsic niCreate; GenTree* cnsVec1; GenTree* cnsVec2; // Optimization: don't use two vectors for Length == 8 or 16 bool useSingleVector = false; #if defined(TARGET_XARCH) if (compOpportunisticallyDependsOn(InstructionSet_Vector256) && len >= 16) { // Handle [16..32] inputs via two Vector256 assert(len >= 16 && len <= 32); simdSize = 32; simdType = TYP_SIMD32; niZero = NI_Vector256_get_Zero; niEquals = NI_Vector256_op_Equality; niCreate = NI_Vector256_Create; // Special case: use a single vector for Length == 16 
useSingleVector = len == 16; assert(sizeof(ssize_t) == 8); // this code is guarded with TARGET_64BIT GenTree* long1 = gtNewIconNode(*(ssize_t*)(cns + 0), TYP_LONG); GenTree* long2 = gtNewIconNode(*(ssize_t*)(cns + 4), TYP_LONG); GenTree* long3 = gtNewIconNode(*(ssize_t*)(cns + 8), TYP_LONG); GenTree* long4 = gtNewIconNode(*(ssize_t*)(cns + 12), TYP_LONG); cnsVec1 = gtNewSimdHWIntrinsicNode(simdType, long1, long2, long3, long4, niCreate, baseType, simdSize); // cnsVec2 most likely overlaps with cnsVec1: GenTree* long5 = gtNewIconNode(*(ssize_t*)(cns + len - 16), TYP_LONG); GenTree* long6 = gtNewIconNode(*(ssize_t*)(cns + len - 12), TYP_LONG); GenTree* long7 = gtNewIconNode(*(ssize_t*)(cns + len - 8), TYP_LONG); GenTree* long8 = gtNewIconNode(*(ssize_t*)(cns + len - 4), TYP_LONG); cnsVec2 = gtNewSimdHWIntrinsicNode(simdType, long5, long6, long7, long8, niCreate, baseType, simdSize); } else #endif if (len <= 16) { // Handle [8..16] inputs via two Vector128 assert(len >= 8 && len <= 16); simdSize = 16; simdType = TYP_SIMD16; niZero = NI_Vector128_get_Zero; niEquals = NI_Vector128_op_Equality; niCreate = NI_Vector128_Create; // Special case: use a single vector for Length == 8 useSingleVector = len == 8; assert(sizeof(ssize_t) == 8); // this code is guarded with TARGET_64BIT GenTree* long1 = gtNewIconNode(*(ssize_t*)(cns + 0), TYP_LONG); GenTree* long2 = gtNewIconNode(*(ssize_t*)(cns + 4), TYP_LONG); cnsVec1 = gtNewSimdHWIntrinsicNode(simdType, long1, long2, niCreate, baseType, simdSize); // cnsVec2 most likely overlaps with cnsVec1: GenTree* long3 = gtNewIconNode(*(ssize_t*)(cns + len - 8), TYP_LONG); GenTree* long4 = gtNewIconNode(*(ssize_t*)(cns + len - 4), TYP_LONG); cnsVec2 = gtNewSimdHWIntrinsicNode(simdType, long3, long4, niCreate, baseType, simdSize); } else { JITDUMP("impExpandHalfConstEqualsSIMD: No V256 support and data is too big for V128\n"); // NOTE: We might consider using four V128 for ARM64 return nullptr; } GenTree* zero = 
gtNewSimdHWIntrinsicNode(simdType, niZero, baseType, simdSize); GenTree* offset1 = gtNewIconNode(dataOffset, TYP_I_IMPL); GenTree* offset2 = gtNewIconNode(dataOffset + len * sizeof(USHORT) - simdSize, TYP_I_IMPL); GenTree* dataPtr1 = gtNewOperNode(GT_ADD, TYP_BYREF, data, offset1); GenTree* dataPtr2 = gtNewOperNode(GT_ADD, TYP_BYREF, gtClone(data), offset2); GenTree* vec1 = gtNewIndir(simdType, dataPtr1); GenTree* vec2 = gtNewIndir(simdType, dataPtr2); // TODO-Unroll-CQ: Spill vec1 and vec2 for better pipelining, currently we end up emitting: // // vmovdqu xmm0, xmmword ptr [rcx+12] // vpxor xmm0, xmm0, xmmword ptr[reloc @RWD00] // vmovdqu xmm1, xmmword ptr [rcx+20] // vpxor xmm1, xmm1, xmmword ptr[reloc @RWD16] // // While we should re-order them to be: // // vmovdqu xmm0, xmmword ptr [rcx+12] // vmovdqu xmm1, xmmword ptr [rcx+20] // vpxor xmm0, xmm0, xmmword ptr[reloc @RWD00] // vpxor xmm1, xmm1, xmmword ptr[reloc @RWD16] // // ((v1 ^ cns1) | (v2 ^ cns2)) == zero GenTree* xor1 = gtNewSimdBinOpNode(GT_XOR, simdType, vec1, cnsVec1, baseType, simdSize, false); GenTree* xor2 = gtNewSimdBinOpNode(GT_XOR, simdType, vec2, cnsVec2, baseType, simdSize, false); GenTree* orr = gtNewSimdBinOpNode(GT_OR, simdType, xor1, xor2, baseType, simdSize, false); return gtNewSimdHWIntrinsicNode(TYP_BOOL, useSingleVector ? 
xor1 : orr, zero, niEquals, baseType, simdSize); #else return nullptr; #endif } //------------------------------------------------------------------------ // impCreateCompareInd: creates the following tree: // // * EQ int // +--* IND <type> // | \--* ADD byref // | +--* <obj> // | \--* CNS_INT <offset> // \--* CNS_INT <value> // // Arguments: // comp - Compiler object // obj - GenTree representing data pointer // type - type for the IND node // offset - offset for the data pointer // value - constant value to compare against // // Return Value: // A tree with indirect load and comparison // static GenTree* impCreateCompareInd(Compiler* comp, GenTreeLclVar* obj, var_types type, ssize_t offset, ssize_t value) { GenTree* offsetTree = comp->gtNewIconNode(offset, TYP_I_IMPL); GenTree* addOffsetTree = comp->gtNewOperNode(GT_ADD, TYP_BYREF, obj, offsetTree); GenTree* indirTree = comp->gtNewIndir(type, addOffsetTree); GenTree* valueTree = comp->gtNewIconNode(value, genActualType(type)); return comp->gtNewOperNode(GT_EQ, TYP_INT, indirTree, valueTree); } //------------------------------------------------------------------------ // impExpandHalfConstEqualsSWAR: Attempts to unroll and vectorize // Equals against a constant WCHAR data for Length in [1..8] range // using SWAR (a sort of SIMD but for GPR registers and instructions) // // Arguments: // data - Pointer to a data to vectorize // cns - Constant data (array of 2-byte chars) // len - Number of chars in the cns // dataOffset - Offset for data // // Return Value: // A pointer to the newly created SWAR node or nullptr if unrolling is not // possible or not profitable // // Notes: // This function doesn't check obj for null or its Length, it's just an internal helper // for impExpandHalfConstEquals // GenTree* Compiler::impExpandHalfConstEqualsSWAR(GenTreeLclVar* data, WCHAR* cns, int len, int dataOffset) { assert(len >= 1 && len <= 8); // Compose Int32 or Int64 values from ushort components #define MAKEINT32(c1, c2) 
((UINT64)c2 << 16) | ((UINT64)c1 << 0) #define MAKEINT64(c1, c2, c3, c4) ((UINT64)c4 << 48) | ((UINT64)c3 << 32) | ((UINT64)c2 << 16) | ((UINT64)c1 << 0) if (len == 1) { // [ ch1 ] // [value] // return impCreateCompareInd(this, data, TYP_SHORT, dataOffset, cns[0]); } if (len == 2) { // [ ch1 ][ ch2 ] // [ value ] // const UINT32 value = MAKEINT32(cns[0], cns[1]); return impCreateCompareInd(this, data, TYP_INT, dataOffset, value); } #ifdef TARGET_64BIT if (len == 3) { // handle len = 3 via two Int32 with overlapping: // // [ ch1 ][ ch2 ][ ch3 ] // [ value1 ] // [ value2 ] // // where offset for value2 is 2 bytes (1 char) // UINT32 value1 = MAKEINT32(cns[0], cns[1]); UINT32 value2 = MAKEINT32(cns[1], cns[2]); GenTree* firstIndir = impCreateCompareInd(this, data, TYP_INT, dataOffset, value1); GenTree* secondIndir = impCreateCompareInd(this, gtClone(data)->AsLclVar(), TYP_INT, dataOffset + sizeof(USHORT), value2); // TODO-Unroll-CQ: Consider merging two indirs via XOR instead of QMARK // e.g. gtNewOperNode(GT_XOR, TYP_INT, firstIndir, secondIndir); // but it currently has CQ issues (redundant movs) GenTreeColon* doubleIndirColon = gtNewColonNode(TYP_INT, secondIndir, gtNewFalse()); return gtNewQmarkNode(TYP_INT, firstIndir, doubleIndirColon); } assert(len >= 4 && len <= 8); UINT64 value1 = MAKEINT64(cns[0], cns[1], cns[2], cns[3]); if (len == 4) { // [ ch1 ][ ch2 ][ ch3 ][ ch4 ] // [ value ] // return impCreateCompareInd(this, data, TYP_LONG, dataOffset, value1); } // For 5..7 value2 will overlap with value1, e.g. 
for Length == 6: // // [ ch1 ][ ch2 ][ ch3 ][ ch4 ][ ch5 ][ ch6 ] // [ value1 ] // [ value2 ] // UINT64 value2 = MAKEINT64(cns[len - 4], cns[len - 3], cns[len - 2], cns[len - 1]); GenTree* firstIndir = impCreateCompareInd(this, data, TYP_LONG, dataOffset, value1); ssize_t offset = dataOffset + len * sizeof(WCHAR) - sizeof(UINT64); GenTree* secondIndir = impCreateCompareInd(this, gtClone(data)->AsLclVar(), TYP_LONG, offset, value2); // TODO-Unroll-CQ: Consider merging two indirs via XOR instead of QMARK GenTreeColon* doubleIndirColon = gtNewColonNode(TYP_INT, secondIndir, gtNewFalse()); return gtNewQmarkNode(TYP_INT, firstIndir, doubleIndirColon); #else // TARGET_64BIT return nullptr; #endif } //------------------------------------------------------------------------ // impExpandHalfConstEquals: Attempts to unroll and vectorize // Equals against a constant WCHAR data for Length in [8..32] range // using either SWAR or SIMD. In a general case it will look like this: // // bool equals = obj != null && obj.Length == len && (SWAR or SIMD) // // Arguments: // data - Pointer (LCL_VAR) to a data to vectorize // lengthFld - Pointer (LCL_VAR or GT_FIELD) to Length field // checkForNull - Check data for null // startsWith - Is it StartsWith or Equals? // cns - Constant data (array of 2-byte chars) // len - Number of 2-byte chars in the cns // dataOffset - Offset for data // // Return Value: // A pointer to the newly created SIMD node or nullptr if unrolling is not // possible or not profitable // GenTree* Compiler::impExpandHalfConstEquals(GenTreeLclVar* data, GenTree* lengthFld, bool checkForNull, bool startsWith, WCHAR* cnsData, int len, int dataOffset) { assert(len >= 0); if (compCurBB->isRunRarely()) { // Not profitable to expand JITDUMP("impExpandHalfConstEquals: block is cold - not profitable to expand.\n"); return nullptr; } if ((compIsForInlining() ? 
(fgBBcount + impInlineRoot()->fgBBcount) : (fgBBcount)) > 20) { // We don't want to unroll too much and in big methods // TODO-Unroll-CQ: come up with some better heuristic/budget JITDUMP("impExpandHalfConstEquals: method has too many BBs (>20) - not profitable to expand.\n"); return nullptr; } const genTreeOps cmpOp = startsWith ? GT_GE : GT_EQ; GenTree* elementsCount = gtNewIconNode(len); GenTree* lenCheckNode; if (len == 0) { // For zero length we don't need to compare content, the following expression is enough: // // varData != null && lengthFld == 0 // lenCheckNode = gtNewOperNode(cmpOp, TYP_INT, lengthFld, elementsCount); } else { assert(cnsData != nullptr); GenTree* indirCmp = nullptr; if (len < 8) // SWAR impl supports len == 8 but we'd better give it to SIMD { indirCmp = impExpandHalfConstEqualsSWAR(gtClone(data)->AsLclVar(), cnsData, len, dataOffset); } else if (len <= 32) { indirCmp = impExpandHalfConstEqualsSIMD(gtClone(data)->AsLclVar(), cnsData, len, dataOffset); } if (indirCmp == nullptr) { JITDUMP("unable to compose indirCmp\n"); return nullptr; } GenTreeColon* lenCheckColon = gtNewColonNode(TYP_INT, indirCmp, gtNewFalse()); // For StartsWith we use GT_GE, e.g.: `x.Length >= 10` lenCheckNode = gtNewQmarkNode(TYP_INT, gtNewOperNode(cmpOp, TYP_INT, lengthFld, elementsCount), lenCheckColon); } GenTree* rootQmark; if (checkForNull) { // varData == nullptr GenTreeColon* nullCheckColon = gtNewColonNode(TYP_INT, lenCheckNode, gtNewFalse()); rootQmark = gtNewQmarkNode(TYP_INT, gtNewOperNode(GT_NE, TYP_INT, data, gtNewNull()), nullCheckColon); } else { // no nullcheck, just "obj.Length == len && (SWAR or SIMD)" rootQmark = lenCheckNode; } return rootQmark; } //------------------------------------------------------------------------ // impGetStrConFromSpan: Try to obtain string literal out of a span: // var span = "str".AsSpan(); // var span = (ReadOnlySpan<char>)"str" // // Arguments: // span - String_op_Implicit or MemoryExtensions_AsSpan call // with a 
string literal // // Returns: // GenTreeStrCon node or nullptr // GenTreeStrCon* Compiler::impGetStrConFromSpan(GenTree* span) { GenTreeCall* argCall = nullptr; if (span->OperIs(GT_RET_EXPR)) { // NOTE: we don't support chains of RET_EXPR here GenTree* inlineCandidate = span->AsRetExpr()->gtInlineCandidate; if (inlineCandidate->OperIs(GT_CALL)) { argCall = inlineCandidate->AsCall(); } } else if (span->OperIs(GT_CALL)) { argCall = span->AsCall(); } if ((argCall != nullptr) && ((argCall->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) != 0)) { const NamedIntrinsic ni = lookupNamedIntrinsic(argCall->gtCallMethHnd); if ((ni == NI_System_MemoryExtensions_AsSpan) || (ni == NI_System_String_op_Implicit)) { assert(argCall->gtCallArgs->GetNext() == nullptr); if (argCall->gtCallArgs->GetNode()->OperIs(GT_CNS_STR)) { return argCall->gtCallArgs->GetNode()->AsStrCon(); } } } return nullptr; } //------------------------------------------------------------------------ // impStringEqualsOrStartsWith: The main entry-point for String methods // We're going to unroll & vectorize the following cases: // 1) String.Equals(obj, "cns") // 2) String.Equals(obj, "cns", StringComparison.Ordinal) // 3) String.Equals("cns", obj) // 4) String.Equals("cns", obj, StringComparison.Ordinal) // 5) obj.Equals("cns") // 5) obj.Equals("cns") // 6) obj.Equals("cns", StringComparison.Ordinal) // 7) "cns".Equals(obj) // 8) "cns".Equals(obj, StringComparison.Ordinal) // 9) obj.StartsWith("cns", StringComparison.Ordinal) // 10) "cns".StartsWith(obj, StringComparison.Ordinal) // // For cases 5, 6 and 9 we don't emit "obj != null" // NOTE: String.Equals(object) is not supported currently // // Arguments: // startsWith - Is it StartsWith or Equals? 
// sig - signature of StartsWith or Equals method // methodFlags - its flags // // Returns: // GenTree representing vectorized comparison or nullptr // GenTree* Compiler::impStringEqualsOrStartsWith(bool startsWith, CORINFO_SIG_INFO* sig, unsigned methodFlags) { const bool isStatic = methodFlags & CORINFO_FLG_STATIC; const int argsCount = sig->numArgs + (isStatic ? 0 : 1); GenTree* op1; GenTree* op2; if (argsCount == 3) // overload with StringComparison { if (!impStackTop(0).val->IsIntegralConst(4)) // StringComparison.Ordinal { // TODO-Unroll-CQ: Unroll & vectorize OrdinalIgnoreCase return nullptr; } op1 = impStackTop(2).val; op2 = impStackTop(1).val; } else { assert(argsCount == 2); op1 = impStackTop(1).val; op2 = impStackTop(0).val; } if (!(op1->OperIs(GT_CNS_STR) ^ op2->OperIs(GT_CNS_STR))) { // either op1 or op2 has to be CNS_STR, but not both - that case is optimized // just fine as is. return nullptr; } GenTree* varStr; GenTreeStrCon* cnsStr; if (op1->OperIs(GT_CNS_STR)) { cnsStr = op1->AsStrCon(); varStr = op2; } else { cnsStr = op2->AsStrCon(); varStr = op1; } bool needsNullcheck = true; if ((op1 != cnsStr) && !isStatic) { // for the following cases we should not check varStr for null: // // obj.Equals("cns") // obj.Equals("cns", StringComparison.Ordinal) // obj.StartsWith("cns", StringComparison.Ordinal) // // instead, it should throw NRE if it's null needsNullcheck = false; } int cnsLength = -1; const char16_t* str = nullptr; if (cnsStr->IsStringEmptyField()) { // check for fake "" first cnsLength = 0; JITDUMP("Trying to unroll String.Equals|StartsWith(op1, \"\")...\n", str) } else { str = info.compCompHnd->getStringLiteral(cnsStr->gtScpHnd, cnsStr->gtSconCPX, &cnsLength); if ((cnsLength < 0) || (str == nullptr)) { // We were unable to get the literal (e.g. 
dynamic context) return nullptr; } JITDUMP("Trying to unroll String.Equals|StartsWith(op1, \"%ws\")...\n", str) } // Create a temp which is safe to gtClone for varStr // We're not appending it as a statement until we figure out unrolling is profitable (and possible) unsigned varStrTmp = lvaGrabTemp(true DEBUGARG("spilling varStr")); lvaTable[varStrTmp].lvType = varStr->TypeGet(); GenTreeLclVar* varStrLcl = gtNewLclvNode(varStrTmp, varStr->TypeGet()); // Create a tree representing string's Length: // TODO-Unroll-CQ: Consider using ARR_LENGTH here, but we'll have to modify QMARK to propagate BBF_HAS_IDX_LEN int strLenOffset = OFFSETOF__CORINFO_String__stringLen; GenTree* lenOffset = gtNewIconNode(strLenOffset, TYP_I_IMPL); GenTree* lenNode = gtNewIndir(TYP_INT, gtNewOperNode(GT_ADD, TYP_BYREF, varStrLcl, lenOffset)); varStrLcl = gtClone(varStrLcl)->AsLclVar(); GenTree* unrolled = impExpandHalfConstEquals(varStrLcl, lenNode, needsNullcheck, startsWith, (WCHAR*)str, cnsLength, strLenOffset + sizeof(int)); if (unrolled != nullptr) { impAssignTempGen(varStrTmp, varStr); if (unrolled->OperIs(GT_QMARK)) { // QMARK nodes cannot reside on the evaluation stack unsigned rootTmp = lvaGrabTemp(true DEBUGARG("spilling unroll qmark")); impAssignTempGen(rootTmp, unrolled); unrolled = gtNewLclvNode(rootTmp, TYP_INT); } JITDUMP("\n... 
Successfully unrolled to:\n") DISPTREE(unrolled) for (int i = 0; i < argsCount; i++) { impPopStack(); } } return unrolled; } //------------------------------------------------------------------------ // impSpanEqualsOrStartsWith: The main entry-point for [ReadOnly]Span<char> methods // We're going to unroll & vectorize the following cases: // 1) MemoryExtensions.SequenceEqual<char>(var, "cns") // 2) MemoryExtensions.SequenceEqual<char>("cns", var) // 3) MemoryExtensions.Equals(var, "cns", StringComparison.Ordinal) // 4) MemoryExtensions.Equals("cns", var, StringComparison.Ordinal) // 5) MemoryExtensions.StartsWith<char>("cns", var) // 6) MemoryExtensions.StartsWith<char>(var, "cns") // 7) MemoryExtensions.StartsWith("cns", var, StringComparison.Ordinal) // 8) MemoryExtensions.StartsWith(var, "cns", StringComparison.Ordinal) // // Arguments: // startsWith - Is it StartsWith or Equals? // sig - signature of StartsWith or Equals method // methodFlags - its flags // // Returns: // GenTree representing vectorized comparison or nullptr // GenTree* Compiler::impSpanEqualsOrStartsWith(bool startsWith, CORINFO_SIG_INFO* sig, unsigned methodFlags) { const bool isStatic = methodFlags & CORINFO_FLG_STATIC; const int argsCount = sig->numArgs + (isStatic ? 
0 : 1); GenTree* op1; GenTree* op2; if (argsCount == 3) // overload with StringComparison { if (!impStackTop(0).val->IsIntegralConst(4)) // StringComparison.Ordinal { // TODO-Unroll-CQ: Unroll & vectorize OrdinalIgnoreCase return nullptr; } op1 = impStackTop(2).val; op2 = impStackTop(1).val; } else { assert(argsCount == 2); op1 = impStackTop(1).val; op2 = impStackTop(0).val; } // For generic StartsWith and Equals we need to make sure T is char if (sig->sigInst.methInstCount != 0) { assert(sig->sigInst.methInstCount == 1); CORINFO_CLASS_HANDLE targetElemHnd = sig->sigInst.methInst[0]; CorInfoType typ = info.compCompHnd->getTypeForPrimitiveValueClass(targetElemHnd); if ((typ != CORINFO_TYPE_SHORT) && (typ != CORINFO_TYPE_USHORT) && (typ != CORINFO_TYPE_CHAR)) { return nullptr; } } // Try to obtain original string literals out of span arguments GenTreeStrCon* op1Str = impGetStrConFromSpan(op1); GenTreeStrCon* op2Str = impGetStrConFromSpan(op2); if (!((op1Str != nullptr) ^ (op2Str != nullptr))) { // either op1 or op2 has to be '(ReadOnlySpan)"cns"' return nullptr; } GenTree* spanObj; GenTreeStrCon* cnsStr; if (op1Str != nullptr) { cnsStr = op1Str; spanObj = op2; } else { cnsStr = op2Str; spanObj = op1; } int cnsLength = -1; const char16_t* str = nullptr; if (cnsStr->IsStringEmptyField()) { // check for fake "" first cnsLength = 0; JITDUMP("Trying to unroll MemoryExtensions.Equals|SequenceEqual|StartsWith(op1, \"\")...\n", str) } else { str = info.compCompHnd->getStringLiteral(cnsStr->gtScpHnd, cnsStr->gtSconCPX, &cnsLength); if (cnsLength < 0 || str == nullptr) { // We were unable to get the literal (e.g. 
dynamic context) return nullptr; } JITDUMP("Trying to unroll MemoryExtensions.Equals|SequenceEqual|StartsWith(op1, \"%ws\")...\n", str) } CORINFO_CLASS_HANDLE spanCls = gtGetStructHandle(spanObj); CORINFO_FIELD_HANDLE pointerHnd = info.compCompHnd->getFieldInClass(spanCls, 0); CORINFO_FIELD_HANDLE lengthHnd = info.compCompHnd->getFieldInClass(spanCls, 1); const unsigned lengthOffset = info.compCompHnd->getFieldOffset(lengthHnd); // Create a placeholder for Span object - we're not going to Append it to statements // in advance to avoid redundant spills in case if we fail to vectorize unsigned spanObjRef = lvaGrabTemp(true DEBUGARG("spanObj tmp")); unsigned spanDataTmp = lvaGrabTemp(true DEBUGARG("spanData tmp")); lvaTable[spanObjRef].lvType = TYP_BYREF; lvaTable[spanDataTmp].lvType = TYP_BYREF; GenTreeLclVar* spanObjRefLcl = gtNewLclvNode(spanObjRef, TYP_BYREF); GenTreeLclVar* spanDataTmpLcl = gtNewLclvNode(spanDataTmp, TYP_BYREF); GenTreeField* spanLength = gtNewFieldRef(TYP_INT, lengthHnd, gtClone(spanObjRefLcl), lengthOffset); GenTreeField* spanData = gtNewFieldRef(TYP_BYREF, pointerHnd, spanObjRefLcl); GenTree* unrolled = impExpandHalfConstEquals(spanDataTmpLcl, spanLength, false, startsWith, (WCHAR*)str, cnsLength, 0); if (unrolled != nullptr) { // We succeeded, fill the placeholders: impAssignTempGen(spanObjRef, impGetStructAddr(spanObj, spanCls, (unsigned)CHECK_SPILL_NONE, true)); impAssignTempGen(spanDataTmp, spanData); if (unrolled->OperIs(GT_QMARK)) { // QMARK can't be a root node, spill it to a temp unsigned rootTmp = lvaGrabTemp(true DEBUGARG("spilling unroll qmark")); impAssignTempGen(rootTmp, unrolled); unrolled = gtNewLclvNode(rootTmp, TYP_INT); } JITDUMP("... 
Successfully unrolled to:\n") DISPTREE(unrolled) for (int i = 0; i < argsCount; i++) { impPopStack(); } // We have to clean up GT_RET_EXPR for String.op_Implicit or MemoryExtensions.AsSpans if ((spanObj != op1) && op1->OperIs(GT_RET_EXPR)) { GenTree* inlineCandidate = op1->AsRetExpr()->gtInlineCandidate; assert(inlineCandidate->IsCall()); inlineCandidate->gtBashToNOP(); } else if ((spanObj != op2) && op2->OperIs(GT_RET_EXPR)) { GenTree* inlineCandidate = op2->AsRetExpr()->gtInlineCandidate; assert(inlineCandidate->IsCall()); inlineCandidate->gtBashToNOP(); } } return unrolled; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif //------------------------------------------------------------------------ // importer_vectorization.cpp // // This file is responsible for various (partial) vectorizations during import phase, // e.g. the following APIs are currently supported: // // 1) String.Equals(string, string) // 2) String.Equals(string, string, StringComparison.Ordinal) // 3) str.Equals(string) // 4) str.Equals(String, StringComparison.Ordinal) // 5) str.StartsWith(string, StringComparison.Ordinal) // 6) MemoryExtensions.SequenceEqual<char>(ROS<char>, ROS<char>) // 7) MemoryExtensions.Equals(ROS<char>, ROS<char>, StringComparison.Ordinal) // 8) MemoryExtensions.StartsWith<char>(ROS<char>, ROS<char>) // 9) MemoryExtensions.StartsWith(ROS<char>, ROS<char>, StringComparison.Ordinal) // // When one of the arguments is a constant string of a [0..32] size so we can inline // a vectorized comparison against it using SWAR or SIMD techniques (e.g. via two V256 vectors) // // We might add these in future: // 1) OrdinalIgnoreCase for everything above // 2) Span.CopyTo // 3) Spans/Arrays of bytes (e.g. UTF8) against a constant RVA data // //------------------------------------------------------------------------ // impExpandHalfConstEqualsSIMD: Attempts to unroll and vectorize // Equals against a constant WCHAR data for Length in [8..32] range // using SIMD instructions. 
C# equivalent of what this function emits: // // bool IsTestString(ReadOnlySpan<char> span) // { // // Length and Null checks are not handled here // ref char s = ref MemoryMarshal.GetReference(span); // var v1 = Vector128.LoadUnsafe(ref s); // var v1 = Vector128.LoadUnsafe(ref s, span.Length - Vector128<ushort>.Count); // var cns1 = Vector128.Create('T', 'e', 's', 't', 'S', 't', 'r', 'i'); // var cns2 = Vector128.Create('s', 't', 'S', 't', 'r', 'i', 'n', 'g'); // return ((v1 ^ cns1) | (v2 ^ cns2)) == Vector<ushort>.Zero; // // // for: // // return span.SequenceEqual("TestString"); // } // // Arguments: // data - Pointer to a data to vectorize // cns - Constant data (array of 2-byte chars) // len - Number of chars in the cns // dataOffset - Offset for data // // Return Value: // A pointer to the newly created SIMD node or nullptr if unrolling is not // possible or not profitable // // Notes: // This function doesn't check obj for null or its Length, it's just an internal helper // for impExpandHalfConstEquals // GenTree* Compiler::impExpandHalfConstEqualsSIMD(GenTreeLclVar* data, WCHAR* cns, int len, int dataOffset) { assert(len >= 8 && len <= 32); #if defined(FEATURE_HW_INTRINSICS) && defined(TARGET_64BIT) if (!compOpportunisticallyDependsOn(InstructionSet_Vector128)) { // We need SSE2 or ADVSIMD at least return nullptr; } CorInfoType baseType = CORINFO_TYPE_ULONG; int simdSize; var_types simdType; NamedIntrinsic niZero; NamedIntrinsic niEquals; NamedIntrinsic niCreate; GenTree* cnsVec1; GenTree* cnsVec2; // Optimization: don't use two vectors for Length == 8 or 16 bool useSingleVector = false; #if defined(TARGET_XARCH) if (compOpportunisticallyDependsOn(InstructionSet_Vector256) && len >= 16) { // Handle [16..32] inputs via two Vector256 assert(len >= 16 && len <= 32); simdSize = 32; simdType = TYP_SIMD32; niZero = NI_Vector256_get_Zero; niEquals = NI_Vector256_op_Equality; niCreate = NI_Vector256_Create; // Special case: use a single vector for Length == 16 
useSingleVector = len == 16; assert(sizeof(ssize_t) == 8); // this code is guarded with TARGET_64BIT GenTree* long1 = gtNewIconNode(*(ssize_t*)(cns + 0), TYP_LONG); GenTree* long2 = gtNewIconNode(*(ssize_t*)(cns + 4), TYP_LONG); GenTree* long3 = gtNewIconNode(*(ssize_t*)(cns + 8), TYP_LONG); GenTree* long4 = gtNewIconNode(*(ssize_t*)(cns + 12), TYP_LONG); cnsVec1 = gtNewSimdHWIntrinsicNode(simdType, long1, long2, long3, long4, niCreate, baseType, simdSize); // cnsVec2 most likely overlaps with cnsVec1: GenTree* long5 = gtNewIconNode(*(ssize_t*)(cns + len - 16), TYP_LONG); GenTree* long6 = gtNewIconNode(*(ssize_t*)(cns + len - 12), TYP_LONG); GenTree* long7 = gtNewIconNode(*(ssize_t*)(cns + len - 8), TYP_LONG); GenTree* long8 = gtNewIconNode(*(ssize_t*)(cns + len - 4), TYP_LONG); cnsVec2 = gtNewSimdHWIntrinsicNode(simdType, long5, long6, long7, long8, niCreate, baseType, simdSize); } else #endif if (len <= 16) { // Handle [8..16] inputs via two Vector128 assert(len >= 8 && len <= 16); simdSize = 16; simdType = TYP_SIMD16; niZero = NI_Vector128_get_Zero; niEquals = NI_Vector128_op_Equality; niCreate = NI_Vector128_Create; // Special case: use a single vector for Length == 8 useSingleVector = len == 8; assert(sizeof(ssize_t) == 8); // this code is guarded with TARGET_64BIT GenTree* long1 = gtNewIconNode(*(ssize_t*)(cns + 0), TYP_LONG); GenTree* long2 = gtNewIconNode(*(ssize_t*)(cns + 4), TYP_LONG); cnsVec1 = gtNewSimdHWIntrinsicNode(simdType, long1, long2, niCreate, baseType, simdSize); // cnsVec2 most likely overlaps with cnsVec1: GenTree* long3 = gtNewIconNode(*(ssize_t*)(cns + len - 8), TYP_LONG); GenTree* long4 = gtNewIconNode(*(ssize_t*)(cns + len - 4), TYP_LONG); cnsVec2 = gtNewSimdHWIntrinsicNode(simdType, long3, long4, niCreate, baseType, simdSize); } else { JITDUMP("impExpandHalfConstEqualsSIMD: No V256 support and data is too big for V128\n"); // NOTE: We might consider using four V128 for ARM64 return nullptr; } GenTree* zero = 
gtNewSimdHWIntrinsicNode(simdType, niZero, baseType, simdSize); GenTree* offset1 = gtNewIconNode(dataOffset, TYP_I_IMPL); GenTree* offset2 = gtNewIconNode(dataOffset + len * sizeof(USHORT) - simdSize, TYP_I_IMPL); GenTree* dataPtr1 = gtNewOperNode(GT_ADD, TYP_BYREF, data, offset1); GenTree* dataPtr2 = gtNewOperNode(GT_ADD, TYP_BYREF, gtClone(data), offset2); GenTree* vec1 = gtNewIndir(simdType, dataPtr1); GenTree* vec2 = gtNewIndir(simdType, dataPtr2); // TODO-Unroll-CQ: Spill vec1 and vec2 for better pipelining, currently we end up emitting: // // vmovdqu xmm0, xmmword ptr [rcx+12] // vpxor xmm0, xmm0, xmmword ptr[reloc @RWD00] // vmovdqu xmm1, xmmword ptr [rcx+20] // vpxor xmm1, xmm1, xmmword ptr[reloc @RWD16] // // While we should re-order them to be: // // vmovdqu xmm0, xmmword ptr [rcx+12] // vmovdqu xmm1, xmmword ptr [rcx+20] // vpxor xmm0, xmm0, xmmword ptr[reloc @RWD00] // vpxor xmm1, xmm1, xmmword ptr[reloc @RWD16] // // ((v1 ^ cns1) | (v2 ^ cns2)) == zero GenTree* xor1 = gtNewSimdBinOpNode(GT_XOR, simdType, vec1, cnsVec1, baseType, simdSize, false); GenTree* xor2 = gtNewSimdBinOpNode(GT_XOR, simdType, vec2, cnsVec2, baseType, simdSize, false); GenTree* orr = gtNewSimdBinOpNode(GT_OR, simdType, xor1, xor2, baseType, simdSize, false); return gtNewSimdHWIntrinsicNode(TYP_BOOL, useSingleVector ? 
xor1 : orr, zero, niEquals, baseType, simdSize); #else return nullptr; #endif } //------------------------------------------------------------------------ // impCreateCompareInd: creates the following tree: // // * EQ int // +--* IND <type> // | \--* ADD byref // | +--* <obj> // | \--* CNS_INT <offset> // \--* CNS_INT <value> // // Arguments: // comp - Compiler object // obj - GenTree representing data pointer // type - type for the IND node // offset - offset for the data pointer // value - constant value to compare against // // Return Value: // A tree with indirect load and comparison // static GenTree* impCreateCompareInd(Compiler* comp, GenTreeLclVar* obj, var_types type, ssize_t offset, ssize_t value) { GenTree* offsetTree = comp->gtNewIconNode(offset, TYP_I_IMPL); GenTree* addOffsetTree = comp->gtNewOperNode(GT_ADD, TYP_BYREF, obj, offsetTree); GenTree* indirTree = comp->gtNewIndir(type, addOffsetTree); GenTree* valueTree = comp->gtNewIconNode(value, genActualType(type)); return comp->gtNewOperNode(GT_EQ, TYP_INT, indirTree, valueTree); } //------------------------------------------------------------------------ // impExpandHalfConstEqualsSWAR: Attempts to unroll and vectorize // Equals against a constant WCHAR data for Length in [1..8] range // using SWAR (a sort of SIMD but for GPR registers and instructions) // // Arguments: // data - Pointer to a data to vectorize // cns - Constant data (array of 2-byte chars) // len - Number of chars in the cns // dataOffset - Offset for data // // Return Value: // A pointer to the newly created SWAR node or nullptr if unrolling is not // possible or not profitable // // Notes: // This function doesn't check obj for null or its Length, it's just an internal helper // for impExpandHalfConstEquals // GenTree* Compiler::impExpandHalfConstEqualsSWAR(GenTreeLclVar* data, WCHAR* cns, int len, int dataOffset) { assert(len >= 1 && len <= 8); // Compose Int32 or Int64 values from ushort components #define MAKEINT32(c1, c2) 
((UINT64)c2 << 16) | ((UINT64)c1 << 0) #define MAKEINT64(c1, c2, c3, c4) ((UINT64)c4 << 48) | ((UINT64)c3 << 32) | ((UINT64)c2 << 16) | ((UINT64)c1 << 0) if (len == 1) { // [ ch1 ] // [value] // return impCreateCompareInd(this, data, TYP_SHORT, dataOffset, cns[0]); } if (len == 2) { // [ ch1 ][ ch2 ] // [ value ] // const UINT32 value = MAKEINT32(cns[0], cns[1]); return impCreateCompareInd(this, data, TYP_INT, dataOffset, value); } #ifdef TARGET_64BIT if (len == 3) { // handle len = 3 via two Int32 with overlapping: // // [ ch1 ][ ch2 ][ ch3 ] // [ value1 ] // [ value2 ] // // where offset for value2 is 2 bytes (1 char) // UINT32 value1 = MAKEINT32(cns[0], cns[1]); UINT32 value2 = MAKEINT32(cns[1], cns[2]); GenTree* firstIndir = impCreateCompareInd(this, data, TYP_INT, dataOffset, value1); GenTree* secondIndir = impCreateCompareInd(this, gtClone(data)->AsLclVar(), TYP_INT, dataOffset + sizeof(USHORT), value2); // TODO-Unroll-CQ: Consider merging two indirs via XOR instead of QMARK // e.g. gtNewOperNode(GT_XOR, TYP_INT, firstIndir, secondIndir); // but it currently has CQ issues (redundant movs) GenTreeColon* doubleIndirColon = gtNewColonNode(TYP_INT, secondIndir, gtNewFalse()); return gtNewQmarkNode(TYP_INT, firstIndir, doubleIndirColon); } assert(len >= 4 && len <= 8); UINT64 value1 = MAKEINT64(cns[0], cns[1], cns[2], cns[3]); if (len == 4) { // [ ch1 ][ ch2 ][ ch3 ][ ch4 ] // [ value ] // return impCreateCompareInd(this, data, TYP_LONG, dataOffset, value1); } // For 5..7 value2 will overlap with value1, e.g. 
for Length == 6: // // [ ch1 ][ ch2 ][ ch3 ][ ch4 ][ ch5 ][ ch6 ] // [ value1 ] // [ value2 ] // UINT64 value2 = MAKEINT64(cns[len - 4], cns[len - 3], cns[len - 2], cns[len - 1]); GenTree* firstIndir = impCreateCompareInd(this, data, TYP_LONG, dataOffset, value1); ssize_t offset = dataOffset + len * sizeof(WCHAR) - sizeof(UINT64); GenTree* secondIndir = impCreateCompareInd(this, gtClone(data)->AsLclVar(), TYP_LONG, offset, value2); // TODO-Unroll-CQ: Consider merging two indirs via XOR instead of QMARK GenTreeColon* doubleIndirColon = gtNewColonNode(TYP_INT, secondIndir, gtNewFalse()); return gtNewQmarkNode(TYP_INT, firstIndir, doubleIndirColon); #else // TARGET_64BIT return nullptr; #endif } //------------------------------------------------------------------------ // impExpandHalfConstEquals: Attempts to unroll and vectorize // Equals against a constant WCHAR data for Length in [8..32] range // using either SWAR or SIMD. In a general case it will look like this: // // bool equals = obj != null && obj.Length == len && (SWAR or SIMD) // // Arguments: // data - Pointer (LCL_VAR) to a data to vectorize // lengthFld - Pointer (LCL_VAR or GT_FIELD) to Length field // checkForNull - Check data for null // startsWith - Is it StartsWith or Equals? // cns - Constant data (array of 2-byte chars) // len - Number of 2-byte chars in the cns // dataOffset - Offset for data // // Return Value: // A pointer to the newly created SIMD node or nullptr if unrolling is not // possible or not profitable // GenTree* Compiler::impExpandHalfConstEquals(GenTreeLclVar* data, GenTree* lengthFld, bool checkForNull, bool startsWith, WCHAR* cnsData, int len, int dataOffset) { assert(len >= 0); if (compCurBB->isRunRarely()) { // Not profitable to expand JITDUMP("impExpandHalfConstEquals: block is cold - not profitable to expand.\n"); return nullptr; } if ((compIsForInlining() ? 
(fgBBcount + impInlineRoot()->fgBBcount) : (fgBBcount)) > 20) { // We don't want to unroll too much and in big methods // TODO-Unroll-CQ: come up with some better heuristic/budget JITDUMP("impExpandHalfConstEquals: method has too many BBs (>20) - not profitable to expand.\n"); return nullptr; } const genTreeOps cmpOp = startsWith ? GT_GE : GT_EQ; GenTree* elementsCount = gtNewIconNode(len); GenTree* lenCheckNode; if (len == 0) { // For zero length we don't need to compare content, the following expression is enough: // // varData != null && lengthFld == 0 // lenCheckNode = gtNewOperNode(cmpOp, TYP_INT, lengthFld, elementsCount); } else { assert(cnsData != nullptr); GenTree* indirCmp = nullptr; if (len < 8) // SWAR impl supports len == 8 but we'd better give it to SIMD { indirCmp = impExpandHalfConstEqualsSWAR(gtClone(data)->AsLclVar(), cnsData, len, dataOffset); } else if (len <= 32) { indirCmp = impExpandHalfConstEqualsSIMD(gtClone(data)->AsLclVar(), cnsData, len, dataOffset); } if (indirCmp == nullptr) { JITDUMP("unable to compose indirCmp\n"); return nullptr; } GenTreeColon* lenCheckColon = gtNewColonNode(TYP_INT, indirCmp, gtNewFalse()); // For StartsWith we use GT_GE, e.g.: `x.Length >= 10` lenCheckNode = gtNewQmarkNode(TYP_INT, gtNewOperNode(cmpOp, TYP_INT, lengthFld, elementsCount), lenCheckColon); } GenTree* rootQmark; if (checkForNull) { // varData == nullptr GenTreeColon* nullCheckColon = gtNewColonNode(TYP_INT, lenCheckNode, gtNewFalse()); rootQmark = gtNewQmarkNode(TYP_INT, gtNewOperNode(GT_NE, TYP_INT, data, gtNewNull()), nullCheckColon); } else { // no nullcheck, just "obj.Length == len && (SWAR or SIMD)" rootQmark = lenCheckNode; } return rootQmark; } //------------------------------------------------------------------------ // impGetStrConFromSpan: Try to obtain string literal out of a span: // var span = "str".AsSpan(); // var span = (ReadOnlySpan<char>)"str" // // Arguments: // span - String_op_Implicit or MemoryExtensions_AsSpan call // with a 
string literal // // Returns: // GenTreeStrCon node or nullptr // GenTreeStrCon* Compiler::impGetStrConFromSpan(GenTree* span) { GenTreeCall* argCall = nullptr; if (span->OperIs(GT_RET_EXPR)) { // NOTE: we don't support chains of RET_EXPR here GenTree* inlineCandidate = span->AsRetExpr()->gtInlineCandidate; if (inlineCandidate->OperIs(GT_CALL)) { argCall = inlineCandidate->AsCall(); } } else if (span->OperIs(GT_CALL)) { argCall = span->AsCall(); } if ((argCall != nullptr) && ((argCall->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) != 0)) { const NamedIntrinsic ni = lookupNamedIntrinsic(argCall->gtCallMethHnd); if ((ni == NI_System_MemoryExtensions_AsSpan) || (ni == NI_System_String_op_Implicit)) { assert(argCall->gtCallArgs->GetNext() == nullptr); if (argCall->gtCallArgs->GetNode()->OperIs(GT_CNS_STR)) { return argCall->gtCallArgs->GetNode()->AsStrCon(); } } } return nullptr; } //------------------------------------------------------------------------ // impStringEqualsOrStartsWith: The main entry-point for String methods // We're going to unroll & vectorize the following cases: // 1) String.Equals(obj, "cns") // 2) String.Equals(obj, "cns", StringComparison.Ordinal) // 3) String.Equals("cns", obj) // 4) String.Equals("cns", obj, StringComparison.Ordinal) // 5) obj.Equals("cns") // 5) obj.Equals("cns") // 6) obj.Equals("cns", StringComparison.Ordinal) // 7) "cns".Equals(obj) // 8) "cns".Equals(obj, StringComparison.Ordinal) // 9) obj.StartsWith("cns", StringComparison.Ordinal) // 10) "cns".StartsWith(obj, StringComparison.Ordinal) // // For cases 5, 6 and 9 we don't emit "obj != null" // NOTE: String.Equals(object) is not supported currently // // Arguments: // startsWith - Is it StartsWith or Equals? 
// sig - signature of StartsWith or Equals method // methodFlags - its flags // // Returns: // GenTree representing vectorized comparison or nullptr // GenTree* Compiler::impStringEqualsOrStartsWith(bool startsWith, CORINFO_SIG_INFO* sig, unsigned methodFlags) { const bool isStatic = methodFlags & CORINFO_FLG_STATIC; const int argsCount = sig->numArgs + (isStatic ? 0 : 1); GenTree* op1; GenTree* op2; if (argsCount == 3) // overload with StringComparison { if (!impStackTop(0).val->IsIntegralConst(4)) // StringComparison.Ordinal { // TODO-Unroll-CQ: Unroll & vectorize OrdinalIgnoreCase return nullptr; } op1 = impStackTop(2).val; op2 = impStackTop(1).val; } else { assert(argsCount == 2); op1 = impStackTop(1).val; op2 = impStackTop(0).val; } if (!(op1->OperIs(GT_CNS_STR) ^ op2->OperIs(GT_CNS_STR))) { // either op1 or op2 has to be CNS_STR, but not both - that case is optimized // just fine as is. return nullptr; } GenTree* varStr; GenTreeStrCon* cnsStr; if (op1->OperIs(GT_CNS_STR)) { cnsStr = op1->AsStrCon(); varStr = op2; } else { cnsStr = op2->AsStrCon(); varStr = op1; } bool needsNullcheck = true; if ((op1 != cnsStr) && !isStatic) { // for the following cases we should not check varStr for null: // // obj.Equals("cns") // obj.Equals("cns", StringComparison.Ordinal) // obj.StartsWith("cns", StringComparison.Ordinal) // // instead, it should throw NRE if it's null needsNullcheck = false; } int cnsLength = -1; const char16_t* str = nullptr; if (cnsStr->IsStringEmptyField()) { // check for fake "" first cnsLength = 0; JITDUMP("Trying to unroll String.Equals|StartsWith(op1, \"\")...\n", str) } else { str = info.compCompHnd->getStringLiteral(cnsStr->gtScpHnd, cnsStr->gtSconCPX, &cnsLength); if ((cnsLength < 0) || (str == nullptr)) { // We were unable to get the literal (e.g. 
dynamic context) return nullptr; } JITDUMP("Trying to unroll String.Equals|StartsWith(op1, \"%ws\")...\n", str) } // Create a temp which is safe to gtClone for varStr // We're not appending it as a statement until we figure out unrolling is profitable (and possible) unsigned varStrTmp = lvaGrabTemp(true DEBUGARG("spilling varStr")); lvaTable[varStrTmp].lvType = varStr->TypeGet(); GenTreeLclVar* varStrLcl = gtNewLclvNode(varStrTmp, varStr->TypeGet()); // Create a tree representing string's Length: // TODO-Unroll-CQ: Consider using ARR_LENGTH here, but we'll have to modify QMARK to propagate BBF_HAS_IDX_LEN int strLenOffset = OFFSETOF__CORINFO_String__stringLen; GenTree* lenOffset = gtNewIconNode(strLenOffset, TYP_I_IMPL); GenTree* lenNode = gtNewIndir(TYP_INT, gtNewOperNode(GT_ADD, TYP_BYREF, varStrLcl, lenOffset)); varStrLcl = gtClone(varStrLcl)->AsLclVar(); GenTree* unrolled = impExpandHalfConstEquals(varStrLcl, lenNode, needsNullcheck, startsWith, (WCHAR*)str, cnsLength, strLenOffset + sizeof(int)); if (unrolled != nullptr) { impAssignTempGen(varStrTmp, varStr); if (unrolled->OperIs(GT_QMARK)) { // QMARK nodes cannot reside on the evaluation stack unsigned rootTmp = lvaGrabTemp(true DEBUGARG("spilling unroll qmark")); impAssignTempGen(rootTmp, unrolled); unrolled = gtNewLclvNode(rootTmp, TYP_INT); } JITDUMP("\n... 
Successfully unrolled to:\n") DISPTREE(unrolled) for (int i = 0; i < argsCount; i++) { impPopStack(); } } return unrolled; } //------------------------------------------------------------------------ // impSpanEqualsOrStartsWith: The main entry-point for [ReadOnly]Span<char> methods // We're going to unroll & vectorize the following cases: // 1) MemoryExtensions.SequenceEqual<char>(var, "cns") // 2) MemoryExtensions.SequenceEqual<char>("cns", var) // 3) MemoryExtensions.Equals(var, "cns", StringComparison.Ordinal) // 4) MemoryExtensions.Equals("cns", var, StringComparison.Ordinal) // 5) MemoryExtensions.StartsWith<char>("cns", var) // 6) MemoryExtensions.StartsWith<char>(var, "cns") // 7) MemoryExtensions.StartsWith("cns", var, StringComparison.Ordinal) // 8) MemoryExtensions.StartsWith(var, "cns", StringComparison.Ordinal) // // Arguments: // startsWith - Is it StartsWith or Equals? // sig - signature of StartsWith or Equals method // methodFlags - its flags // // Returns: // GenTree representing vectorized comparison or nullptr // GenTree* Compiler::impSpanEqualsOrStartsWith(bool startsWith, CORINFO_SIG_INFO* sig, unsigned methodFlags) { const bool isStatic = methodFlags & CORINFO_FLG_STATIC; const int argsCount = sig->numArgs + (isStatic ? 
0 : 1); GenTree* op1; GenTree* op2; if (argsCount == 3) // overload with StringComparison { if (!impStackTop(0).val->IsIntegralConst(4)) // StringComparison.Ordinal { // TODO-Unroll-CQ: Unroll & vectorize OrdinalIgnoreCase return nullptr; } op1 = impStackTop(2).val; op2 = impStackTop(1).val; } else { assert(argsCount == 2); op1 = impStackTop(1).val; op2 = impStackTop(0).val; } // For generic StartsWith and Equals we need to make sure T is char if (sig->sigInst.methInstCount != 0) { assert(sig->sigInst.methInstCount == 1); CORINFO_CLASS_HANDLE targetElemHnd = sig->sigInst.methInst[0]; CorInfoType typ = info.compCompHnd->getTypeForPrimitiveValueClass(targetElemHnd); if ((typ != CORINFO_TYPE_SHORT) && (typ != CORINFO_TYPE_USHORT) && (typ != CORINFO_TYPE_CHAR)) { return nullptr; } } // Try to obtain original string literals out of span arguments GenTreeStrCon* op1Str = impGetStrConFromSpan(op1); GenTreeStrCon* op2Str = impGetStrConFromSpan(op2); if (!((op1Str != nullptr) ^ (op2Str != nullptr))) { // either op1 or op2 has to be '(ReadOnlySpan)"cns"' return nullptr; } GenTree* spanObj; GenTreeStrCon* cnsStr; if (op1Str != nullptr) { cnsStr = op1Str; spanObj = op2; } else { cnsStr = op2Str; spanObj = op1; } int cnsLength = -1; const char16_t* str = nullptr; if (cnsStr->IsStringEmptyField()) { // check for fake "" first cnsLength = 0; JITDUMP("Trying to unroll MemoryExtensions.Equals|SequenceEqual|StartsWith(op1, \"\")...\n", str) } else { str = info.compCompHnd->getStringLiteral(cnsStr->gtScpHnd, cnsStr->gtSconCPX, &cnsLength); if (cnsLength < 0 || str == nullptr) { // We were unable to get the literal (e.g. 
dynamic context) return nullptr; } JITDUMP("Trying to unroll MemoryExtensions.Equals|SequenceEqual|StartsWith(op1, \"%ws\")...\n", str) } CORINFO_CLASS_HANDLE spanCls = gtGetStructHandle(spanObj); CORINFO_FIELD_HANDLE pointerHnd = info.compCompHnd->getFieldInClass(spanCls, 0); CORINFO_FIELD_HANDLE lengthHnd = info.compCompHnd->getFieldInClass(spanCls, 1); const unsigned lengthOffset = info.compCompHnd->getFieldOffset(lengthHnd); // Create a placeholder for Span object - we're not going to Append it to statements // in advance to avoid redundant spills in case if we fail to vectorize unsigned spanObjRef = lvaGrabTemp(true DEBUGARG("spanObj tmp")); unsigned spanDataTmp = lvaGrabTemp(true DEBUGARG("spanData tmp")); lvaTable[spanObjRef].lvType = TYP_BYREF; lvaTable[spanDataTmp].lvType = TYP_BYREF; GenTreeLclVar* spanObjRefLcl = gtNewLclvNode(spanObjRef, TYP_BYREF); GenTreeLclVar* spanDataTmpLcl = gtNewLclvNode(spanDataTmp, TYP_BYREF); GenTreeField* spanLength = gtNewFieldRef(TYP_INT, lengthHnd, gtClone(spanObjRefLcl), lengthOffset); GenTreeField* spanData = gtNewFieldRef(TYP_BYREF, pointerHnd, spanObjRefLcl); GenTree* unrolled = impExpandHalfConstEquals(spanDataTmpLcl, spanLength, false, startsWith, (WCHAR*)str, cnsLength, 0); if (unrolled != nullptr) { // We succeeded, fill the placeholders: impAssignTempGen(spanObjRef, impGetStructAddr(spanObj, spanCls, (unsigned)CHECK_SPILL_NONE, true)); impAssignTempGen(spanDataTmp, spanData); if (unrolled->OperIs(GT_QMARK)) { // QMARK can't be a root node, spill it to a temp unsigned rootTmp = lvaGrabTemp(true DEBUGARG("spilling unroll qmark")); impAssignTempGen(rootTmp, unrolled); unrolled = gtNewLclvNode(rootTmp, TYP_INT); } JITDUMP("... 
Successfully unrolled to:\n") DISPTREE(unrolled) for (int i = 0; i < argsCount; i++) { impPopStack(); } // We have to clean up GT_RET_EXPR for String.op_Implicit or MemoryExtensions.AsSpans if ((spanObj != op1) && op1->OperIs(GT_RET_EXPR)) { GenTree* inlineCandidate = op1->AsRetExpr()->gtInlineCandidate; assert(inlineCandidate->IsCall()); inlineCandidate->gtBashToNOP(); } else if ((spanObj != op2) && op2->OperIs(GT_RET_EXPR)) { GenTree* inlineCandidate = op2->AsRetExpr()->gtInlineCandidate; assert(inlineCandidate->IsCall()); inlineCandidate->gtBashToNOP(); } } return unrolled; }
-1
dotnet/runtime
66,235
Revert "Fix issues related to JsonSerializerOptions mutation and caching."
Reverts dotnet/runtime#65863 Fixes #66232
jkotas
2022-03-05T06:23:45Z
2022-03-05T14:08:07Z
17662fc30cfd4cc6e7dbba978a3fb512380c0b70
3ede8095da51d5d27a890020b61376155e9a61c2
Revert "Fix issues related to JsonSerializerOptions mutation and caching.". Reverts dotnet/runtime#65863 Fixes #66232
./src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/ProfileData.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Collections.Generic; using System.Linq; using ILCompiler.IBC; using Internal.Pgo; using Internal.TypeSystem; using Internal.TypeSystem.Ecma; namespace ILCompiler { [Flags] public enum MethodProfilingDataFlags { // Important: update toolbox\ibcmerge\ibcmerge.cs if you change these ReadMethodCode = 0, // 0x00001 // Also means the method was executed ReadMethodDesc = 1, // 0x00002 RunOnceMethod = 2, // 0x00004 RunNeverMethod = 3, // 0x00008 // MethodStoredDataAccess = 4, // 0x00010 // obsolete WriteMethodDesc = 5, // 0x00020 // ReadFCallHash = 6, // 0x00040 // obsolete ReadGCInfo = 7, // 0x00080 CommonReadGCInfo = 8, // 0x00100 // ReadMethodDefRidMap = 9, // 0x00200 // obsolete ReadCerMethodList = 10, // 0x00400 ReadMethodPrecode = 11, // 0x00800 WriteMethodPrecode = 12, // 0x01000 ExcludeHotMethodCode = 13, // 0x02000 // Hot method should be excluded from the ReadyToRun image ExcludeColdMethodCode = 14, // 0x04000 // Cold method should be excluded from the ReadyToRun image DisableInlining = 15, // 0x08000 // Disable inlining of this method in optimized AOT native code } public class MethodProfileData { public MethodProfileData(MethodDesc method, MethodProfilingDataFlags flags, double exclusiveWeight, Dictionary<MethodDesc, int> callWeights, uint scenarioMask, PgoSchemaElem[] schemaData) { if (method == null) throw new ArgumentNullException("method"); Method = method; Flags = flags; ScenarioMask = scenarioMask; ExclusiveWeight = exclusiveWeight; CallWeights = callWeights; SchemaData = schemaData; } public readonly MethodDesc Method; public readonly MethodProfilingDataFlags Flags; public readonly uint ScenarioMask; public readonly double ExclusiveWeight; public readonly Dictionary<MethodDesc, int> CallWeights; public readonly PgoSchemaElem[] SchemaData; } public abstract class ProfileData { public abstract 
bool PartialNGen { get; } public abstract MethodProfileData GetMethodProfileData(MethodDesc m); public abstract IEnumerable<MethodProfileData> GetAllMethodProfileData(); public abstract byte[] GetMethodBlockCount(MethodDesc m); public static void MergeProfileData(ref bool partialNgen, Dictionary<MethodDesc, MethodProfileData> mergedProfileData, ProfileData profileData) { if (profileData.PartialNGen) partialNgen = true; PgoSchemaElem[][] schemaElemMergerArray = new PgoSchemaElem[2][]; foreach (MethodProfileData data in profileData.GetAllMethodProfileData()) { MethodProfileData dataToMerge; if (mergedProfileData.TryGetValue(data.Method, out dataToMerge)) { var mergedCallWeights = data.CallWeights; if (mergedCallWeights == null) { mergedCallWeights = dataToMerge.CallWeights; } else if (dataToMerge.CallWeights != null) { mergedCallWeights = new Dictionary<MethodDesc, int>(data.CallWeights); foreach (var entry in dataToMerge.CallWeights) { if (mergedCallWeights.TryGetValue(entry.Key, out var initialWeight)) { mergedCallWeights[entry.Key] = initialWeight + entry.Value; } else { mergedCallWeights[entry.Key] = entry.Value; } } } PgoSchemaElem[] mergedSchemaData; if (data.SchemaData == null) { mergedSchemaData = dataToMerge.SchemaData; } else if (dataToMerge.SchemaData == null) { mergedSchemaData = data.SchemaData; } else { // Actually merge schemaElemMergerArray[0] = dataToMerge.SchemaData; schemaElemMergerArray[1] = data.SchemaData; mergedSchemaData = PgoProcessor.Merge<TypeSystemEntityOrUnknown>(schemaElemMergerArray); } mergedProfileData[data.Method] = new MethodProfileData(data.Method, dataToMerge.Flags | data.Flags, data.ExclusiveWeight + dataToMerge.ExclusiveWeight, mergedCallWeights, dataToMerge.ScenarioMask | data.ScenarioMask, mergedSchemaData); } else { mergedProfileData.Add(data.Method, data); } } } } public class EmptyProfileData : ProfileData { private static readonly EmptyProfileData s_singleton = new EmptyProfileData(); private EmptyProfileData() { } public 
override bool PartialNGen => false; public static EmptyProfileData Singleton => s_singleton; public override MethodProfileData GetMethodProfileData(MethodDesc m) { return null; } public override IEnumerable<MethodProfileData> GetAllMethodProfileData() { return Array.Empty<MethodProfileData>(); } public override byte[] GetMethodBlockCount(MethodDesc m) { return null; } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Collections.Generic; using System.Linq; using ILCompiler.IBC; using Internal.Pgo; using Internal.TypeSystem; using Internal.TypeSystem.Ecma; namespace ILCompiler { [Flags] public enum MethodProfilingDataFlags { // Important: update toolbox\ibcmerge\ibcmerge.cs if you change these ReadMethodCode = 0, // 0x00001 // Also means the method was executed ReadMethodDesc = 1, // 0x00002 RunOnceMethod = 2, // 0x00004 RunNeverMethod = 3, // 0x00008 // MethodStoredDataAccess = 4, // 0x00010 // obsolete WriteMethodDesc = 5, // 0x00020 // ReadFCallHash = 6, // 0x00040 // obsolete ReadGCInfo = 7, // 0x00080 CommonReadGCInfo = 8, // 0x00100 // ReadMethodDefRidMap = 9, // 0x00200 // obsolete ReadCerMethodList = 10, // 0x00400 ReadMethodPrecode = 11, // 0x00800 WriteMethodPrecode = 12, // 0x01000 ExcludeHotMethodCode = 13, // 0x02000 // Hot method should be excluded from the ReadyToRun image ExcludeColdMethodCode = 14, // 0x04000 // Cold method should be excluded from the ReadyToRun image DisableInlining = 15, // 0x08000 // Disable inlining of this method in optimized AOT native code } public class MethodProfileData { public MethodProfileData(MethodDesc method, MethodProfilingDataFlags flags, double exclusiveWeight, Dictionary<MethodDesc, int> callWeights, uint scenarioMask, PgoSchemaElem[] schemaData) { if (method == null) throw new ArgumentNullException("method"); Method = method; Flags = flags; ScenarioMask = scenarioMask; ExclusiveWeight = exclusiveWeight; CallWeights = callWeights; SchemaData = schemaData; } public readonly MethodDesc Method; public readonly MethodProfilingDataFlags Flags; public readonly uint ScenarioMask; public readonly double ExclusiveWeight; public readonly Dictionary<MethodDesc, int> CallWeights; public readonly PgoSchemaElem[] SchemaData; } public abstract class ProfileData { public abstract 
bool PartialNGen { get; } public abstract MethodProfileData GetMethodProfileData(MethodDesc m); public abstract IEnumerable<MethodProfileData> GetAllMethodProfileData(); public abstract byte[] GetMethodBlockCount(MethodDesc m); public static void MergeProfileData(ref bool partialNgen, Dictionary<MethodDesc, MethodProfileData> mergedProfileData, ProfileData profileData) { if (profileData.PartialNGen) partialNgen = true; PgoSchemaElem[][] schemaElemMergerArray = new PgoSchemaElem[2][]; foreach (MethodProfileData data in profileData.GetAllMethodProfileData()) { MethodProfileData dataToMerge; if (mergedProfileData.TryGetValue(data.Method, out dataToMerge)) { var mergedCallWeights = data.CallWeights; if (mergedCallWeights == null) { mergedCallWeights = dataToMerge.CallWeights; } else if (dataToMerge.CallWeights != null) { mergedCallWeights = new Dictionary<MethodDesc, int>(data.CallWeights); foreach (var entry in dataToMerge.CallWeights) { if (mergedCallWeights.TryGetValue(entry.Key, out var initialWeight)) { mergedCallWeights[entry.Key] = initialWeight + entry.Value; } else { mergedCallWeights[entry.Key] = entry.Value; } } } PgoSchemaElem[] mergedSchemaData; if (data.SchemaData == null) { mergedSchemaData = dataToMerge.SchemaData; } else if (dataToMerge.SchemaData == null) { mergedSchemaData = data.SchemaData; } else { // Actually merge schemaElemMergerArray[0] = dataToMerge.SchemaData; schemaElemMergerArray[1] = data.SchemaData; mergedSchemaData = PgoProcessor.Merge<TypeSystemEntityOrUnknown>(schemaElemMergerArray); } mergedProfileData[data.Method] = new MethodProfileData(data.Method, dataToMerge.Flags | data.Flags, data.ExclusiveWeight + dataToMerge.ExclusiveWeight, mergedCallWeights, dataToMerge.ScenarioMask | data.ScenarioMask, mergedSchemaData); } else { mergedProfileData.Add(data.Method, data); } } } } public class EmptyProfileData : ProfileData { private static readonly EmptyProfileData s_singleton = new EmptyProfileData(); private EmptyProfileData() { } public 
override bool PartialNGen => false; public static EmptyProfileData Singleton => s_singleton; public override MethodProfileData GetMethodProfileData(MethodDesc m) { return null; } public override IEnumerable<MethodProfileData> GetAllMethodProfileData() { return Array.Empty<MethodProfileData>(); } public override byte[] GetMethodBlockCount(MethodDesc m) { return null; } } }
-1
dotnet/runtime
66,234
Remove compiler warning suppression
Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
AaronRobinsonMSFT
2022-03-05T03:45:49Z
2022-03-09T00:57:12Z
220e67755ae6323a01011b83070dff6f84b02519
853e494abd1c1e12d28a76829b4680af7f694afa
Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
./src/coreclr/debug/daccess/daccess.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. //***************************************************************************** // File: daccess.cpp // // // ClrDataAccess implementation. // //***************************************************************************** #include "stdafx.h" #include <clrdata.h> #include "typestring.h" #include "holder.h" #include "debuginfostore.h" #include "peimagelayout.inl" #include "datatargetadapter.h" #include "readonlydatatargetfacade.h" #include "metadataexports.h" #include "excep.h" #include "debugger.h" #include "dwreport.h" #include "primitives.h" #include "dbgutil.h" #ifdef TARGET_UNIX #ifdef USE_DAC_TABLE_RVA #include <dactablerva.h> #else extern "C" bool TryGetSymbol(ICorDebugDataTarget* dataTarget, uint64_t baseAddress, const char* symbolName, uint64_t* symbolAddress); #endif #endif #include "dwbucketmanager.hpp" #include "gcinterface.dac.h" // To include definiton of IsThrowableThreadAbortException // #include <exstatecommon.h> CRITICAL_SECTION g_dacCritSec; ClrDataAccess* g_dacImpl; EXTERN_C #ifdef TARGET_UNIX DLLEXPORT // For Win32 PAL LoadLibrary emulation #endif BOOL WINAPI DllMain(HANDLE instance, DWORD reason, LPVOID reserved) { static bool g_procInitialized = false; switch(reason) { case DLL_PROCESS_ATTACH: { if (g_procInitialized) { #ifdef HOST_UNIX // Double initialization can happen on Unix // in case of manual load of DAC shared lib and calling DllMain // not a big deal, we just ignore it. return TRUE; #else return FALSE; #endif } #ifdef HOST_UNIX int err = PAL_InitializeDLL(); if(err != 0) { return FALSE; } #endif InitializeCriticalSection(&g_dacCritSec); g_procInitialized = true; break; } case DLL_PROCESS_DETACH: // It's possible for this to be called without ATTACH completing (eg. 
if it failed) if (g_procInitialized) { DeleteCriticalSection(&g_dacCritSec); } g_procInitialized = false; break; } return TRUE; } HRESULT ConvertUtf8(_In_ LPCUTF8 utf8, ULONG32 bufLen, ULONG32* nameLen, _Out_writes_to_opt_(bufLen, *nameLen) PWSTR buffer) { if (nameLen) { *nameLen = WszMultiByteToWideChar(CP_UTF8, 0, utf8, -1, NULL, 0); if (!*nameLen) { return HRESULT_FROM_GetLastError(); } } if (buffer && bufLen) { if (!WszMultiByteToWideChar(CP_UTF8, 0, utf8, -1, buffer, bufLen)) { return HRESULT_FROM_GetLastError(); } } return S_OK; } HRESULT AllocUtf8(_In_opt_ LPCWSTR wstr, ULONG32 srcChars, _Outptr_ LPUTF8* utf8) { ULONG32 chars = WszWideCharToMultiByte(CP_UTF8, 0, wstr, srcChars, NULL, 0, NULL, NULL); if (!chars) { return HRESULT_FROM_GetLastError(); } // Make sure the converted string is always terminated. if (srcChars != (ULONG32)-1) { if (!ClrSafeInt<ULONG32>::addition(chars, 1, chars)) { return HRESULT_FROM_WIN32(ERROR_ARITHMETIC_OVERFLOW); } } char* mem = new (nothrow) char[chars]; if (!mem) { return E_OUTOFMEMORY; } if (!WszWideCharToMultiByte(CP_UTF8, 0, wstr, srcChars, mem, chars, NULL, NULL)) { HRESULT hr = HRESULT_FROM_GetLastError(); delete [] mem; return hr; } if (srcChars != (ULONG32)-1) { mem[chars - 1] = 0; } *utf8 = mem; return S_OK; } HRESULT GetFullClassNameFromMetadata(IMDInternalImport* mdImport, mdTypeDef classToken, ULONG32 bufferChars, _Inout_updates_(bufferChars) LPUTF8 buffer) { HRESULT hr; LPCUTF8 baseName, namespaceName; IfFailRet(mdImport->GetNameOfTypeDef(classToken, &baseName, &namespaceName)); return ns::MakePath(buffer, bufferChars, namespaceName, baseName) ? 
S_OK : E_OUTOFMEMORY; } HRESULT GetFullMethodNameFromMetadata(IMDInternalImport* mdImport, mdMethodDef methodToken, ULONG32 bufferChars, _Inout_updates_(bufferChars) LPUTF8 buffer) { HRESULT status; HRESULT hr; mdTypeDef classToken; size_t len; if (mdImport->GetParentToken(methodToken, &classToken) == S_OK) { if ((status = GetFullClassNameFromMetadata(mdImport, classToken, bufferChars, buffer)) != S_OK) { return status; } len = strlen(buffer); buffer += len; bufferChars -= static_cast<ULONG32>(len) + 1; if (!bufferChars) { return E_OUTOFMEMORY; } *buffer++ = NAMESPACE_SEPARATOR_CHAR; } LPCUTF8 methodName; IfFailRet(mdImport->GetNameOfMethodDef(methodToken, &methodName)); // Review conversion of size_t to ULONG32. #ifdef _MSC_VER #pragma warning(push) #pragma warning(disable:4267) #endif len = strlen(methodName); #ifdef _MSC_VER #pragma warning(pop) #endif if (len >= bufferChars) { return E_OUTOFMEMORY; } strcpy_s(buffer, bufferChars, methodName); return S_OK; } HRESULT SplitFullName(_In_z_ PCWSTR fullName, SplitSyntax syntax, ULONG32 memberDots, _Outptr_opt_ LPUTF8* namespaceName, _Outptr_opt_ LPUTF8* typeName, _Outptr_opt_ LPUTF8* memberName, _Outptr_opt_ LPUTF8* params) { HRESULT status; PCWSTR paramsStart, memberStart, memberEnd, typeStart; if (!*fullName) { return E_INVALIDARG; } // // Split off parameters. // paramsStart = wcschr(fullName, W('(')); if (paramsStart) { if (syntax != SPLIT_METHOD || paramsStart == fullName) { return E_INVALIDARG; } if ((status = AllocUtf8(paramsStart, (ULONG32)-1, params)) != S_OK) { return status; } memberEnd = paramsStart - 1; } else { *params = NULL; memberEnd = fullName + (wcslen(fullName) - 1); } if (syntax != SPLIT_TYPE) { // // Split off member name. // memberStart = memberEnd; for (;;) { while (memberStart >= fullName && *memberStart != W('.')) { memberStart--; } // Some member names (e.g. .ctor and .dtor) have // dots, so go back to the first dot. 
while (memberStart > fullName && memberStart[-1] == W('.')) { memberStart--; } if (memberStart <= fullName) { if (memberDots > 0) { // Caller expected dots in the // member name and they weren't found. status = E_INVALIDARG; goto DelParams; } break; } else if (memberDots == 0) { break; } memberStart--; memberDots--; } memberStart++; if (memberStart > memberEnd) { status = E_INVALIDARG; goto DelParams; } if ((status = AllocUtf8(memberStart, (ULONG32) (memberEnd - memberStart) + 1, memberName)) != S_OK) { goto DelParams; } } else { *memberName = NULL; memberStart = memberEnd + 2; } // // Split off type name. // if (memberStart > fullName) { // Must have at least one character for the type // name. If there was a member name, there must // also be a separator. if (memberStart < fullName + 2) { status = E_INVALIDARG; goto DelMember; } typeStart = memberStart - 2; while (typeStart >= fullName && *typeStart != W('.')) { typeStart--; } typeStart++; if ((status = AllocUtf8(typeStart, (ULONG32) (memberStart - typeStart) - 1, typeName)) != S_OK) { goto DelMember; } } else { *typeName = NULL; typeStart = fullName; } // // Namespace must be the rest. // if (typeStart > fullName) { if ((status = AllocUtf8(fullName, (ULONG32) (typeStart - fullName) - 1, namespaceName)) != S_OK) { goto DelType; } } else { *namespaceName = NULL; } return S_OK; DelType: delete [] (*typeName); DelMember: delete [] (*memberName); DelParams: delete [] (*params); return status; } int CompareUtf8(_In_ LPCUTF8 str1, _In_ LPCUTF8 str2, _In_ ULONG32 nameFlags) { if (nameFlags & CLRDATA_BYNAME_CASE_INSENSITIVE) { // XXX Microsoft - Convert to Unicode? return SString::_stricmp(str1, str2); } return strcmp(str1, str2); } //---------------------------------------------------------------------------- // // MetaEnum. 
// //---------------------------------------------------------------------------- HRESULT MetaEnum::Start(IMDInternalImport* mdImport, ULONG32 kind, mdToken container) { HRESULT status; switch(kind) { case mdtTypeDef: status = mdImport->EnumTypeDefInit(&m_enum); break; case mdtMethodDef: case mdtFieldDef: status = mdImport->EnumInit(kind, container, &m_enum); break; default: return E_INVALIDARG; } if (status != S_OK) { return status; } m_mdImport = mdImport; m_kind = kind; return S_OK; } void MetaEnum::End(void) { if (!m_mdImport) { return; } switch(m_kind) { case mdtTypeDef: case mdtMethodDef: case mdtFieldDef: m_mdImport->EnumClose(&m_enum); break; } Clear(); } HRESULT MetaEnum::NextToken(mdToken* token, _Outptr_opt_result_maybenull_ LPCUTF8* namespaceName, _Outptr_opt_result_maybenull_ LPCUTF8* name) { HRESULT hr; if (!m_mdImport) { return E_INVALIDARG; } switch(m_kind) { case mdtTypeDef: if (!m_mdImport->EnumNext(&m_enum, token)) { return S_FALSE; } m_lastToken = *token; if (namespaceName || name) { LPCSTR _name, _namespaceName; IfFailRet(m_mdImport->GetNameOfTypeDef(*token, &_name, &_namespaceName)); if (namespaceName) { *namespaceName = _namespaceName; } if (name) { *name = _name; } } return S_OK; case mdtMethodDef: if (!m_mdImport->EnumNext(&m_enum, token)) { return S_FALSE; } m_lastToken = *token; if (namespaceName) { *namespaceName = NULL; } if (name != NULL) { IfFailRet(m_mdImport->GetNameOfMethodDef(*token, name)); } return S_OK; case mdtFieldDef: if (!m_mdImport->EnumNext(&m_enum, token)) { return S_FALSE; } m_lastToken = *token; if (namespaceName) { *namespaceName = NULL; } if (name != NULL) { IfFailRet(m_mdImport->GetNameOfFieldDef(*token, name)); } return S_OK; default: return E_INVALIDARG; } } HRESULT MetaEnum::NextDomainToken(AppDomain** appDomain, mdToken* token) { HRESULT status; if (m_appDomain) { // Use only the caller-provided app domain. 
*appDomain = m_appDomain; return NextToken(token, NULL, NULL); } // // Splay tokens across all app domains. // for (;;) { if (m_lastToken == mdTokenNil) { // Need to fetch a token. if ((status = NextToken(token, NULL, NULL)) != S_OK) { return status; } m_domainIter.Init(); } if (m_domainIter.Next()) { break; } m_lastToken = mdTokenNil; } *appDomain = m_domainIter.GetDomain(); *token = m_lastToken; return S_OK; } HRESULT MetaEnum::NextTokenByName(_In_opt_ LPCUTF8 namespaceName, _In_opt_ LPCUTF8 name, ULONG32 nameFlags, mdToken* token) { HRESULT status; LPCUTF8 tokNamespace, tokName; for (;;) { if ((status = NextToken(token, &tokNamespace, &tokName)) != S_OK) { return status; } if (namespaceName && (!tokNamespace || CompareUtf8(namespaceName, tokNamespace, nameFlags) != 0)) { continue; } if (name && (!tokName || CompareUtf8(name, tokName, nameFlags) != 0)) { continue; } return S_OK; } } HRESULT MetaEnum::NextDomainTokenByName(_In_opt_ LPCUTF8 namespaceName, _In_opt_ LPCUTF8 name, ULONG32 nameFlags, AppDomain** appDomain, mdToken* token) { HRESULT status; if (m_appDomain) { // Use only the caller-provided app domain. *appDomain = m_appDomain; return NextTokenByName(namespaceName, name, nameFlags, token); } // // Splay tokens across all app domains. // for (;;) { if (m_lastToken == mdTokenNil) { // Need to fetch a token. 
if ((status = NextTokenByName(namespaceName, name, nameFlags, token)) != S_OK) { return status; } m_domainIter.Init(); } if (m_domainIter.Next()) { break; } m_lastToken = mdTokenNil; } *appDomain = m_domainIter.GetDomain(); *token = m_lastToken; return S_OK; } HRESULT MetaEnum::New(Module* mod, ULONG32 kind, mdToken container, IXCLRDataAppDomain* pubAppDomain, MetaEnum** metaEnumRet, CLRDATA_ENUM* handle) { HRESULT status; MetaEnum* metaEnum; if (handle) { *handle = TO_CDENUM(NULL); } metaEnum = new (nothrow) MetaEnum; if (!metaEnum) { return E_OUTOFMEMORY; } if ((status = metaEnum-> Start(mod->GetMDImport(), kind, container)) != S_OK) { delete metaEnum; return status; } if (pubAppDomain) { metaEnum->m_appDomain = ((ClrDataAppDomain*)pubAppDomain)->GetAppDomain(); } if (metaEnumRet) { *metaEnumRet = metaEnum; } if (handle) { *handle = TO_CDENUM(metaEnum); } return S_OK; } //---------------------------------------------------------------------------- // // SplitName // //---------------------------------------------------------------------------- SplitName::SplitName(SplitSyntax syntax, ULONG32 nameFlags, ULONG32 memberDots) { m_syntax = syntax; m_nameFlags = nameFlags; m_memberDots = memberDots; Clear(); } void SplitName::Delete(void) { delete [] m_namespaceName; m_namespaceName = NULL; delete [] m_typeName; m_typeName = NULL; delete [] m_memberName; m_memberName = NULL; delete [] m_params; m_params = NULL; } void SplitName::Clear(void) { m_namespaceName = NULL; m_typeName = NULL; m_typeToken = mdTypeDefNil; m_memberName = NULL; m_memberToken = mdTokenNil; m_params = NULL; m_tlsThread = NULL; m_metaEnum.m_appDomain = NULL; m_module = NULL; m_lastField = NULL; } HRESULT SplitName::SplitString(_In_opt_ PCWSTR fullName) { if (m_syntax == SPLIT_NO_NAME) { if (fullName) { return E_INVALIDARG; } return S_OK; } else if (!fullName) { return E_INVALIDARG; } return SplitFullName(fullName, m_syntax, m_memberDots, &m_namespaceName, &m_typeName, &m_memberName, &m_params); } 
FORCEINLINE WCHAR* wcrscan(LPCWSTR beg, LPCWSTR end, WCHAR ch) { //_ASSERTE(beg <= end); WCHAR *p; for (p = (WCHAR*)end; p >= beg; --p) { if (*p == ch) break; } return p; } // This functions allocates a new UTF8 string that contains the classname // lying between the current sepName and the previous sepName. E.g. for a // class name of "Outer+middler+inner" when sepName points to the NULL // terminator this function will return "inner" in pResult and will update // sepName to point to the second '+' character in the string. When sepName // points to the first '+' character this function will return "Outer" in // pResult and sepName will point one WCHAR before fullName. HRESULT NextEnclosingClasName(LPCWSTR fullName, _Outref_ LPWSTR& sepName, _Outptr_ LPUTF8 *pResult) { if (sepName < fullName) { return E_FAIL; } //_ASSERTE(*sepName == W('\0') || *sepName == W('+') || *sepName == W('/')); LPWSTR origInnerName = sepName-1; if ((sepName = wcrscan(fullName, origInnerName, W('+'))) < fullName) { sepName = wcrscan(fullName, origInnerName, W('/')); } return AllocUtf8(sepName+1, static_cast<ULONG32>(origInnerName-sepName), pResult); } bool SplitName::FindType(IMDInternalImport* mdInternal) { if (m_typeToken != mdTypeDefNil) { return true; } if (!m_typeName) { return false; } if ((m_namespaceName == NULL || m_namespaceName[0] == '\0') && (CompareUtf8(COR_MODULE_CLASS, m_typeName, m_nameFlags)==0)) { m_typeToken = TokenFromRid(1, mdtTypeDef); // <Module> class always has a RID of 1. 
return true; } MetaEnum metaEnum; if (metaEnum.Start(mdInternal, mdtTypeDef, mdTypeDefNil) != S_OK) { return false; } LPUTF8 curClassName; ULONG32 length; WCHAR wszName[MAX_CLASS_NAME]; if (ConvertUtf8(m_typeName, MAX_CLASS_NAME, &length, wszName) != S_OK) { return false; } WCHAR *pHead; Retry: pHead = wszName + length; if (FAILED(NextEnclosingClasName(wszName, pHead, &curClassName))) { return false; } // an inner class has an empty namespace associated with it HRESULT hr = metaEnum.NextTokenByName((pHead < wszName) ? m_namespaceName : "", curClassName, m_nameFlags, &m_typeToken); delete[] curClassName; if (hr != S_OK) { // if we didn't find a token with the given name return false; } else if (pHead < wszName) { // if we did find a token, *and* the class name given // does not specify any enclosing class, that's it return true; } else { // restart with innermost class pHead = wszName + length; mdTypeDef tkInner = m_typeToken; mdTypeDef tkOuter; BOOL bRetry = FALSE; LPUTF8 utf8Name; while ( !bRetry && SUCCEEDED(NextEnclosingClasName(wszName, pHead, &utf8Name)) ) { if (mdInternal->GetNestedClassProps(tkInner, &tkOuter) != S_OK) tkOuter = mdTypeDefNil; LPCSTR szName, szNS; if (FAILED(mdInternal->GetNameOfTypeDef(tkInner, &szName, &szNS))) { return false; } bRetry = (CompareUtf8(utf8Name, szName, m_nameFlags) != 0); if (!bRetry) { // if this is outermost class we need to compare namespaces too if (tkOuter == mdTypeDefNil) { // is this the outermost in the class name, too? if (pHead < wszName && CompareUtf8(m_namespaceName ? m_namespaceName : "", szNS, m_nameFlags) == 0) { delete[] utf8Name; return true; } else { bRetry = TRUE; } } } delete[] utf8Name; tkInner = tkOuter; } goto Retry; } } bool SplitName::FindMethod(IMDInternalImport* mdInternal) { if (m_memberToken != mdTokenNil) { return true; } if (m_typeToken == mdTypeDefNil || !m_memberName) { return false; } ULONG32 EmptySig = 0; // XXX Microsoft - Compare using signature when available. 
if (mdInternal->FindMethodDefUsingCompare(m_typeToken, m_memberName, (PCCOR_SIGNATURE)&EmptySig, sizeof(EmptySig), NULL, NULL, &m_memberToken) != S_OK) { m_memberToken = mdTokenNil; return false; } return true; } bool SplitName::FindField(IMDInternalImport* mdInternal) { if (m_memberToken != mdTokenNil) { return true; } if (m_typeToken == mdTypeDefNil || !m_memberName || m_params) { // Can't have params with a field. return false; } MetaEnum metaEnum; if (metaEnum.Start(mdInternal, mdtFieldDef, m_typeToken) != S_OK) { return false; } return metaEnum.NextTokenByName(NULL, m_memberName, m_nameFlags, &m_memberToken) == S_OK; } HRESULT SplitName::AllocAndSplitString(_In_opt_ PCWSTR fullName, SplitSyntax syntax, ULONG32 nameFlags, ULONG32 memberDots, SplitName** split) { HRESULT status; if (nameFlags & ~(CLRDATA_BYNAME_CASE_SENSITIVE | CLRDATA_BYNAME_CASE_INSENSITIVE)) { return E_INVALIDARG; } *split = new (nothrow) SplitName(syntax, nameFlags, memberDots); if (!*split) { return E_OUTOFMEMORY; } if ((status = (*split)->SplitString(fullName)) != S_OK) { delete (*split); return status; } return S_OK; } HRESULT SplitName::CdStartMethod(_In_opt_ PCWSTR fullName, ULONG32 nameFlags, Module* mod, mdTypeDef typeToken, AppDomain* appDomain, IXCLRDataAppDomain* pubAppDomain, SplitName** splitRet, CLRDATA_ENUM* handle) { HRESULT status; SplitName* split; ULONG methDots = 0; *handle = TO_CDENUM(NULL); Retry: if ((status = SplitName:: AllocAndSplitString(fullName, SPLIT_METHOD, nameFlags, methDots, &split)) != S_OK) { return status; } if (typeToken == mdTypeDefNil) { if (!split->FindType(mod->GetMDImport())) { bool hasNamespace = split->m_namespaceName != NULL; delete split; // // We may have a case where there's an // explicitly implemented method which // has dots in the name. If it's possible // to move the method name dot split // back, go ahead and retry that way. 
// if (hasNamespace) { methDots++; goto Retry; } return E_INVALIDARG; } typeToken = split->m_typeToken; } else { if (split->m_namespaceName || split->m_typeName) { delete split; return E_INVALIDARG; } } if ((status = split->m_metaEnum. Start(mod->GetMDImport(), mdtMethodDef, typeToken)) != S_OK) { delete split; return status; } split->m_metaEnum.m_appDomain = appDomain; if (pubAppDomain) { split->m_metaEnum.m_appDomain = ((ClrDataAppDomain*)pubAppDomain)->GetAppDomain(); } split->m_module = mod; *handle = TO_CDENUM(split); if (splitRet) { *splitRet = split; } return S_OK; } HRESULT SplitName::CdNextMethod(CLRDATA_ENUM* handle, mdMethodDef* token) { SplitName* split = FROM_CDENUM(SplitName, *handle); if (!split) { return E_INVALIDARG; } return split->m_metaEnum. NextTokenByName(NULL, split->m_memberName, split->m_nameFlags, token); } HRESULT SplitName::CdNextDomainMethod(CLRDATA_ENUM* handle, AppDomain** appDomain, mdMethodDef* token) { SplitName* split = FROM_CDENUM(SplitName, *handle); if (!split) { return E_INVALIDARG; } return split->m_metaEnum. NextDomainTokenByName(NULL, split->m_memberName, split->m_nameFlags, appDomain, token); } HRESULT SplitName::CdStartField(_In_opt_ PCWSTR fullName, ULONG32 nameFlags, ULONG32 fieldFlags, IXCLRDataTypeInstance* fromTypeInst, TypeHandle typeHandle, Module* mod, mdTypeDef typeToken, ULONG64 objBase, Thread* tlsThread, IXCLRDataTask* pubTlsThread, AppDomain* appDomain, IXCLRDataAppDomain* pubAppDomain, SplitName** splitRet, CLRDATA_ENUM* handle) { HRESULT status; SplitName* split; *handle = TO_CDENUM(NULL); if ((status = SplitName:: AllocAndSplitString(fullName, fullName ? 
SPLIT_FIELD : SPLIT_NO_NAME, nameFlags, 0, &split)) != S_OK) { return status; } if (typeHandle.IsNull()) { if (typeToken == mdTypeDefNil) { if (!split->FindType(mod->GetMDImport())) { status = E_INVALIDARG; goto Fail; } typeToken = split->m_typeToken; } else { if (split->m_namespaceName || split->m_typeName) { status = E_INVALIDARG; goto Fail; } } // With phased class loading, this may return a partially-loaded type // @todo : does this matter? typeHandle = mod->LookupTypeDef(split->m_typeToken); if (typeHandle.IsNull()) { status = E_UNEXPECTED; goto Fail; } } if ((status = InitFieldIter(&split->m_fieldEnum, typeHandle, true, fieldFlags, fromTypeInst)) != S_OK) { goto Fail; } split->m_objBase = objBase; split->m_tlsThread = tlsThread; if (pubTlsThread) { split->m_tlsThread = ((ClrDataTask*)pubTlsThread)->GetThread(); } split->m_metaEnum.m_appDomain = appDomain; if (pubAppDomain) { split->m_metaEnum.m_appDomain = ((ClrDataAppDomain*)pubAppDomain)->GetAppDomain(); } split->m_module = mod; *handle = TO_CDENUM(split); if (splitRet) { *splitRet = split; } return S_OK; Fail: delete split; return status; } HRESULT SplitName::CdNextField(ClrDataAccess* dac, CLRDATA_ENUM* handle, IXCLRDataTypeDefinition** fieldType, ULONG32* fieldFlags, IXCLRDataValue** value, ULONG32 nameBufRetLen, ULONG32* nameLenRet, _Out_writes_to_opt_(nameBufRetLen, *nameLenRet) WCHAR nameBufRet[ ], IXCLRDataModule** tokenScopeRet, mdFieldDef* tokenRet) { HRESULT status; SplitName* split = FROM_CDENUM(SplitName, *handle); if (!split) { return E_INVALIDARG; } FieldDesc* fieldDesc; while ((fieldDesc = split->m_fieldEnum.Next())) { if (split->m_syntax != SPLIT_NO_NAME) { LPCUTF8 fieldName; if (FAILED(fieldDesc->GetName_NoThrow(&fieldName)) || (split->Compare(split->m_memberName, fieldName) != 0)) { continue; } } split->m_lastField = fieldDesc; if (fieldFlags != NULL) { *fieldFlags = GetTypeFieldValueFlags(fieldDesc->GetFieldTypeHandleThrowing(), fieldDesc, split->m_fieldEnum. IsFieldFromParentClass() ? 
CLRDATA_FIELD_IS_INHERITED : 0, false); } if ((nameBufRetLen != 0) || (nameLenRet != NULL)) { LPCUTF8 szFieldName; status = fieldDesc->GetName_NoThrow(&szFieldName); if (status != S_OK) { return status; } status = ConvertUtf8( szFieldName, nameBufRetLen, nameLenRet, nameBufRet); if (status != S_OK) { return status; } } if (tokenScopeRet && !value) { *tokenScopeRet = new (nothrow) ClrDataModule(dac, fieldDesc->GetModule()); if (!*tokenScopeRet) { return E_OUTOFMEMORY; } } if (tokenRet) { *tokenRet = fieldDesc->GetMemberDef(); } if (fieldType) { TypeHandle fieldTypeHandle = fieldDesc->GetFieldTypeHandleThrowing(); *fieldType = new (nothrow) ClrDataTypeDefinition(dac, fieldTypeHandle.GetModule(), fieldTypeHandle.GetMethodTable()->GetCl(), fieldTypeHandle); if (!*fieldType && tokenScopeRet) { delete (ClrDataModule*)*tokenScopeRet; } return *fieldType ? S_OK : E_OUTOFMEMORY; } if (value) { return ClrDataValue:: NewFromFieldDesc(dac, split->m_metaEnum.m_appDomain, split->m_fieldEnum.IsFieldFromParentClass() ? CLRDATA_VALUE_IS_INHERITED : 0, fieldDesc, split->m_objBase, split->m_tlsThread, NULL, value, nameBufRetLen, nameLenRet, nameBufRet, tokenScopeRet, tokenRet); } return S_OK; } return S_FALSE; } HRESULT SplitName::CdNextDomainField(ClrDataAccess* dac, CLRDATA_ENUM* handle, IXCLRDataValue** value) { HRESULT status; SplitName* split = FROM_CDENUM(SplitName, *handle); if (!split) { return E_INVALIDARG; } if (split->m_metaEnum.m_appDomain) { // Use only the caller-provided app domain. return CdNextField(dac, handle, NULL, NULL, value, 0, NULL, NULL, NULL, NULL); } // // Splay fields across all app domains. // for (;;) { if (!split->m_lastField) { // Need to fetch a field. 
if ((status = CdNextField(dac, handle, NULL, NULL, NULL, 0, NULL, NULL, NULL, NULL)) != S_OK) { return status; } split->m_metaEnum.m_domainIter.Init(); } if (split->m_metaEnum.m_domainIter.Next()) { break; } split->m_lastField = NULL; } return ClrDataValue:: NewFromFieldDesc(dac, split->m_metaEnum.m_domainIter.GetDomain(), split->m_fieldEnum.IsFieldFromParentClass() ? CLRDATA_VALUE_IS_INHERITED : 0, split->m_lastField, split->m_objBase, split->m_tlsThread, NULL, value, 0, NULL, NULL, NULL, NULL); } HRESULT SplitName::CdStartType(_In_opt_ PCWSTR fullName, ULONG32 nameFlags, Module* mod, AppDomain* appDomain, IXCLRDataAppDomain* pubAppDomain, SplitName** splitRet, CLRDATA_ENUM* handle) { HRESULT status; SplitName* split; *handle = TO_CDENUM(NULL); if ((status = SplitName:: AllocAndSplitString(fullName, SPLIT_TYPE, nameFlags, 0, &split)) != S_OK) { return status; } if ((status = split->m_metaEnum. Start(mod->GetMDImport(), mdtTypeDef, mdTokenNil)) != S_OK) { delete split; return status; } split->m_metaEnum.m_appDomain = appDomain; if (pubAppDomain) { split->m_metaEnum.m_appDomain = ((ClrDataAppDomain*)pubAppDomain)->GetAppDomain(); } split->m_module = mod; *handle = TO_CDENUM(split); if (splitRet) { *splitRet = split; } return S_OK; } HRESULT SplitName::CdNextType(CLRDATA_ENUM* handle, mdTypeDef* token) { SplitName* split = FROM_CDENUM(SplitName, *handle); if (!split) { return E_INVALIDARG; } return split->m_metaEnum. NextTokenByName(split->m_namespaceName, split->m_typeName, split->m_nameFlags, token); } HRESULT SplitName::CdNextDomainType(CLRDATA_ENUM* handle, AppDomain** appDomain, mdTypeDef* token) { SplitName* split = FROM_CDENUM(SplitName, *handle); if (!split) { return E_INVALIDARG; } return split->m_metaEnum. NextDomainTokenByName(split->m_namespaceName, split->m_typeName, split->m_nameFlags, appDomain, token); } //---------------------------------------------------------------------------- // // DacInstanceManager. 
//
// Data retrieved from the target process is cached for two reasons:
//
// 1. It may be necessary to map from the host address back to the target
//    address.  For example, if any code uses a 'this' pointer or
//    takes the address of a field the address has to be translated from
//    host to target.  This requires instances to be held as long
//    as they may be referenced.
//
// 2. Data is often referenced multiple times so caching is an important
//    performance advantage.
//
// Ideally we'd like to implement a simple page cache but this is
// complicated by the fact that user minidump memory can have
// arbitrary granularity and also that the member operator (->)
// needs to return a pointer to an object.  That means that all of
// the data for an object must be sequential and cannot be split
// at page boundaries.
//
// Data can also be accessed with different sizes.  For example,
// a base struct can be accessed, then cast to a derived struct and
// accessed again with the larger derived size.  The cache must
// be able to replace data to maintain the largest amount of data
// touched.
//
// We keep track of each access and the recovered memory for it.
// A hash on target address allows quick access to instance data
// by target address.  The data for each access has a header on it
// for bookkeeping purposes, so host address to target address translation
// is just a matter of backing up to the header and pulling the target
// address from it.  Keeping each access separately allows easy
// replacement by larger accesses.
//
//----------------------------------------------------------------------------

DacInstanceManager::DacInstanceManager(void)
    : m_unusedBlock(NULL)
{
    InitEmpty();
}

DacInstanceManager::~DacInstanceManager(void)
{
    // We are stopping debugging in this case, so don't save any block of memory.
    // Otherwise, there will be a memory leak.
    Flush(false);
}

#if defined(DAC_HASHTABLE)

// Records an instance in the open-addressed bucket block chain for its
// target-address hash.  Entries are filled from the back of each block
// (firstElement counts down) so lookups see the newest entry first.
DAC_INSTANCE*
DacInstanceManager::Add(DAC_INSTANCE* inst)
{
    // Assert that we don't add NULL instances. This allows us to assert that found instances
    // are not NULL in DacInstanceManager::Find
    _ASSERTE(inst != NULL);

    DWORD nHash = DAC_INSTANCE_HASH(inst->addr);
    HashInstanceKeyBlock* block = m_hash[nHash];

    if (!block || block->firstElement == 0)
    {
        HashInstanceKeyBlock* newBlock;
        if (block)
        {
            newBlock = (HashInstanceKeyBlock*) new (nothrow) BYTE[HASH_INSTANCE_BLOCK_ALLOC_SIZE];
        }
        else
        {
            // We allocate one big memory chunk that has a block for every index of the hash table to
            // improve data locality and reduce the number of allocs. In most cases, a hash bucket will
            // use only one block, so improving data locality across blocks (i.e. keeping the buckets of the
            // hash table together) should help.
            newBlock = (HashInstanceKeyBlock*)
                ClrVirtualAlloc(NULL, HASH_INSTANCE_BLOCK_ALLOC_SIZE*ARRAY_SIZE(m_hash), MEM_COMMIT, PAGE_READWRITE);
        }
        if (!newBlock)
        {
            return NULL;
        }
        if (block)
        {
            // We add the newest block to the start of the list assuming that most accesses are for
            // recently added elements.
            newBlock->next = block;
            m_hash[nHash] = newBlock; // The previously allocated block
            newBlock->firstElement = HASH_INSTANCE_BLOCK_NUM_ELEMENTS;
            block = newBlock;
        }
        else
        {
            // First-time setup: carve the single big chunk into one block per
            // bucket.  Only the chunk base (bucket 0) may later be freed with
            // ClrVirtualFree; see Flush().
            for (DWORD j = 0; j < ARRAY_SIZE(m_hash); j++)
            {
                m_hash[j] = newBlock;
                newBlock->next = NULL; // The previously allocated block
                newBlock->firstElement = HASH_INSTANCE_BLOCK_NUM_ELEMENTS;
                newBlock = (HashInstanceKeyBlock*) (((BYTE*) newBlock) + HASH_INSTANCE_BLOCK_ALLOC_SIZE);
            }
            block = m_hash[nHash];
        }
    }
    _ASSERTE(block->firstElement > 0);
    block->firstElement--;
    block->instanceKeys[block->firstElement].addr = inst->addr;
    block->instanceKeys[block->firstElement].instance = inst;

    inst->next = NULL;
    return inst;
}

#else //DAC_HASHTABLE

// Map-based variant: chains instances for the same target address, newest
// first.
DAC_INSTANCE*
DacInstanceManager::Add(DAC_INSTANCE* inst)
{
    _ASSERTE(inst != NULL);
#ifdef _DEBUG
    bool isInserted = (m_hash.find(inst->addr) == m_hash.end());
#endif //_DEBUG
    DAC_INSTANCE *(&target) = m_hash[inst->addr];
    _ASSERTE(!isInserted || target == NULL);
    if( target != NULL )
    {
        //This is necessary to preserve the semantics of Supersede, however, it
        //is more or less dead code.
        inst->next = target;
        target = inst;

        //verify descending order
        // NOTE(review): after "target = inst" above, target aliases inst, so
        // this assert compares inst->size with itself and is vacuous; it
        // probably meant to compare against inst->next->size — confirm.
        _ASSERTE(inst->size >= target->size);
    }
    else
    {
        target = inst;
    }

    return inst;
}
#endif // #if defined(DAC_HASHTABLE)

// Allocates cache space for `size` bytes of target data at target address
// `addr`, returning the DAC_INSTANCE header that precedes the payload.
// Instances live until the next Flush; see the comment below.
DAC_INSTANCE*
DacInstanceManager::Alloc(TADDR addr, ULONG32 size, DAC_USAGE_TYPE usage)
{
    SUPPORTS_DAC_HOST_ONLY;
    DAC_INSTANCE_BLOCK* block;
    DAC_INSTANCE* inst;
    ULONG32 fullSize;

    static_assert_no_msg(sizeof(DAC_INSTANCE_BLOCK) <= DAC_INSTANCE_ALIGN);
    static_assert_no_msg((sizeof(DAC_INSTANCE) & (DAC_INSTANCE_ALIGN - 1)) == 0);

    //
    // All allocated instances must be kept alive as long
    // as anybody may have a host pointer for one of them.
    // This means that we cannot delete an arbitrary instance
    // unless we are sure no pointers exist, which currently
    // is not possible to determine, thus we just hold everything
    // until a Flush.  This greatly simplifies instance allocation
    // as we can then just sweep through large blocks rather
    // than having to use a real allocator.  The only
    // complication is that we need to keep all instance
    // data aligned.  We have guaranteed that the header will
    // preserve alignment of the data following if the header
    // is aligned, so as long as we round up all allocations
    // to a multiple of the alignment size everything just works.
    //

    fullSize = (size + DAC_INSTANCE_ALIGN - 1) & ~(DAC_INSTANCE_ALIGN - 1);
    _ASSERTE(fullSize && fullSize <= 0xffffffff - 2 * sizeof(*inst));
    fullSize += sizeof(*inst);

    //
    // Check for an existing block with space.
    //

    for (block = m_blocks; block; block = block->next)
    {
        if (fullSize <= block->bytesFree)
        {
            break;
        }
    }

    if (!block)
    {
        //
        // No existing block has enough space, so allocate a new
        // one if necessary and link it in.  We know we're allocating large
        // blocks so directly VirtualAlloc.  We save one block through a
        // flush so that we spend less time allocating/deallocating.
        //

        ULONG32 blockSize = fullSize + DAC_INSTANCE_ALIGN;
        if (blockSize < DAC_INSTANCE_BLOCK_ALLOCATION)
        {
            blockSize = DAC_INSTANCE_BLOCK_ALLOCATION;
        }

        // If we have a saved block and it's large enough, use it.
        block = m_unusedBlock;
        if ((block != NULL) &&
            ((block->bytesUsed + block->bytesFree) >= blockSize))
        {
            m_unusedBlock = NULL;

            // Right now, we're locked to DAC_INSTANCE_BLOCK_ALLOCATION but
            // that might change in the future if we decide to do something
            // else with the size guarantee in code:DacInstanceManager::FreeAllBlocks
            blockSize = block->bytesUsed + block->bytesFree;
        }
        else
        {
            block = (DAC_INSTANCE_BLOCK*)
                ClrVirtualAlloc(NULL, blockSize, MEM_COMMIT, PAGE_READWRITE);
        }

        if (!block)
        {
            return NULL;
        }

        // Keep the first aligned unit for the block header.
        block->bytesUsed = DAC_INSTANCE_ALIGN;
        block->bytesFree = blockSize - DAC_INSTANCE_ALIGN;

        block->next = m_blocks;
        m_blocks = block;

        m_blockMemUsage += blockSize;
    }

    // Bump-allocate the instance from the block's tail.
    inst = (DAC_INSTANCE*)((PBYTE)block + block->bytesUsed);
    block->bytesUsed += fullSize;
    _ASSERTE(block->bytesFree >= fullSize);
    block->bytesFree -= fullSize;

    inst->next = NULL;
    inst->addr = addr;
    inst->size = size;
    inst->sig = DAC_INSTANCE_SIG;
    inst->usage = usage;
    inst->enumMem = 0;
    inst->MDEnumed = 0;

    m_numInst++;
    m_instMemUsage += fullSize;
    return inst;
}

// Undoes the most recent Alloc in a block when a subsequent operation
// (e.g. reading the target memory) fails.
void
DacInstanceManager::ReturnAlloc(DAC_INSTANCE* inst)
{
    SUPPORTS_DAC_HOST_ONLY;
    DAC_INSTANCE_BLOCK* block;
    DAC_INSTANCE_BLOCK * pPrevBlock;
    ULONG32 fullSize;

    //
    // This special routine handles cleanup in
    // cases where an instances has been allocated
    // but must be returned due to a following error.
    // The given instance must be the last instance
    // in an existing block.
    //

    fullSize =
        ((inst->size + DAC_INSTANCE_ALIGN - 1) & ~(DAC_INSTANCE_ALIGN - 1)) +
        sizeof(*inst);

    pPrevBlock = NULL;

    // Locate the block whose last allocation is exactly this instance.
    for (block = m_blocks; block; pPrevBlock = block, block = block->next)
    {
        if ((PBYTE)inst == (PBYTE)block + (block->bytesUsed - fullSize))
        {
            break;
        }
    }

    if (!block)
    {
        return;
    }

    block->bytesUsed -= fullSize;
    block->bytesFree += fullSize;
    m_numInst--;
    m_instMemUsage -= fullSize;

    // If the block is empty after returning the specified instance, that means this block was newly created
    // when this instance was allocated.  We have seen cases where we are asked to allocate a
    // large chunk of memory only to fail to read the memory from a dump later on, i.e. when both the target
    // address and the size are invalid.  If we keep the allocation, we'll grow the VM size unnecessarily.
    // Thus, release a block if it's empty and if it's not the default size (to avoid thrashing memory).
    // See Dev10 Dbug 812112 for more information.
    if ((block->bytesUsed == DAC_INSTANCE_ALIGN) &&
        ((block->bytesFree + block->bytesUsed) != DAC_INSTANCE_BLOCK_ALLOCATION))
    {
        // The empty block is at the beginning of the list.
        if (pPrevBlock == NULL)
        {
            m_blocks = block->next;
        }
        else
        {
            _ASSERTE(pPrevBlock->next == block);
            pPrevBlock->next = block->next;
        }
        ClrVirtualFree(block, 0, MEM_RELEASE);
    }
}

#if defined(DAC_HASHTABLE)

// Looks up a cached instance by its target address; returns NULL on miss.
DAC_INSTANCE*
DacInstanceManager::Find(TADDR addr)
{
#if defined(DAC_MEASURE_PERF)
    unsigned _int64 nStart, nEnd;
    g_nFindCalls++;
    nStart = GetCycleCount();
#endif // #if defined(DAC_MEASURE_PERF)

    HashInstanceKeyBlock* block = m_hash[DAC_INSTANCE_HASH(addr)];

#if defined(DAC_MEASURE_PERF)
    nEnd = GetCycleCount();
    g_nFindHashTotalTime += nEnd - nStart;
#endif // #if defined(DAC_MEASURE_PERF)

    while (block)
    {
        DWORD nIndex = block->firstElement;
        for (; nIndex < HASH_INSTANCE_BLOCK_NUM_ELEMENTS; nIndex++)
        {
            if (block->instanceKeys[nIndex].addr == addr)
            {
#if defined(DAC_MEASURE_PERF)
                nEnd = GetCycleCount();
                g_nFindHits++;
                g_nFindTotalTime += nEnd - nStart;
                if (g_nStackWalk) g_nFindStackTotalTime += nEnd - nStart;
#endif // #if defined(DAC_MEASURE_PERF)

                DAC_INSTANCE* inst = block->instanceKeys[nIndex].instance;

                // inst should not be NULL even if the address was superseded. We search
                // the entries in the reverse order they were added. So we should have
                // found the superseding entry before this one. (Of course, if a NULL instance
                // has been added, this assert is meaningless. DacInstanceManager::Add
                // asserts that NULL instances aren't added.)
                _ASSERTE(inst != NULL);

                return inst;
            }
        }
        block = block->next;
    }

#if defined(DAC_MEASURE_PERF)
    nEnd = GetCycleCount();
    g_nFindFails++;
    g_nFindTotalTime += nEnd - nStart;
    if (g_nStackWalk) g_nFindStackTotalTime += nEnd - nStart;
#endif // #if defined(DAC_MEASURE_PERF)

    return NULL;
}

#else //DAC_HASHTABLE

// Map-based variant of Find.
DAC_INSTANCE*
DacInstanceManager::Find(TADDR addr)
{
    DacInstanceHashIterator iter = m_hash.find(addr);
    if( iter == m_hash.end() )
    {
        return NULL;
    }
    else
    {
        return iter->second;
    }
}
#endif // if defined(DAC_HASHTABLE)

// Writes a cached instance's data back to the target process.
HRESULT
DacInstanceManager::Write(DAC_INSTANCE* inst, bool throwEx)
{
    HRESULT status;

    if (inst->usage == DAC_VPTR)
    {
        // Skip over the host-side vtable pointer when
        // writing back.
        status = DacWriteAll(inst->addr + sizeof(TADDR),
                             (PBYTE)(inst + 1) + sizeof(PVOID),
                             inst->size - sizeof(TADDR),
                             throwEx);
    }
    else
    {
        // Write the whole instance back.
        status = DacWriteAll(inst->addr, inst + 1, inst->size, throwEx);
    }

    return status;
}

#if defined(DAC_HASHTABLE)
void
DacInstanceManager::Supersede(DAC_INSTANCE* inst)
{
    _ASSERTE(inst != NULL);

    //
    // This instance has been superseded by a larger
    // one and so must be removed from the hash.  However,
    // code may be holding the instance pointer so it
    // can't just be deleted.  Put it on a list for
    // later cleanup.
    //

    HashInstanceKeyBlock* block = m_hash[DAC_INSTANCE_HASH(inst->addr)];
    while (block)
    {
        DWORD nIndex = block->firstElement;
        for (; nIndex < HASH_INSTANCE_BLOCK_NUM_ELEMENTS; nIndex++)
        {
            if (block->instanceKeys[nIndex].instance == inst)
            {
                // Leave the key in place but clear the instance pointer.
                block->instanceKeys[nIndex].instance = NULL;
                break;
            }
        }
        if (nIndex < HASH_INSTANCE_BLOCK_NUM_ELEMENTS)
        {
            break;
        }
        block = block->next;
    }

    AddSuperseded(inst);
}

#else //DAC_HASHTABLE

void
DacInstanceManager::Supersede(DAC_INSTANCE* inst)
{
    _ASSERTE(inst != NULL);

    //
    // This instance has been superseded by a larger
    // one and so must be removed from the hash.  However,
    // code may be holding the instance pointer so it
    // can't just be deleted.  Put it on a list for
    // later cleanup.
    //

    DacInstanceHashIterator iter = m_hash.find(inst->addr);
    if( iter == m_hash.end() )
        return;

    DAC_INSTANCE** bucket = &(iter->second);
    DAC_INSTANCE* cur = *bucket;
    DAC_INSTANCE* prev = NULL;
    //walk through the chain looking for this particular instance
    while (cur)
    {
        if (cur == inst)
        {
            if (!prev)
            {
                *bucket = inst->next;
            }
            else
            {
                prev->next = inst->next;
            }
            break;
        }
        prev = cur;
        cur = cur->next;
    }

    AddSuperseded(inst);
}
#endif // if defined(DAC_HASHTABLE)

// This is the default Flush() called when the DAC cache is invalidated,
// e.g. when we continue the debuggee process.  In this case, we want to
// save one block of memory to avoid thrashing.  See the usage of m_unusedBlock
// for more information.
void
DacInstanceManager::Flush(void)
{
    Flush(true);
}

// Drops every cached instance.  When fSaveBlock is true, one default-sized
// data block is retained for reuse by the next Alloc.
void
DacInstanceManager::Flush(bool fSaveBlock)
{
    SUPPORTS_DAC_HOST_ONLY;

    //
    // All allocated memory is in the block
    // list, so just free the blocks and
    // forget all the internal pointers.
    //

    for (;;)
    {
        FreeAllBlocks(fSaveBlock);

        DAC_INSTANCE_PUSH* push = m_instPushed;
        if (!push)
        {
            break;
        }

        // Restore a previously pushed block list and free it too.
        m_instPushed = push->next;
        m_blocks = push->blocks;
        delete push;
    }

    // If we are not saving any memory blocks, then clear the saved buffer block (if any) as well.
    if (!fSaveBlock)
    {
        if (m_unusedBlock != NULL)
        {
            ClrVirtualFree(m_unusedBlock, 0, MEM_RELEASE);
            m_unusedBlock = NULL;
        }
    }

#if defined(DAC_HASHTABLE)
    // Tear down the hash key blocks.  Overflow blocks (those with a
    // non-NULL next) were allocated with new BYTE[]; the chunk that backs
    // every bucket's base block was a single ClrVirtualAlloc and is freed
    // once, via bucket 0.
    // NOTE(review): "delete [] block" releases memory obtained as
    // new BYTE[] through a HashInstanceKeyBlock* — verify this matches the
    // allocation in Add (the types differ even though the storage is raw).
    for (int i = STRING_LENGTH(m_hash); i >= 0; i--)
    {
        HashInstanceKeyBlock* block = m_hash[i];
        HashInstanceKeyBlock* next;
        while (block)
        {
            next = block->next;
            if (next)
            {
                delete [] block;
            }
            else if (i == 0)
            {
                ClrVirtualFree(block, 0, MEM_RELEASE);
            }
            block = next;
        }
    }
#else //DAC_HASHTABLE
    m_hash.clear();
#endif //DAC_HASHTABLE

    InitEmpty();
}

#if defined(DAC_HASHTABLE)

// Clears the "already reported to the dump" marker on every live and
// superseded instance so a fresh memory enumeration can begin.
void
DacInstanceManager::ClearEnumMemMarker(void)
{
    ULONG i;
    DAC_INSTANCE* inst;

    for (i = 0; i < ARRAY_SIZE(m_hash); i++)
    {
        HashInstanceKeyBlock* block = m_hash[i];
        while (block)
        {
            DWORD j;
            for (j = block->firstElement; j < HASH_INSTANCE_BLOCK_NUM_ELEMENTS; j++)
            {
                inst = block->instanceKeys[j].instance;
                if (inst != NULL)
                {
                    inst->enumMem = 0;
                }
            }
            block = block->next;
        }
    }
    for (inst = m_superseded; inst; inst = inst->next)
    {
        inst->enumMem = 0;
    }
}

#else //DAC_HASHTABLE

// Map-based variant of ClearEnumMemMarker.
void
DacInstanceManager::ClearEnumMemMarker(void)
{
    ULONG i;
    DAC_INSTANCE* inst;

    DacInstanceHashIterator end = m_hash.end();
    /* REVISIT_TODO Fri 10/20/2006
     * This might have an issue, since it might miss chained entries off of
     * ->next.  However, ->next is going away, and for all intents and
     * purposes, this never happens.
     */
    for( DacInstanceHashIterator cur = m_hash.begin(); cur != end; ++cur )
    {
        cur->second->enumMem = 0;
    }

    for (inst = m_superseded; inst; inst = inst->next)
    {
        inst->enumMem = 0;
    }
}
#endif // if defined(DAC_HASHTABLE)

#if defined(DAC_HASHTABLE)

//
//
// Iterating through all of the hash entry and report the memory
// instance to minidump
//
// This function returns the total number of bytes that it reported.
// // UINT DacInstanceManager::DumpAllInstances( ICLRDataEnumMemoryRegionsCallback *pCallBack) // memory report call back { ULONG i; DAC_INSTANCE* inst; UINT cbTotal = 0; #if defined(DAC_MEASURE_PERF) FILE* fp = fopen("c:\\dumpLog.txt", "a"); int total = 0; #endif // #if defined(DAC_MEASURE_PERF) for (i = 0; i < ARRAY_SIZE(m_hash); i++) { #if defined(DAC_MEASURE_PERF) int numInBucket = 0; #endif // #if defined(DAC_MEASURE_PERF) HashInstanceKeyBlock* block = m_hash[i]; while (block) { DWORD j; for (j = block->firstElement; j < HASH_INSTANCE_BLOCK_NUM_ELEMENTS; j++) { inst = block->instanceKeys[j].instance; // Only report those we intended to. // So far, only metadata is excluded! // if (inst && inst->noReport == 0) { cbTotal += inst->size; HRESULT hr = pCallBack->EnumMemoryRegion(TO_CDADDR(inst->addr), inst->size); if (hr == COR_E_OPERATIONCANCELED) { ThrowHR(hr); } } #if defined(DAC_MEASURE_PERF) if (inst) { numInBucket++; } #endif // #if defined(DAC_MEASURE_PERF) } block = block->next; } #if defined(DAC_MEASURE_PERF) fprintf(fp, "%4d: %4d%s", i, numInBucket, (i+1)%5? "; " : "\n"); total += numInBucket; #endif // #if defined(DAC_MEASURE_PERF) } #if defined(DAC_MEASURE_PERF) fprintf(fp, "\n\nTotal entries: %d\n\n", total); fclose(fp); #endif // #if defined(DAC_MEASURE_PERF) return cbTotal; } #else //DAC_HASHTABLE // // // Iterating through all of the hash entry and report the memory // instance to minidump // // This function returns the total number of bytes that it reported. 
// // UINT DacInstanceManager::DumpAllInstances( ICLRDataEnumMemoryRegionsCallback *pCallBack) // memory report call back { SUPPORTS_DAC_HOST_ONLY; DAC_INSTANCE* inst; UINT cbTotal = 0; #if defined(DAC_MEASURE_PERF) FILE* fp = fopen("c:\\dumpLog.txt", "a"); #endif // #if defined(DAC_MEASURE_PERF) #if defined(DAC_MEASURE_PERF) int numInBucket = 0; #endif // #if defined(DAC_MEASURE_PERF) DacInstanceHashIterator end = m_hash.end(); for (DacInstanceHashIterator cur = m_hash.begin(); end != cur; ++cur) { inst = cur->second; // Only report those we intended to. // So far, only metadata is excluded! // if (inst->noReport == 0) { cbTotal += inst->size; HRESULT hr = pCallBack->EnumMemoryRegion(TO_CDADDR(inst->addr), inst->size); if (hr == COR_E_OPERATIONCANCELED) { ThrowHR(hr); } } #if defined(DAC_MEASURE_PERF) numInBucket++; #endif // #if defined(DAC_MEASURE_PERF) } #if defined(DAC_MEASURE_PERF) fprintf(fp, "\n\nTotal entries: %d\n\n", numInBucket); fclose(fp); #endif // #if defined(DAC_MEASURE_PERF) return cbTotal; } #endif // if defined(DAC_HASHTABLE) DAC_INSTANCE_BLOCK* DacInstanceManager::FindInstanceBlock(DAC_INSTANCE* inst) { for (DAC_INSTANCE_BLOCK* block = m_blocks; block; block = block->next) { if ((PBYTE)inst >= (PBYTE)block && (PBYTE)inst < (PBYTE)block + block->bytesUsed) { return block; } } return NULL; } // If fSaveBlock is false, free all blocks of allocated memory. Otherwise, // free all blocks except the one we save to avoid thrashing memory. // Callers very frequently flush repeatedly with little memory needed in DAC // so this avoids wasteful repeated allocations/deallocations. // There is a very unlikely case that we'll have allocated an extremely large // block; if this is the only block we will save none since this block will // remain allocated. 
void DacInstanceManager::FreeAllBlocks(bool fSaveBlock) { DAC_INSTANCE_BLOCK* block; while ((block = m_blocks)) { m_blocks = block->next; // If we haven't saved our single block yet and this block is the default size // then we will save it instead of freeing it. This avoids saving an unnecessarily large // memory block. // Do *NOT* trash the byte counts. code:DacInstanceManager::Alloc // depends on them being correct when checking to see if a block is large enough. if (fSaveBlock && (m_unusedBlock == NULL) && ((block->bytesFree + block->bytesUsed) == DAC_INSTANCE_BLOCK_ALLOCATION)) { // Just to avoid confusion, since we're keeping it around. block->next = NULL; m_unusedBlock = block; } else { ClrVirtualFree(block, 0, MEM_RELEASE); } } } //---------------------------------------------------------------------------- // // DacStreamManager. // //---------------------------------------------------------------------------- #ifdef FEATURE_MINIMETADATA_IN_TRIAGEDUMPS namespace serialization { namespace bin { //======================================================================== // Support functions for binary serialization of simple types to a buffer: // - raw_size() returns the size in bytes of the binary representation // of a value. // - raw_serialize() copies the binary representation of a value into a // buffer. // - raw_deserialize() generates a value from its binary representation // in a buffer. // Beyond simple types the APIs below support SString instances. SStrings // are stored as UTF8 strings. 
  //========================================================================

  static const size_t ErrOverflow = (size_t)(-1);

#ifndef TARGET_UNIX

  // Template class is_blittable
  template <typename _Ty, typename Enable = void>
  struct is_blittable
      : std::false_type
  {   // determines whether _Ty is blittable
  };

  template <typename _Ty>
  struct is_blittable<_Ty, typename std::enable_if<std::is_arithmetic<_Ty>::value>::type>
      : std::true_type
  {   // determines whether _Ty is blittable
  };

  // allow types to declare themselves blittable by including a static bool
  // member "is_blittable".
  template <typename _Ty>
  struct is_blittable<_Ty, typename std::enable_if<_Ty::is_blittable>::type>
      : std::true_type
  {   // determines whether _Ty is blittable
  };


  //========================================================================
  // serialization::bin::Traits<T> enables binary serialization and
  // deserialization of instances of T.
  //========================================================================

  //
  // General specialization for non-blittable types - must be overridden
  // for each specific non-blittable type.
  //
  template <typename T, typename Enable = void>
  class Traits
  {
  public:
      static FORCEINLINE size_t
      raw_size(const T & val)
      {
          static_assert(false, "Non-blittable types need explicit specializations");
      }
  };

  //
  // General type trait supporting serialization/deserialization of blittable
  // type arguments (as defined by the is_blittable<> type traits above).
  //
  template <typename T>
  class Traits<T, typename std::enable_if<is_blittable<T>::value>::type>
  {
#else // TARGET_UNIX
  // On Unix builds the SFINAE machinery above is unavailable; a single
  // primary template handles all (assumed blittable) types.
  template <typename T>
  class Traits
  {
#endif // !TARGET_UNIX
  public:
      //
      // raw_size() returns the size in bytes of the binary representation of a
      // value.
      //
      static FORCEINLINE size_t
      raw_size(const T & val)
      {
          return sizeof(T);
      }

      //
      // raw_serialize() copies the binary representation of a value into a
      // "dest" buffer that has "destSize" bytes available.
      // Returns raw_size(val), or ErrOverflow if the buffer does not have
      // enough space to accommodate "val".
      //
      static FORCEINLINE size_t
      raw_serialize(BYTE* dest, size_t destSize, const T & val)
      {
          size_t cnt = raw_size(val);

          if (destSize < cnt)
          {
              return ErrOverflow;
          }

          memcpy_s(dest, destSize, &val, cnt);

          return cnt;
      }

      //
      // raw_deserialize() generates a value "val" from its binary
      // representation in a buffer "src".
      // Returns raw_size(val), or ErrOverflow if the buffer does not have
      // enough space to accommodate "val".
      //
      static FORCEINLINE size_t
      raw_deserialize(T & val, const BYTE* src, size_t srcSize)
      {
          size_t cnt = raw_size(*(T*)src);

          if (srcSize < cnt)
          {
              return ErrOverflow;
          }

          memcpy_s(&val, cnt, src, cnt);

          return cnt;
      }

  };

  //
  // Specialization for UTF8 strings
  //
  template<>
  class Traits<LPCUTF8>
  {
  public:
      static FORCEINLINE size_t
      raw_size(const LPCUTF8 & val)
      {
          return strlen(val) + 1;
      }

      static FORCEINLINE size_t
      raw_serialize(BYTE* dest, size_t destSize, const LPCUTF8 & val)
      {
          size_t cnt = raw_size(val);

          if (destSize < cnt)
          {
              return ErrOverflow;
          }

          // NOTE(review): "&val" copies the bytes of the pointer variable
          // itself, not the characters it points to, while cnt is the string
          // length — raw_deserialize below treats src as character data.
          // This looks like it should be "val"; verify against upstream
          // before relying on this specialization.
          memcpy_s(dest, destSize, &val, cnt);

          return cnt;
      }

      static FORCEINLINE size_t
      raw_deserialize(LPCUTF8 & val, const BYTE* src, size_t srcSize)
      {
          size_t cnt = strnlen((LPCUTF8)src, srcSize) + 1;

          // assert we found a NULL terminated string at "src"
          if (srcSize < cnt)
          {
              return ErrOverflow;
          }

          // we won't allocate another buffer for this string
          val = (LPCUTF8)src;

          return cnt;
      }

  };

  //
  // Specialization for SString.
  // SString serialization/deserialization is performed to/from a UTF8
  // string.
  //
  template<>
  class Traits<SString>
  {
  public:
      static FORCEINLINE size_t
      raw_size(const SString & val)
      {
          StackSString s;
          val.ConvertToUTF8(s);
          // make sure to include the NULL terminator
          return s.GetCount() + 1;
      }

      static FORCEINLINE size_t
      raw_serialize(BYTE* dest, size_t destSize, const SString & val)
      {
          // instead of calling raw_size() we inline it here, so we can reuse
          // the UTF8 string obtained below as an argument to memcpy.
          StackSString s;
          val.ConvertToUTF8(s);

          // make sure to include the NULL terminator
          size_t cnt = s.GetCount() + 1;

          if (destSize < cnt)
          {
              return ErrOverflow;
          }

          memcpy_s(dest, destSize, s.GetUTF8NoConvert(), cnt);

          return cnt;
      }

      static FORCEINLINE size_t
      raw_deserialize(SString & val, const BYTE* src, size_t srcSize)
      {
          size_t cnt = strnlen((LPCUTF8)src, srcSize) + 1;

          // assert we found a NULL terminated string at "src"
          if (srcSize < cnt)
          {
              return ErrOverflow;
          }

          // a literal SString avoids a new allocation + copy
          SString sUtf8(SString::Utf8Literal, (LPCUTF8) src);
          sUtf8.ConvertToUnicode(val);

          return cnt;
      }

  };

#ifndef TARGET_UNIX
  //
  // Specialization for SString-derived classes (like SStrings)
  //
  template<typename T>
  class Traits<T, typename std::enable_if<std::is_base_of<SString, T>::value>::type>
      : public Traits<SString>
  {
  };
#endif // !TARGET_UNIX

  //
  // Convenience functions to allow argument type deduction
  //
  template <typename T> FORCEINLINE
  size_t raw_size(const T & val)
  { return Traits<T>::raw_size(val); }

  template <typename T> FORCEINLINE
  size_t raw_serialize(BYTE* dest, size_t destSize, const T & val)
  { return Traits<T>::raw_serialize(dest, destSize, val); }

  template <typename T> FORCEINLINE
  size_t raw_deserialize(T & val, const BYTE* src, size_t srcSize)
  { return Traits<T>::raw_deserialize(val, src, srcSize); }


  // Stream state: once a write/read overflows, the stream is dead and all
  // further operations become no-ops.
  enum StreamBuffState
  {
      sbsOK,
      sbsUnrecoverable,
      sbsOOM = sbsUnrecoverable,
  };

  //
  // OStreamBuff - Manages writing to an output buffer
  //
  class OStreamBuff
  {
  public:
      OStreamBuff(BYTE * _buff, size_t _buffsize)
          : buffsize(_buffsize)
          , buff(_buff)
          , crt(0)
          , sbs(sbsOK)
      { }

      template <typename T>
      OStreamBuff& operator << (const T & val)
      {
          if (sbs >= sbsUnrecoverable)
              return *this;

          size_t cnt = raw_serialize(buff+crt, buffsize-crt, val);
          if (cnt == ErrOverflow)
          {
              sbs = sbsOOM;
          }
          else
          {
              crt += cnt;
          }

          return *this;
      }

      inline size_t GetPos() const
      {
          return crt;
      }

      inline BOOL operator!() const
      {
          return sbs >= sbsUnrecoverable;
      }

      inline StreamBuffState State() const
      {
          return sbs;
      }

  private:
      size_t          buffsize;   // size of buffer
      BYTE*           buff;       // buffer to stream to
      size_t          crt;        // current offset in buffer
      StreamBuffState sbs;        // current state
  };

  //
  // IStreamBuff - Manages reading from an input buffer
  //
  class IStreamBuff
  {
  public:
      IStreamBuff(const BYTE* _buff, size_t _buffsize)
          : buffsize(_buffsize)
          , buff(_buff)
          , crt(0)
          , sbs(sbsOK)
      { }

      template <typename T>
      IStreamBuff& operator >> (T & val)
      {
          if (sbs >= sbsUnrecoverable)
              return *this;

          size_t cnt = raw_deserialize(val, buff+crt, buffsize-crt);
          if (cnt == ErrOverflow)
          {
              sbs = sbsOOM;
          }
          else
          {
              crt += cnt;
          }

          return *this;
      }

      inline size_t GetPos() const
      {
          return crt;
      }

      inline BOOL operator!() const
      {
          return sbs >= sbsUnrecoverable;
      }

      inline StreamBuffState State() const
      {
          return sbs;
      }

  private:
      size_t          buffsize;   // size of buffer
      const BYTE *    buff;       // buffer to read from
      size_t          crt;        // current offset in buffer
      StreamBuffState sbs;        // current state
  };

} }

using serialization::bin::StreamBuffState;
using serialization::bin::IStreamBuff;
using serialization::bin::OStreamBuff;

// Callback function type used by DacStreamManager to coordinate
// amount of available memory between multiple streamable data
// structures (e.g.
DacEENamesStreamable) typedef bool (*Reserve_Fnptr)(DWORD size, void * writeState); // // DacEENamesStreamable // Stores EE struct* -> Name mappings and streams them to a // streambuf when asked // class DacEENamesStreamable { private: // the hash map storing the interesting mappings of EE* -> Names MapSHash< TADDR, SString, NoRemoveSHashTraits < NonDacAwareSHashTraits< MapSHashTraits <TADDR, SString> > > > m_hash; Reserve_Fnptr m_reserveFn; void *m_writeState; private: // signature value in the header in stream static const DWORD sig = 0x614e4545; // "EENa" - EE Name // header in stream struct StreamHeader { DWORD sig; // 0x614e4545 == "EENa" DWORD cnt; // count of entries static const bool is_blittable = true; }; public: DacEENamesStreamable() : m_reserveFn(NULL) , m_writeState(NULL) {} // Ensures the instance is ready for caching data and later writing // its map entries to an OStreamBuff. bool PrepareStreamForWriting(Reserve_Fnptr pfn, void * writeState) { _ASSERTE(pfn != NULL && writeState != NULL); m_reserveFn = pfn; m_writeState = writeState; DWORD size = (DWORD) sizeof(StreamHeader); // notify owner to reserve space for a StreamHeader return m_reserveFn(size, m_writeState); } // Adds a new mapping from an EE struct pointer (e.g. MethodDesc*) to // its name bool AddEEName(TADDR taEE, const SString & eeName) { _ASSERTE(m_reserveFn != NULL && m_writeState != NULL); // as a micro-optimization convert to Utf8 here as both raw_size and // raw_serialize are optimized for Utf8... StackSString seeName; eeName.ConvertToUTF8(seeName); DWORD size = (DWORD)(serialization::bin::raw_size(taEE) + serialization::bin::raw_size(seeName)); // notify owner of the amount of space needed in the buffer if (m_reserveFn(size, m_writeState)) { // if there's still space cache the entry in m_hash m_hash.AddOrReplace(KeyValuePair<TADDR, SString>(taEE, seeName)); return true; } else { return false; } } // Finds an EE name from a target address of an EE struct (e.g. 
    // MethodDesc*)
    bool FindEEName(TADDR taEE, SString & eeName) const
    {
        return m_hash.Lookup(taEE, &eeName) == TRUE;
    }

    void Clear()
    {
        m_hash.RemoveAll();
    }

    // Writes a header and the hash entries to an OStreamBuff
    HRESULT StreamTo(OStreamBuff &out) const
    {
        StreamHeader hdr;
        hdr.sig = sig;
        hdr.cnt = (DWORD) m_hash.GetCount();

        out << hdr;

        auto end = m_hash.End();
        for (auto cur = m_hash.Begin(); end != cur; ++cur)
        {
            out << cur->Key() << cur->Value();
            // bail out as soon as the output buffer reports a failure
            if (!out)
                return E_FAIL;
        }

        return S_OK;
    }

    // Reads a header and the hash entries from an IStreamBuff
    HRESULT StreamFrom(IStreamBuff &in)
    {
        StreamHeader hdr;
        in >> hdr; // in >> hdr.sig >> hdr.cnt;

        if (hdr.sig != sig)
            return E_FAIL;

        for (size_t i = 0; i < hdr.cnt; ++i)
        {
            TADDR taEE;
            SString eeName;

            in >> taEE >> eeName;

            // stop on a truncated/corrupt stream
            if (!in)
                return E_FAIL;

            m_hash.AddOrReplace(KeyValuePair<TADDR, SString>(taEE, eeName));
        }

        return S_OK;
    }
};

//================================================================================
// This class enables two scenarios:
//   1. When debugging a triage/mini-dump the class is initialized with a valid
//      buffer in taMiniMetaDataBuff. Afterwards one can call MdCacheGetEEName to
//      retrieve the name associated with a MethodDesc*.
//   2. When generating a dump one must follow this sequence:
//      a. Initialize the DacStreamManager passing a valid (if the current
//         debugging target is a triage/mini-dump) or empty buffer (if the
//         current target is a live process, a full or a heap dump)
//      b. Call PrepareStreamsForWriting() before starting enumerating any memory
//      c. Call MdCacheAddEEName() anytime we enumerate an EE structure of interest
//      d. Call EnumStreams() as the last action in the memory enumeration method.
//
class DacStreamManager
{
public:
    enum eReadOrWrite
    {
        eNone,    // the stream doesn't exist (target is a live process/full/heap dump)
        eRO,      // the stream exists and we've read it (target is triage/mini-dump)
        eWO,      // the stream doesn't exist but we're creating it
                  // (e.g. to save a minidump from the current debugging session)
        eRW       // the stream exists but we're generating another triage/mini-dump
    };

    static const DWORD sig = 0x6d727473;    // 'strm'

    struct StreamsHeader
    {
        DWORD dwSig;        // 0x6d727473 == "strm"
        DWORD dwTotalSize;  // total size in bytes
        DWORD dwCntStreams; // number of streams (currently 1)

        static const bool is_blittable = true;
    };

    DacStreamManager(TADDR miniMetaDataBuffAddress, DWORD miniMetaDataBuffSizeMax)
        : m_MiniMetaDataBuffAddress(miniMetaDataBuffAddress)
        , m_MiniMetaDataBuffSizeMax(miniMetaDataBuffSizeMax)
        , m_rawBuffer(NULL)
        , m_cbAvailBuff(0)
        , m_rw(eNone)
        , m_bStreamsRead(FALSE)
        , m_EENames()
    {
        Initialize();
    }

    ~DacStreamManager()
    {
        if (m_rawBuffer != NULL)
        {
            delete [] m_rawBuffer;
        }
    }

    // Transitions m_rw into a writable state and reserves header space.
    // Returns false when the target buffer is too small to hold even the header.
    bool PrepareStreamsForWriting()
    {
        if (m_rw == eNone)
            m_rw = eWO;
        else if (m_rw == eRO)
            m_rw = eRW;
        else if (m_rw == eRW)
            /* nothing */;
        else // m_rw == eWO
        {
            // this is a second invocation from a possibly live process
            // clean up the map since the callstacks/exceptions may be different
            m_EENames.Clear();
        }

        // update available count based on the header and footer sizes
        if (m_MiniMetaDataBuffSizeMax < sizeof(StreamsHeader))
            return false;

        m_cbAvailBuff = m_MiniMetaDataBuffSizeMax - sizeof(StreamsHeader);

        // update available count based on each stream's initial needs
        if (!m_EENames.PrepareStreamForWriting(&ReserveInBuffer, this))
            return false;

        return true;
    }

    bool MdCacheAddEEName(TADDR taEEStruct, const SString& name)
    {
        // don't cache unless we enabled "W"riting from a target that does not
        // already have a stream yet
        if (m_rw != eWO)
            return false;

        m_EENames.AddEEName(taEEStruct, name);
        return true;
    }

    HRESULT EnumStreams(IN CLRDataEnumMemoryFlags flags)
    {
        _ASSERTE(flags == CLRDATA_ENUM_MEM_MINI || flags == CLRDATA_ENUM_MEM_TRIAGE);
        _ASSERTE(m_rw == eWO || m_rw == eRW);

        DWORD cbWritten = 0;

        if (m_rw == eWO)
        {
            // only dump the stream if it wasn't already present in the target
            DumpAllStreams(&cbWritten);
        }
        else
        {
            cbWritten =
                m_MiniMetaDataBuffSizeMax;
        }

        DacEnumMemoryRegion(m_MiniMetaDataBuffAddress, cbWritten, false);
        DacUpdateMemoryRegion(m_MiniMetaDataBuffAddress, cbWritten, m_rawBuffer);

        return S_OK;
    }

    bool MdCacheGetEEName(TADDR taEEStruct, SString & eeName)
    {
        // lazily deserialize the streams on the first lookup
        if (!m_bStreamsRead)
        {
            ReadAllStreams();
        }

        if (m_rw == eNone || m_rw == eWO)
        {
            return false;
        }

        return m_EENames.FindEEName(taEEStruct, eeName);
    }

private:
    // Reads the target's mini-metadata buffer header to decide whether a
    // serialized stream is already present (eRO) or not (eNone), then snapshots
    // the whole buffer into m_rawBuffer.
    HRESULT Initialize()
    {
        _ASSERTE(m_rw == eNone);
        _ASSERTE(m_rawBuffer == NULL);

        HRESULT hr = S_OK;

        StreamsHeader hdr;
        DacReadAll(dac_cast<TADDR>(m_MiniMetaDataBuffAddress),
                   &hdr, sizeof(hdr), true);

        // when the DAC looks at a triage dump or minidump generated using
        // a "minimetadata" enabled DAC, buff will point to a serialized
        // representation of a methoddesc->method name hashmap.
        if (hdr.dwSig == sig)
        {
            m_rw = eRO;
            m_MiniMetaDataBuffSizeMax = hdr.dwTotalSize;
            hr = S_OK;
        }
        else
        // when the DAC initializes this for the case where the target is
        // (a) a live process, or (b) a full dump, buff will point to a
        // zero initialized memory region (allocated w/ VirtualAlloc)
        if (hdr.dwSig == 0 && hdr.dwTotalSize == 0 && hdr.dwCntStreams == 0)
        {
            hr = S_OK;
        }
        // otherwise we may have some memory corruption. treat this as
        // a liveprocess/full dump
        else
        {
            hr = S_FALSE;
        }

        BYTE * buff = new BYTE[m_MiniMetaDataBuffSizeMax];
        DacReadAll(dac_cast<TADDR>(m_MiniMetaDataBuffAddress),
                   buff, m_MiniMetaDataBuffSizeMax, true);

        m_rawBuffer = buff;

        return hr;
    }

    // Serializes the header and all streams into m_rawBuffer; on return
    // *pcbWritten (if non-NULL) holds the pointer-aligned byte count written.
    HRESULT DumpAllStreams(DWORD * pcbWritten)
    {
        _ASSERTE(m_rw == eWO);

        HRESULT hr = S_OK;

        OStreamBuff out(m_rawBuffer, m_MiniMetaDataBuffSizeMax);

        // write header
        StreamsHeader hdr;
        hdr.dwSig = sig;
        hdr.dwTotalSize = m_MiniMetaDataBuffSizeMax-m_cbAvailBuff; // will update
        hdr.dwCntStreams = 1;
        out << hdr;

        // write MethodDesc->Method name map
        hr = m_EENames.StreamTo(out);

        // wrap up the buffer whether we encountered an error or not
        size_t cbWritten = out.GetPos();
        cbWritten = ALIGN_UP(cbWritten, sizeof(size_t));

        // patch the dwTotalSize field blitted at the beginning of the buffer
        ((StreamsHeader*)m_rawBuffer)->dwTotalSize = (DWORD) cbWritten;

        if (pcbWritten)
            *pcbWritten = (DWORD) cbWritten;

        return hr;
    }

    HRESULT ReadAllStreams()
    {
        _ASSERTE(!m_bStreamsRead);

        if (m_rw == eNone || m_rw == eWO)
        {
            // no streams to read...
            m_bStreamsRead = TRUE;
            return S_FALSE;
        }

        HRESULT hr = S_OK;

        IStreamBuff in(m_rawBuffer, m_MiniMetaDataBuffSizeMax);

        // read header
        StreamsHeader hdr;
        in >> hdr;
        _ASSERTE(hdr.dwSig == sig);
        _ASSERTE(hdr.dwCntStreams == 1);

        // read EE struct pointer -> EE name map
        m_EENames.Clear();
        hr = m_EENames.StreamFrom(in);

        m_bStreamsRead = TRUE;

        return hr;
    }

    // Reserve_Fnptr callback handed to the streamable data structures;
    // decrements the shared count of bytes still available in the buffer.
    static bool ReserveInBuffer(DWORD size, void * writeState)
    {
        DacStreamManager * pThis = reinterpret_cast<DacStreamManager*>(writeState);
        if (size > pThis->m_cbAvailBuff)
        {
            return false;
        }
        else
        {
            pThis->m_cbAvailBuff -= size;
            return true;
        }
    }

private:
    TADDR                m_MiniMetaDataBuffAddress;    // TADDR of the buffer
    DWORD                m_MiniMetaDataBuffSizeMax;    // max size of buffer
    BYTE                *m_rawBuffer;                  // inproc copy of buffer
    DWORD                m_cbAvailBuff;                // available bytes in buffer
    eReadOrWrite         m_rw;                         // current read/write mode
    BOOL                 m_bStreamsRead;               // have the streams been deserialized yet
    DacEENamesStreamable m_EENames;                    // EE struct* -> name stream
};

#endif // FEATURE_MINIMETADATA_IN_TRIAGEDUMPS

//----------------------------------------------------------------------------
//
// ClrDataAccess.
//
//----------------------------------------------------------------------------

LONG ClrDataAccess::s_procInit;

ClrDataAccess::ClrDataAccess(ICorDebugDataTarget * pTarget, ICLRDataTarget * pLegacyTarget/*=0*/)
{
    SUPPORTS_DAC_HOST_ONLY;     // ctor does no marshalling - don't check with DacCop

    /*
     *  Stash the various forms of the new ICorDebugDataTarget interface
     */
    m_pTarget = pTarget;
    m_pTarget->AddRef();

    HRESULT hr;

    hr = m_pTarget->QueryInterface(__uuidof(ICorDebugMutableDataTarget),
                                   (void**)&m_pMutableTarget);

    if (hr != S_OK)
    {
        // Create a target which always fails the write requests with CORDBG_E_TARGET_READONLY
        m_pMutableTarget = new ReadOnlyDataTargetFacade();
        m_pMutableTarget->AddRef();
    }

    /*
     * If we have a legacy target, it means we're providing compatibility for code that used
     * the old ICLRDataTarget interfaces.
     * There are still a few things (like metadata location,
     * GetImageBase, and VirtualAlloc) that the implementation may use which we haven't superseded
     * in ICorDebugDataTarget, so we still need access to the old target interfaces.
     * Any functionality that does exist in ICorDebugDataTarget is accessed from that interface
     * using the DataTargetAdapter on top of the legacy interface (to unify the calling code).
     * Eventually we may expose all functionality we need using ICorDebug (possibly a private
     * interface for things like VirtualAlloc), at which point we can stop using the legacy interfaces
     * completely (except in the DataTargetAdapter).
     */
    m_pLegacyTarget = NULL;
    m_pLegacyTarget2 = NULL;
    m_pLegacyTarget3 = NULL;
    m_legacyMetaDataLocator = NULL;
    m_target3 = NULL;
    if (pLegacyTarget != NULL)
    {
        m_pLegacyTarget = pLegacyTarget;
        m_pLegacyTarget->AddRef();

        // The extended legacy interfaces are optional; on QI failure the
        // members simply stay NULL (they were pre-initialized above).
        m_pLegacyTarget->QueryInterface(__uuidof(ICLRDataTarget2), (void**)&m_pLegacyTarget2);

        m_pLegacyTarget->QueryInterface(__uuidof(ICLRDataTarget3), (void**)&m_pLegacyTarget3);

        if (pLegacyTarget->QueryInterface(__uuidof(ICLRMetadataLocator),
                                          (void**)&m_legacyMetaDataLocator) != S_OK)
        {
            // The debugger doesn't implement IMetadataLocator.  Use
            // IXCLRDataTarget3 if that exists.  Otherwise we don't need it.
            pLegacyTarget->QueryInterface(__uuidof(IXCLRDataTarget3),
                                          (void**)&m_target3);
        }
    }

    m_globalBase = 0;
    m_refs = 1;
    m_instanceAge = 0;
    m_debugMode = GetEnvironmentVariableA("MSCORDACWKS_DEBUG", NULL, 0) != 0;

    m_enumMemCb = NULL;
    m_updateMemCb = NULL;
    m_enumMemFlags = (CLRDataEnumMemoryFlags)-1;    // invalid
    m_jitNotificationTable = NULL;
    m_gcNotificationTable = NULL;

#ifdef FEATURE_MINIMETADATA_IN_TRIAGEDUMPS
    m_streams = NULL;
#endif // FEATURE_MINIMETADATA_IN_TRIAGEDUMPS

    // Target consistency checks are disabled by default.
    // See code:ClrDataAccess::SetTargetConsistencyChecks for details.
    m_fEnableTargetConsistencyAsserts = false;

#ifdef _DEBUG
    if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_DbgDACEnableAssert))
    {
        m_fEnableTargetConsistencyAsserts = true;
    }

    // Verification asserts are disabled by default because some debuggers (cdb/windbg) probe likely locations
    // for DAC and having this assert pop up all the time can be annoying.  We let derived classes enable
    // this if they want.  It can also be overridden at run-time with COMPlus_DbgDACAssertOnMismatch,
    // see ClrDataAccess::VerifyDlls for details.
    m_fEnableDllVerificationAsserts = false;
#endif
}

// Releases every target interface acquired in the constructor plus the
// cached JIT notification table.
ClrDataAccess::~ClrDataAccess(void)
{
    SUPPORTS_DAC_HOST_ONLY;

#ifdef FEATURE_MINIMETADATA_IN_TRIAGEDUMPS
    if (m_streams)
    {
        delete m_streams;
    }
#endif // FEATURE_MINIMETADATA_IN_TRIAGEDUMPS

    delete [] m_jitNotificationTable;
    if (m_pLegacyTarget)
    {
        m_pLegacyTarget->Release();
    }
    if (m_pLegacyTarget2)
    {
        m_pLegacyTarget2->Release();
    }
    if (m_pLegacyTarget3)
    {
        m_pLegacyTarget3->Release();
    }
    if (m_legacyMetaDataLocator)
    {
        m_legacyMetaDataLocator->Release();
    }
    if (m_target3)
    {
        m_target3->Release();
    }
    m_pTarget->Release();
    m_pMutableTarget->Release();
}

STDMETHODIMP
ClrDataAccess::QueryInterface(THIS_
                              IN REFIID interfaceId,
                              OUT PVOID* iface)
{
    void* ifaceRet;

    if (IsEqualIID(interfaceId, IID_IUnknown) ||
        IsEqualIID(interfaceId, __uuidof(IXCLRDataProcess)) ||
        IsEqualIID(interfaceId, __uuidof(IXCLRDataProcess2)))
    {
        ifaceRet = static_cast<IXCLRDataProcess2*>(this);
    }
    else if (IsEqualIID(interfaceId, __uuidof(ICLRDataEnumMemoryRegions)))
    {
        ifaceRet = static_cast<ICLRDataEnumMemoryRegions*>(this);
    }
    else if (IsEqualIID(interfaceId, __uuidof(ISOSDacInterface)))
    {
        ifaceRet = static_cast<ISOSDacInterface*>(this);
    }
    else if (IsEqualIID(interfaceId, __uuidof(ISOSDacInterface2)))
    {
        ifaceRet = static_cast<ISOSDacInterface2*>(this);
    }
    else if (IsEqualIID(interfaceId, __uuidof(ISOSDacInterface3)))
    {
        ifaceRet = static_cast<ISOSDacInterface3*>(this);
    }
    else if (IsEqualIID(interfaceId, __uuidof(ISOSDacInterface4)))
    {
        ifaceRet =
            static_cast<ISOSDacInterface4*>(this);
    }
    else if (IsEqualIID(interfaceId, __uuidof(ISOSDacInterface5)))
    {
        ifaceRet = static_cast<ISOSDacInterface5*>(this);
    }
    else if (IsEqualIID(interfaceId, __uuidof(ISOSDacInterface6)))
    {
        ifaceRet = static_cast<ISOSDacInterface6*>(this);
    }
    else if (IsEqualIID(interfaceId, __uuidof(ISOSDacInterface7)))
    {
        ifaceRet = static_cast<ISOSDacInterface7*>(this);
    }
    else if (IsEqualIID(interfaceId, __uuidof(ISOSDacInterface8)))
    {
        ifaceRet = static_cast<ISOSDacInterface8*>(this);
    }
    else if (IsEqualIID(interfaceId, __uuidof(ISOSDacInterface9)))
    {
        ifaceRet = static_cast<ISOSDacInterface9*>(this);
    }
    else if (IsEqualIID(interfaceId, __uuidof(ISOSDacInterface10)))
    {
        ifaceRet = static_cast<ISOSDacInterface10*>(this);
    }
    else if (IsEqualIID(interfaceId, __uuidof(ISOSDacInterface11)))
    {
        ifaceRet = static_cast<ISOSDacInterface11*>(this);
    }
    else
    {
        *iface = NULL;
        return E_NOINTERFACE;
    }

    AddRef();
    *iface = ifaceRet;
    return S_OK;
}

// Standard COM reference counting.
STDMETHODIMP_(ULONG)
ClrDataAccess::AddRef(THIS)
{
    return InterlockedIncrement(&m_refs);
}

STDMETHODIMP_(ULONG)
ClrDataAccess::Release(THIS)
{
    SUPPORTS_DAC_HOST_ONLY;
    LONG newRefs = InterlockedDecrement(&m_refs);
    if (newRefs == 0)
    {
        delete this;
    }
    return newRefs;
}

HRESULT STDMETHODCALLTYPE
ClrDataAccess::Flush(void)
{
    SUPPORTS_DAC_HOST_ONLY;

    //
    // Free MD import objects.
    //
    m_mdImports.Flush();

    // Free instance memory.
    m_instances.Flush();

    // When the host instance cache is flushed we
    // update the instance age count so that
    // all child objects automatically become
    // invalid.  This prevents them from using
    // any pointers they've kept to host instances
    // which are now gone.
    m_instanceAge++;

    return S_OK;
}

// Begins enumerating the target's managed tasks (threads).
// Returns S_FALSE when there is no thread store / no threads.
HRESULT STDMETHODCALLTYPE
ClrDataAccess::StartEnumTasks(
    /* [out] */ CLRDATA_ENUM* handle)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        if (ThreadStore::s_pThreadStore)
        {
            Thread* thread = ThreadStore::GetAllThreadList(NULL, 0, 0);
            *handle = TO_CDENUM(thread);
            status = *handle ?
                S_OK : S_FALSE;
        }
        else
        {
            status = S_FALSE;
        }
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

// Returns the next task in the enumeration and advances the handle;
// S_FALSE when the enumeration is exhausted.
HRESULT STDMETHODCALLTYPE
ClrDataAccess::EnumTask(
    /* [in, out] */ CLRDATA_ENUM* handle,
    /* [out] */ IXCLRDataTask **task)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        if (*handle)
        {
            Thread* thread = FROM_CDENUM(Thread, *handle);
            *task = new (nothrow) ClrDataTask(this, thread);
            if (*task)
            {
                thread = ThreadStore::GetAllThreadList(thread, 0, 0);
                *handle = TO_CDENUM(thread);
                status = S_OK;
            }
            else
            {
                status = E_OUTOFMEMORY;
            }
        }
        else
        {
            status = S_FALSE;
        }
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

HRESULT STDMETHODCALLTYPE
ClrDataAccess::EndEnumTasks(
    /* [in] */ CLRDATA_ENUM handle)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        // Enumerator holds no resources.
        status = S_OK;
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

// Looks up the task for a given OS thread id; E_INVALIDARG when no
// matching managed thread exists.
HRESULT STDMETHODCALLTYPE
ClrDataAccess::GetTaskByOSThreadID(
    /* [in] */ ULONG32 osThreadID,
    /* [out] */ IXCLRDataTask **task)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        status = E_INVALIDARG;
        Thread* thread = DacGetThread(osThreadID);
        if (thread != NULL)
        {
            *task = new (nothrow) ClrDataTask(this, thread);
            status = *task ? S_OK : E_OUTOFMEMORY;
        }
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

HRESULT STDMETHODCALLTYPE
ClrDataAccess::GetTaskByUniqueID(
    /* [in] */ ULONG64 uniqueID,
    /* [out] */ IXCLRDataTask **task)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        Thread* thread = FindClrThreadByTaskId(uniqueID);
        if (thread)
        {
            *task = new (nothrow) ClrDataTask(this, thread);
            status = *task ?
                S_OK : E_OUTOFMEMORY;
        }
        else
        {
            status = E_INVALIDARG;
        }
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

HRESULT STDMETHODCALLTYPE
ClrDataAccess::GetFlags(
    /* [out] */ ULONG32 *flags)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        // XXX Microsoft - GC check.
        *flags = CLRDATA_PROCESS_DEFAULT;
        status = S_OK;
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

// Two ClrDataAccess instances are "the same" iff they wrap the same
// underlying data target.
HRESULT STDMETHODCALLTYPE
ClrDataAccess::IsSameObject(
    /* [in] */ IXCLRDataProcess* process)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        status = m_pTarget == ((ClrDataAccess*)process)->m_pTarget ?
            S_OK : S_FALSE;
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

// Not implemented.
HRESULT STDMETHODCALLTYPE
ClrDataAccess::GetManagedObject(
    /* [out] */ IXCLRDataValue **value)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        // XXX Microsoft.
        status = E_NOTIMPL;
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

// Not implemented.
HRESULT STDMETHODCALLTYPE
ClrDataAccess::GetDesiredExecutionState(
    /* [out] */ ULONG32 *state)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        // XXX Microsoft.
        status = E_NOTIMPL;
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

// Not implemented.
HRESULT STDMETHODCALLTYPE
ClrDataAccess::SetDesiredExecutionState(
    /* [in] */ ULONG32 state)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        // XXX Microsoft.
        status = E_NOTIMPL;
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

// Classifies a target address as managed method, runtime stub, or
// unrecognized.
HRESULT STDMETHODCALLTYPE
ClrDataAccess::GetAddressType(
    /* [in] */ CLRDATA_ADDRESS address,
    /* [out] */ CLRDataAddressType* type)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        // The only thing that constitutes a failure is some
        // dac failure while checking things.
        status = S_OK;
        TADDR taAddr = CLRDATA_ADDRESS_TO_TADDR(address);

        if (IsPossibleCodeAddress(taAddr) == S_OK)
        {
            if (ExecutionManager::IsManagedCode(taAddr))
            {
                *type = CLRDATA_ADDRESS_MANAGED_METHOD;
                goto Exit;
            }

            if (StubManager::IsStub(taAddr))
            {
                *type = CLRDATA_ADDRESS_RUNTIME_UNMANAGED_STUB;
                goto Exit;
            }
        }

        *type = CLRDATA_ADDRESS_UNRECOGNIZED;

    Exit: ;
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

HRESULT STDMETHODCALLTYPE
ClrDataAccess::GetRuntimeNameByAddress(
    /* [in] */ CLRDATA_ADDRESS address,
    /* [in] */ ULONG32 flags,
    /* [in] */ ULONG32 bufLen,
    /* [out] */ ULONG32 *symbolLen,
    /* [size_is][out] */ _Out_writes_bytes_opt_(bufLen) WCHAR symbolBuf[ ],
    /* [out] */ CLRDATA_ADDRESS* displacement)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
#ifdef TARGET_ARM
        address &= ~THUMB_CODE; //workaround for windbg passing in addresses with the THUMB mode bit set
#endif
        status = RawGetMethodName(address, flags, bufLen, symbolLen,
                                  symbolBuf, displacement);
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

// Begins enumerating app domains; the handle owns a heap-allocated
// iterator freed in EndEnumAppDomains.
HRESULT STDMETHODCALLTYPE
ClrDataAccess::StartEnumAppDomains(
    /* [out] */ CLRDATA_ENUM* handle)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        AppDomainIterator* iter = new (nothrow) AppDomainIterator(FALSE);
        if (iter)
        {
            *handle = TO_CDENUM(iter);
            status = S_OK;
        }
        else
        {
            status = E_OUTOFMEMORY;
        }
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this,
                                &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

// Returns the next app domain in the enumeration; S_FALSE when done.
HRESULT STDMETHODCALLTYPE
ClrDataAccess::EnumAppDomain(
    /* [in, out] */ CLRDATA_ENUM* handle,
    /* [out] */ IXCLRDataAppDomain **appDomain)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        AppDomainIterator* iter = FROM_CDENUM(AppDomainIterator, *handle);
        if (iter->Next())
        {
            *appDomain = new (nothrow)
                ClrDataAppDomain(this, iter->GetDomain());
            status = *appDomain ? S_OK : E_OUTOFMEMORY;
        }
        else
        {
            status = S_FALSE;
        }
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

// Frees the iterator allocated by StartEnumAppDomains.
HRESULT STDMETHODCALLTYPE
ClrDataAccess::EndEnumAppDomains(
    /* [in] */ CLRDATA_ENUM handle)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        AppDomainIterator* iter = FROM_CDENUM(AppDomainIterator, handle);
        delete iter;
        status = S_OK;
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

// Only DefaultADID is valid; there is a single app domain.
HRESULT STDMETHODCALLTYPE
ClrDataAccess::GetAppDomainByUniqueID(
    /* [in] */ ULONG64 uniqueID,
    /* [out] */ IXCLRDataAppDomain **appDomain)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        if (uniqueID != DefaultADID)
        {
            status = E_INVALIDARG;
        }
        else
        {
            *appDomain = new (nothrow)
                ClrDataAppDomain(this, AppDomain::GetCurrentDomain());
            status = *appDomain ?
                S_OK : E_OUTOFMEMORY;
        }
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

// Begins enumerating assemblies; the handle owns a heap-allocated
// iterator freed in EndEnumAssemblies.
HRESULT STDMETHODCALLTYPE
ClrDataAccess::StartEnumAssemblies(
    /* [out] */ CLRDATA_ENUM* handle)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        ProcessModIter* iter = new (nothrow) ProcessModIter;
        if (iter)
        {
            *handle = TO_CDENUM(iter);
            status = S_OK;
        }
        else
        {
            status = E_OUTOFMEMORY;
        }
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

// Returns the next assembly in the enumeration; S_FALSE when done.
HRESULT STDMETHODCALLTYPE
ClrDataAccess::EnumAssembly(
    /* [in, out] */ CLRDATA_ENUM* handle,
    /* [out] */ IXCLRDataAssembly **assembly)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        ProcessModIter* iter = FROM_CDENUM(ProcessModIter, *handle);
        Assembly* assem;

        if ((assem = iter->NextAssem()))
        {
            *assembly = new (nothrow) ClrDataAssembly(this, assem);
            status = *assembly ? S_OK : E_OUTOFMEMORY;
        }
        else
        {
            status = S_FALSE;
        }
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

// Frees the iterator allocated by StartEnumAssemblies.
HRESULT STDMETHODCALLTYPE
ClrDataAccess::EndEnumAssemblies(
    /* [in] */ CLRDATA_ENUM handle)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        ProcessModIter* iter = FROM_CDENUM(ProcessModIter, handle);
        delete iter;
        status = S_OK;
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

// Begins enumerating modules; the handle owns a heap-allocated iterator
// freed in EndEnumModules.
HRESULT STDMETHODCALLTYPE
ClrDataAccess::StartEnumModules(
    /* [out] */ CLRDATA_ENUM* handle)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        ProcessModIter* iter = new (nothrow) ProcessModIter;
        if (iter)
        {
            *handle = TO_CDENUM(iter);
            status = S_OK;
        }
        else
        {
            status = E_OUTOFMEMORY;
        }
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

HRESULT
STDMETHODCALLTYPE
ClrDataAccess::EnumModule(
    /* [in, out] */ CLRDATA_ENUM* handle,
    /* [out] */ IXCLRDataModule **mod)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        ProcessModIter* iter = FROM_CDENUM(ProcessModIter, *handle);
        Module* curMod;

        if ((curMod = iter->NextModule()))
        {
            *mod = new (nothrow) ClrDataModule(this, curMod);
            status = *mod ? S_OK : E_OUTOFMEMORY;
        }
        else
        {
            // enumeration exhausted
            status = S_FALSE;
        }
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

// Frees the iterator allocated by StartEnumModules.
HRESULT STDMETHODCALLTYPE
ClrDataAccess::EndEnumModules(
    /* [in] */ CLRDATA_ENUM handle)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        ProcessModIter* iter = FROM_CDENUM(ProcessModIter, handle);
        delete iter;
        status = S_OK;
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

// Finds the module whose loaded image range contains the given address;
// S_FALSE when no module matches.
HRESULT STDMETHODCALLTYPE
ClrDataAccess::GetModuleByAddress(
    /* [in] */ CLRDATA_ADDRESS address,
    /* [out] */ IXCLRDataModule** mod)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        ProcessModIter modIter;
        Module* modDef;

        while ((modDef = modIter.NextModule()))
        {
            TADDR base;
            ULONG32 length;
            PEAssembly* pPEAssembly = modDef->GetPEAssembly();

            if ((base = PTR_TO_TADDR(pPEAssembly->GetLoadedImageContents(&length))))
            {
                if (TO_CDADDR(base) <= address &&
                    TO_CDADDR(base + length) > address)
                {
                    break;
                }
            }
        }

        if (modDef)
        {
            *mod = new (nothrow) ClrDataModule(this, modDef);
            status = *mod ?
                S_OK : E_OUTOFMEMORY;
        }
        else
        {
            status = S_FALSE;
        }
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

// Begins enumerating method definitions for the module containing the
// given address.
HRESULT STDMETHODCALLTYPE
ClrDataAccess::StartEnumMethodDefinitionsByAddress(
    /* [in] */ CLRDATA_ADDRESS address,
    /* [out] */ CLRDATA_ENUM *handle)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        ProcessModIter modIter;
        Module* modDef;

        // locate the module whose loaded image range contains the address
        while ((modDef = modIter.NextModule()))
        {
            TADDR base;
            ULONG32 length;
            PEAssembly* assembly = modDef->GetPEAssembly();

            if ((base = PTR_TO_TADDR(assembly->GetLoadedImageContents(&length))))
            {
                if (TO_CDADDR(base) <= address &&
                    TO_CDADDR(base + length) > address)
                {
                    break;
                }
            }
        }

        status = EnumMethodDefinitions::
            CdStart(modDef, true, address, handle);
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

HRESULT STDMETHODCALLTYPE
ClrDataAccess::EnumMethodDefinitionByAddress(
    /* [out][in] */ CLRDATA_ENUM* handle,
    /* [out] */ IXCLRDataMethodDefinition **method)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        status = EnumMethodDefinitions::CdNext(this, handle, method);
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

HRESULT STDMETHODCALLTYPE
ClrDataAccess::EndEnumMethodDefinitionsByAddress(
    /* [in] */ CLRDATA_ENUM handle)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        status = EnumMethodDefinitions::CdEnd(handle);
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

// Begins enumerating method instances for the code at the given address;
// S_FALSE when the address is not managed code.
HRESULT STDMETHODCALLTYPE
ClrDataAccess::StartEnumMethodInstancesByAddress(
    /* [in] */ CLRDATA_ADDRESS address,
    /* [in] */ IXCLRDataAppDomain* appDomain,
    /* [out] */ CLRDATA_ENUM *handle)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        MethodDesc* methodDesc;

        *handle = 0;
        status = S_FALSE;
        TADDR taddr;
        if(
            (status = TRY_CLRDATA_ADDRESS_TO_TADDR(address, &taddr)) != S_OK )
        {
            goto Exit;
        }

        if (IsPossibleCodeAddress(taddr) != S_OK)
        {
            goto Exit;
        }

        methodDesc = ExecutionManager::GetCodeMethodDesc(taddr);
        if (!methodDesc)
        {
            goto Exit;
        }

        status = EnumMethodInstances::CdStart(methodDesc, appDomain,
                                              handle);

    Exit: ;
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

HRESULT STDMETHODCALLTYPE
ClrDataAccess::EnumMethodInstanceByAddress(
    /* [out][in] */ CLRDATA_ENUM* handle,
    /* [out] */ IXCLRDataMethodInstance **method)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        status = EnumMethodInstances::CdNext(this, handle, method);
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

HRESULT STDMETHODCALLTYPE
ClrDataAccess::EndEnumMethodInstancesByAddress(
    /* [in] */ CLRDATA_ENUM handle)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        status = EnumMethodInstances::CdEnd(handle);
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

// Not implemented; only flags == 0 is accepted.
HRESULT STDMETHODCALLTYPE
ClrDataAccess::GetDataByAddress(
    /* [in] */ CLRDATA_ADDRESS address,
    /* [in] */ ULONG32 flags,
    /* [in] */ IXCLRDataAppDomain* appDomain,
    /* [in] */ IXCLRDataTask* tlsTask,
    /* [in] */ ULONG32 bufLen,
    /* [out] */ ULONG32 *nameLen,
    /* [size_is][out] */ _Out_writes_to_opt_(bufLen, *nameLen) WCHAR nameBuf[ ],
    /* [out] */ IXCLRDataValue **value,
    /* [out] */ CLRDATA_ADDRESS *displacement)
{
    HRESULT status;

    if (flags != 0)
    {
        return E_INVALIDARG;
    }

    DAC_ENTER();

    EX_TRY
    {
        // XXX Microsoft.
        status = E_NOTIMPL;
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

// Not implemented.
HRESULT STDMETHODCALLTYPE
ClrDataAccess::GetExceptionStateByExceptionRecord(
    /* [in] */ EXCEPTION_RECORD64 *record,
    /* [out] */ IXCLRDataExceptionState **exception)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        // XXX Microsoft.
        status = E_NOTIMPL;
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

// Decodes a DAC notification exception record and invokes the matching
// callback on the supplied notification interface.
HRESULT STDMETHODCALLTYPE
ClrDataAccess::TranslateExceptionRecordToNotification(
    /* [in] */ EXCEPTION_RECORD64 *record,
    /* [in] */ IXCLRDataExceptionNotification *notify)
{
    HRESULT status = E_FAIL;
    ClrDataModule* pubModule = NULL;
    ClrDataMethodInstance* pubMethodInst = NULL;
    ClrDataExceptionState* pubExState = NULL;
    GcEvtArgs pubGcEvtArgs;
    ULONG32 notifyType = 0;
    DWORD catcherNativeOffset = 0;
    TADDR nativeCodeLocation = NULL;

    DAC_ENTER();

    EX_TRY
    {
        //
        // We cannot hold the dac lock while calling
        // out as the external code can do arbitrary things.
        // Instead we make a pass over the exception
        // information and create all necessary objects.
        // We then leave the lock and make the callback.
        //

        TADDR exInfo[EXCEPTION_MAXIMUM_PARAMETERS];
        for (UINT i = 0; i < EXCEPTION_MAXIMUM_PARAMETERS; i++)
        {
            exInfo[i] = TO_TADDR(record->ExceptionInformation[i]);
        }

        notifyType = DACNotify::GetType(exInfo);
        switch(notifyType)
        {
        case DACNotify::MODULE_LOAD_NOTIFICATION:
        {
            TADDR modulePtr;

            if (DACNotify::ParseModuleLoadNotification(exInfo, modulePtr))
            {
                Module* clrModule = PTR_Module(modulePtr);
                pubModule = new (nothrow) ClrDataModule(this, clrModule);
                if (pubModule == NULL)
                {
                    status = E_OUTOFMEMORY;
                }
                else
                {
                    status = S_OK;
                }
            }
            break;
        }

        case DACNotify::MODULE_UNLOAD_NOTIFICATION:
        {
            TADDR modulePtr;

            if (DACNotify::ParseModuleUnloadNotification(exInfo, modulePtr))
            {
                Module* clrModule = PTR_Module(modulePtr);
                pubModule = new (nothrow) ClrDataModule(this, clrModule);
                if (pubModule == NULL)
                {
                    status = E_OUTOFMEMORY;
                }
                else
                {
                    status = S_OK;
                }
            }
            break;
        }

        case DACNotify::JIT_NOTIFICATION2:
        {
            TADDR methodDescPtr;

            if(DACNotify::ParseJITNotification(exInfo, methodDescPtr, nativeCodeLocation))
            {
                // Try and find the right appdomain
                MethodDesc* methodDesc = PTR_MethodDesc(methodDescPtr);
                BaseDomain* baseDomain = methodDesc->GetDomain();
                AppDomain* appDomain = NULL;

                if (baseDomain->IsAppDomain())
                {
                    appDomain = PTR_AppDomain(PTR_HOST_TO_TADDR(baseDomain));
                }
                else
                {
                    // Find a likely domain, because it's the shared domain.
                    AppDomainIterator adi(FALSE);
                    appDomain = adi.GetDomain();
                }

                pubMethodInst =
                    new (nothrow) ClrDataMethodInstance(this,
                                                        appDomain,
                                                        methodDesc);
                if (pubMethodInst == NULL)
                {
                    status = E_OUTOFMEMORY;
                }
                else
                {
                    status = S_OK;
                }
            }
            break;
        }

        case DACNotify::EXCEPTION_NOTIFICATION:
        {
            TADDR threadPtr;

            if (DACNotify::ParseExceptionNotification(exInfo, threadPtr))
            {
                // Translation can only occur at the time of
                // receipt of the notify exception, so we assume
                // that the Thread's current exception state
                // is the state we want.
                status = ClrDataExceptionState::
                    NewFromThread(this,
                                  PTR_Thread(threadPtr),
                                  &pubExState,
                                  NULL);
            }
            break;
        }

        case DACNotify::GC_NOTIFICATION:
        {
            if (DACNotify::ParseGCNotification(exInfo, pubGcEvtArgs))
            {
                status = S_OK;
            }
            break;
        }

        case DACNotify::CATCH_ENTER_NOTIFICATION:
        {
            TADDR methodDescPtr;
            if (DACNotify::ParseExceptionCatcherEnterNotification(exInfo, methodDescPtr, catcherNativeOffset))
            {
                // Try and find the right appdomain
                MethodDesc* methodDesc = PTR_MethodDesc(methodDescPtr);
                BaseDomain* baseDomain = methodDesc->GetDomain();
                AppDomain* appDomain = NULL;

                if (baseDomain->IsAppDomain())
                {
                    appDomain = PTR_AppDomain(PTR_HOST_TO_TADDR(baseDomain));
                }
                else
                {
                    // Find a likely domain, because it's the shared domain.
                    AppDomainIterator adi(FALSE);
                    appDomain = adi.GetDomain();
                }

                pubMethodInst =
                    new (nothrow) ClrDataMethodInstance(this,
                                                        appDomain,
                                                        methodDesc);
                if (pubMethodInst == NULL)
                {
                    status = E_OUTOFMEMORY;
                }
                else
                {
                    status = S_OK;
                }
            }
            break;
        }

        default:
            status = E_INVALIDARG;
            break;
        }
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();

    // Now that the lock is released, make the actual callbacks.
    if (status == S_OK)
    {
        // Probe for the newer notification interfaces; each is optional.
        IXCLRDataExceptionNotification2* notify2;
        if (notify->QueryInterface(__uuidof(IXCLRDataExceptionNotification2),
                                   (void**)&notify2) != S_OK)
        {
            notify2 = NULL;
        }
        IXCLRDataExceptionNotification3* notify3;
        if (notify->QueryInterface(__uuidof(IXCLRDataExceptionNotification3),
                                   (void**)&notify3) != S_OK)
        {
            notify3 = NULL;
        }
        IXCLRDataExceptionNotification4* notify4;
        if (notify->QueryInterface(__uuidof(IXCLRDataExceptionNotification4),
                                   (void**)&notify4) != S_OK)
        {
            notify4 = NULL;
        }
        IXCLRDataExceptionNotification5* notify5;
        if (notify->QueryInterface(__uuidof(IXCLRDataExceptionNotification5),
                                   (void**)&notify5) != S_OK)
        {
            notify5 = NULL;
        }

        switch(notifyType)
        {
        case DACNotify::MODULE_LOAD_NOTIFICATION:
            notify->OnModuleLoaded(pubModule);
            break;

        case DACNotify::MODULE_UNLOAD_NOTIFICATION:
            notify->OnModuleUnloaded(pubModule);
            break;

        case
DACNotify::JIT_NOTIFICATION2: notify->OnCodeGenerated(pubMethodInst); if (notify5) { notify5->OnCodeGenerated2(pubMethodInst, TO_CDADDR(nativeCodeLocation)); } break; case DACNotify::EXCEPTION_NOTIFICATION: if (notify2) { notify2->OnException(pubExState); } else { status = E_INVALIDARG; } break; case DACNotify::GC_NOTIFICATION: if (notify3) { notify3->OnGcEvent(pubGcEvtArgs); } break; case DACNotify::CATCH_ENTER_NOTIFICATION: if (notify4) { notify4->ExceptionCatcherEnter(pubMethodInst, catcherNativeOffset); } break; default: // notifyType has already been validated. _ASSERTE(FALSE); break; } if (notify2) { notify2->Release(); } if (notify3) { notify3->Release(); } if (notify4) { notify4->Release(); } if (notify5) { notify5->Release(); } } if (pubModule) { pubModule->Release(); } if (pubMethodInst) { pubMethodInst->Release(); } if (pubExState) { pubExState->Release(); } return status; } HRESULT STDMETHODCALLTYPE ClrDataAccess::CreateMemoryValue( /* [in] */ IXCLRDataAppDomain* appDomain, /* [in] */ IXCLRDataTask* tlsTask, /* [in] */ IXCLRDataTypeInstance* type, /* [in] */ CLRDATA_ADDRESS addr, /* [out] */ IXCLRDataValue** value) { HRESULT status; DAC_ENTER(); EX_TRY { AppDomain* dacDomain; Thread* dacThread; TypeHandle dacType; ULONG32 flags; NativeVarLocation loc; dacDomain = ((ClrDataAppDomain*)appDomain)->GetAppDomain(); if (tlsTask) { dacThread = ((ClrDataTask*)tlsTask)->GetThread(); } else { dacThread = NULL; } dacType = ((ClrDataTypeInstance*)type)->GetTypeHandle(); flags = GetTypeFieldValueFlags(dacType, NULL, 0, false); loc.addr = addr; loc.size = dacType.GetSize(); loc.contextReg = false; *value = new (nothrow) ClrDataValue(this, dacDomain, dacThread, flags, dacType, addr, 1, &loc); status = *value ? 
S_OK : E_OUTOFMEMORY;
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status)) { EX_RETHROW; }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

// Not implemented: always returns E_NOTIMPL.
HRESULT STDMETHODCALLTYPE
ClrDataAccess::SetAllTypeNotifications(
    /* [in] */ IXCLRDataModule* mod,
    /* [in] */ ULONG32 flags)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        // XXX Microsoft.
        status = E_NOTIMPL;
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status)) { EX_RETHROW; }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

// Sets the given JIT-notification flags for every method of 'mod' (or all
// modules when 'mod' is NULL) via the out-of-proc JIT notification table.
HRESULT STDMETHODCALLTYPE
ClrDataAccess::SetAllCodeNotifications(
    /* [in] */ IXCLRDataModule* mod,
    /* [in] */ ULONG32 flags)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        status = E_FAIL;

        if (!IsValidMethodCodeNotification(flags))
        {
            status = E_INVALIDARG;
        }
        else
        {
            JITNotifications jn(GetHostJitNotificationTable());
            if (!jn.IsActive())
            {
                status = E_OUTOFMEMORY;
            }
            else
            {
                BOOL changedTable;
                TADDR modulePtr = mod ?
                    PTR_HOST_TO_TADDR(((ClrDataModule*)mod)->GetModule()) :
                    NULL;

                if (jn.SetAllNotifications(modulePtr, flags, &changedTable))
                {
                    // Only push the table back to the target process when it
                    // actually changed.
                    if (!changedTable ||
                        (changedTable && jn.UpdateOutOfProcTable()))
                    {
                        status = S_OK;
                    }
                }
            }
        }
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status)) { EX_RETHROW; }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

// Not implemented: always returns E_NOTIMPL.
HRESULT STDMETHODCALLTYPE
ClrDataAccess::GetTypeNotifications(
    /* [in] */ ULONG32 numTokens,
    /* [in, size_is(numTokens)] */ IXCLRDataModule* mods[],
    /* [in] */ IXCLRDataModule* singleMod,
    /* [in, size_is(numTokens)] */ mdTypeDef tokens[],
    /* [out, size_is(numTokens)] */ ULONG32 flags[])
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        // XXX Microsoft.
        status = E_NOTIMPL;
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status)) { EX_RETHROW; }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

// Not implemented: always returns E_NOTIMPL.
HRESULT STDMETHODCALLTYPE
ClrDataAccess::SetTypeNotifications(
    /* [in] */ ULONG32 numTokens,
    /* [in, size_is(numTokens)] */ IXCLRDataModule* mods[],
    /* [in] */ IXCLRDataModule* singleMod,
    /* [in, size_is(numTokens)] */ mdTypeDef tokens[],
    /* [in, size_is(numTokens)] */ ULONG32 flags[],
    /* [in] */ ULONG32 singleFlags)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        // XXX Microsoft.
        status = E_NOTIMPL;
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status)) { EX_RETHROW; }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

// Reads the current JIT-notification flags for each method token; callers
// pass either one module for all tokens ('singleMod') or a parallel 'mods'
// array — exactly one of the two must be supplied.
HRESULT STDMETHODCALLTYPE
ClrDataAccess::GetCodeNotifications(
    /* [in] */ ULONG32 numTokens,
    /* [in, size_is(numTokens)] */ IXCLRDataModule* mods[],
    /* [in] */ IXCLRDataModule* singleMod,
    /* [in, size_is(numTokens)] */ mdMethodDef tokens[],
    /* [out, size_is(numTokens)] */ ULONG32 flags[])
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        if ((flags == NULL || tokens == NULL) ||
            (mods == NULL && singleMod == NULL) ||
            (mods != NULL && singleMod != NULL))
        {
            status = E_INVALIDARG;
        }
        else
        {
            JITNotifications jn(GetHostJitNotificationTable());
            if (!jn.IsActive())
            {
                status = E_OUTOFMEMORY;
            }
            else
            {
                TADDR modulePtr = NULL;
                if (singleMod)
                {
                    modulePtr = PTR_HOST_TO_TADDR(((ClrDataModule*)singleMod)->
                                                  GetModule());
                }

                for (ULONG32 i = 0; i < numTokens; i++)
                {
                    if (singleMod == NULL)
                    {
                        modulePtr =
                            PTR_HOST_TO_TADDR(((ClrDataModule*)mods[i])->
                                              GetModule());
                    }
                    USHORT jt = jn.Requested(modulePtr, tokens[i]);
                    flags[i] = jt;
                }

                status = S_OK;
            }
        }
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status)) { EX_RETHROW; }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

// Writes JIT-notification flags for each method token (per-token 'flags'
// array, or 'singleFlags' for all). The out-of-proc table is only updated
// when an entry actually changed.
HRESULT STDMETHODCALLTYPE
ClrDataAccess::SetCodeNotifications(
    /* [in] */ ULONG32 numTokens,
    /* [in, size_is(numTokens)] */ IXCLRDataModule* mods[],
    /* [in] */ IXCLRDataModule* singleMod,
    /* [in, size_is(numTokens)] */ mdMethodDef tokens[],
    /* [in, size_is(numTokens)] */ ULONG32 flags[],
    /* [in] */ ULONG32 singleFlags)
{
    HRESULT status = E_UNEXPECTED;

    DAC_ENTER();

    EX_TRY
    {
        if ((tokens == NULL) ||
            (mods == NULL && singleMod == NULL) ||
            (mods != NULL && singleMod != NULL))
        {
            status = E_INVALIDARG;
        }
        else
        {
            JITNotifications jn(GetHostJitNotificationTable());
            if (!jn.IsActive() || numTokens > jn.GetTableSize())
            {
                status = E_OUTOFMEMORY;
            }
            else
            {
                BOOL changedTable = FALSE;

                // Are flags valid?
                if (flags)
                {
                    for (ULONG32 check = 0; check < numTokens; check++)
                    {
                        if (!IsValidMethodCodeNotification(flags[check]))
                        {
                            status = E_INVALIDARG;
                            goto Exit;
                        }
                    }
                }
                else if (!IsValidMethodCodeNotification(singleFlags))
                {
                    status = E_INVALIDARG;
                    goto Exit;
                }

                TADDR modulePtr = NULL;
                if (singleMod)
                {
                    modulePtr = PTR_HOST_TO_TADDR(((ClrDataModule*)singleMod)->
                                                  GetModule());
                }

                for (ULONG32 i = 0; i < numTokens; i++)
                {
                    if (singleMod == NULL)
                    {
                        modulePtr =
                            PTR_HOST_TO_TADDR(((ClrDataModule*)mods[i])->
                                              GetModule());
                    }

                    USHORT curFlags = jn.Requested(modulePtr, tokens[i]);
                    USHORT setFlags = (USHORT)(flags ? flags[i] : singleFlags);

                    if (curFlags != setFlags)
                    {
                        if (!jn.SetNotification(modulePtr, tokens[i],
                                                setFlags))
                        {
                            status = E_FAIL;
                            goto Exit;
                        }

                        changedTable = TRUE;
                    }
                }

                if (!changedTable ||
                    (changedTable && jn.UpdateOutOfProcTable()))
                {
                    status = S_OK;
                }
            }
        }

    Exit: ;
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status)) { EX_RETHROW; }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

// Returns the global DAC notification flags (g_dacNotificationFlags).
HRESULT
ClrDataAccess::GetOtherNotificationFlags(
    /* [out] */ ULONG32* flags)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        *flags = g_dacNotificationFlags;
        status = S_OK;
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status)) { EX_RETHROW; }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

// Sets the global DAC notification flags; only the four documented bits
// are accepted.
HRESULT
ClrDataAccess::SetOtherNotificationFlags(
    /* [in] */ ULONG32 flags)
{
    HRESULT status;

    if ((flags & ~(CLRDATA_NOTIFY_ON_MODULE_LOAD |
                   CLRDATA_NOTIFY_ON_MODULE_UNLOAD |
                   CLRDATA_NOTIFY_ON_EXCEPTION |
                   CLRDATA_NOTIFY_ON_EXCEPTION_CATCH_ENTER)) != 0)
    {
        return E_INVALIDARG;
    }

    DAC_ENTER();

    EX_TRY
    {
        g_dacNotificationFlags = flags;
        status = S_OK;
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status)) { EX_RETHROW; }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

// Internal stub-following state kept between FollowStub2 calls; overlays the
// public CLRDATA_FOLLOW_STUB_BUFFER.
enum
{
    STUB_BUF_FLAGS_START,

    STUB_BUF_METHOD_JITTED,
    STUB_BUF_FRAME_PUSHED,
    STUB_BUF_STUB_MANAGER_PUSHED,

    STUB_BUF_FLAGS_END,
};

union STUB_BUF
{
    CLRDATA_FOLLOW_STUB_BUFFER apiBuf;
    struct
    {
        ULONG64 flags;
        ULONG64 addr;
        ULONG64 arg1;
    } u;
};

// Performs one step of stub-following: resumes a pending trace recorded in
// 'inBuffer' (if any), otherwise asks the stub managers to trace 'inAddr',
// and encodes the outcome into outAddr/outBuffer/outFlags.
// (Continues on the next lines of the file.)
HRESULT
ClrDataAccess::FollowStubStep(
    /* [in] */ Thread* thread,
    /* [in] */ ULONG32 inFlags,
    /* [in] */ TADDR inAddr,
    /* [in] */ union STUB_BUF* inBuffer,
    /* [out] */ TADDR* outAddr,
    /* [out] */ union STUB_BUF* outBuffer,
    /* [out] */ ULONG32* outFlags)
{
    TraceDestination trace;
    bool traceDone = false;
    BYTE* retAddr;
    T_CONTEXT localContext;
    REGDISPLAY regDisp;
    MethodDesc* methodDesc;

    ZeroMemory(outBuffer, sizeof(*outBuffer));

    if (inBuffer)
    {
        switch(inBuffer->u.flags)
        {
        case STUB_BUF_METHOD_JITTED:
            if (inAddr !=
GFN_TADDR(DACNotifyCompilationFinished))
            {
                return E_INVALIDARG;
            }

            // It's possible that this notification is
            // for a different method, so double-check
            // and recycle the notification if necessary.
            methodDesc = PTR_MethodDesc(CORDB_ADDRESS_TO_TADDR(inBuffer->u.addr));
            if (methodDesc->HasNativeCode())
            {
                *outAddr = methodDesc->GetNativeCode();
                *outFlags = CLRDATA_FOLLOW_STUB_EXIT;
                return S_OK;
            }

            // We didn't end up with native code so try again.
            trace.InitForUnjittedMethod(methodDesc);
            traceDone = true;
            break;

        case STUB_BUF_FRAME_PUSHED:
            if (!thread ||
                inAddr != inBuffer->u.addr)
            {
                return E_INVALIDARG;
            }

            trace.InitForFramePush(CORDB_ADDRESS_TO_TADDR(inBuffer->u.addr));
            DacGetThreadContext(thread, &localContext);
            thread->FillRegDisplay(&regDisp, &localContext);
            if (!thread->GetFrame()->
                TraceFrame(thread,
                           TRUE,
                           &trace,
                           &regDisp))
            {
                return E_FAIL;
            }

            traceDone = true;
            break;

        case STUB_BUF_STUB_MANAGER_PUSHED:
            if (!thread ||
                inAddr != inBuffer->u.addr ||
                !inBuffer->u.arg1)
            {
                return E_INVALIDARG;
            }

            trace.InitForManagerPush(CORDB_ADDRESS_TO_TADDR(inBuffer->u.addr),
                                     PTR_StubManager(CORDB_ADDRESS_TO_TADDR(inBuffer->u.arg1)));
            DacGetThreadContext(thread, &localContext);
            if (!trace.GetStubManager()->
                TraceManager(thread,
                             &trace,
                             &localContext,
                             &retAddr))
            {
                return E_FAIL;
            }

            traceDone = true;
            break;

        default:
            return E_INVALIDARG;
        }
    }

    if ((!traceDone &&
         !StubManager::TraceStub(inAddr, &trace)) ||
        !StubManager::FollowTrace(&trace))
    {
        return E_NOINTERFACE;
    }

    switch(trace.GetTraceType())
    {
    case TRACE_UNMANAGED:
    case TRACE_MANAGED:
        // We've hit non-stub code so we're done.
        *outAddr = trace.GetAddress();
        *outFlags = CLRDATA_FOLLOW_STUB_EXIT;
        break;

    case TRACE_UNJITTED_METHOD:
        // The stub causes jitting, so return
        // the address of the jit-complete routine
        // so that the real native address can
        // be picked up once the JIT is done.
        methodDesc = trace.GetMethodDesc();
        *outAddr = GFN_TADDR(DACNotifyCompilationFinished);
        outBuffer->u.flags = STUB_BUF_METHOD_JITTED;
        outBuffer->u.addr = PTR_HOST_TO_TADDR(methodDesc);
        *outFlags = CLRDATA_FOLLOW_STUB_INTERMEDIATE;
        break;

    case TRACE_FRAME_PUSH:
        if (!thread)
        {
            return E_INVALIDARG;
        }

        *outAddr = trace.GetAddress();
        outBuffer->u.flags = STUB_BUF_FRAME_PUSHED;
        outBuffer->u.addr = trace.GetAddress();
        *outFlags = CLRDATA_FOLLOW_STUB_INTERMEDIATE;
        break;

    case TRACE_MGR_PUSH:
        if (!thread)
        {
            return E_INVALIDARG;
        }

        *outAddr = trace.GetAddress();
        outBuffer->u.flags = STUB_BUF_STUB_MANAGER_PUSHED;
        outBuffer->u.addr = trace.GetAddress();
        outBuffer->u.arg1 = PTR_HOST_TO_TADDR(trace.GetStubManager());
        *outFlags = CLRDATA_FOLLOW_STUB_INTERMEDIATE;
        break;

    default:
        return E_INVALIDARG;
    }

    return S_OK;
}

// Thread-less convenience wrapper over FollowStub2.
HRESULT STDMETHODCALLTYPE
ClrDataAccess::FollowStub(
    /* [in] */ ULONG32 inFlags,
    /* [in] */ CLRDATA_ADDRESS inAddr,
    /* [in] */ CLRDATA_FOLLOW_STUB_BUFFER* _inBuffer,
    /* [out] */ CLRDATA_ADDRESS* outAddr,
    /* [out] */ CLRDATA_FOLLOW_STUB_BUFFER* _outBuffer,
    /* [out] */ ULONG32* outFlags)
{
    return FollowStub2(NULL, inFlags, inAddr, _inBuffer,
                       outAddr, _outBuffer, outFlags);
}

// Follows a stub at '_inAddr' to its eventual code target, iterating
// FollowStubStep. A small bounded loop (4 iterations) guards against
// corrupt data producing a non-advancing trace.
HRESULT STDMETHODCALLTYPE
ClrDataAccess::FollowStub2(
    /* [in] */ IXCLRDataTask* task,
    /* [in] */ ULONG32 inFlags,
    /* [in] */ CLRDATA_ADDRESS _inAddr,
    /* [in] */ CLRDATA_FOLLOW_STUB_BUFFER* _inBuffer,
    /* [out] */ CLRDATA_ADDRESS* _outAddr,
    /* [out] */ CLRDATA_FOLLOW_STUB_BUFFER* _outBuffer,
    /* [out] */ ULONG32* outFlags)
{
    HRESULT status;

    if ((inFlags & ~(CLRDATA_FOLLOW_STUB_DEFAULT)) != 0)
    {
        return E_INVALIDARG;
    }

    STUB_BUF* inBuffer = (STUB_BUF*)_inBuffer;
    STUB_BUF* outBuffer = (STUB_BUF*)_outBuffer;

    if (inBuffer &&
        (inBuffer->u.flags <= STUB_BUF_FLAGS_START ||
         inBuffer->u.flags >= STUB_BUF_FLAGS_END))
    {
        return E_INVALIDARG;
    }

    DAC_ENTER();

    EX_TRY
    {
        STUB_BUF cycleBuf;
        TADDR inAddr = TO_TADDR(_inAddr);
        TADDR outAddr;
        Thread* thread = task ? ((ClrDataTask*)task)->GetThread() : NULL;
        ULONG32 loops = 4;

        for (;;)
        {
            if ((status = FollowStubStep(thread,
                                         inFlags,
                                         inAddr,
                                         inBuffer,
                                         &outAddr,
                                         outBuffer,
                                         outFlags)) != S_OK)
            {
                break;
            }

            // Some stub tracing just requests further iterations
            // of processing, so detect that case and loop.
            if (outAddr != inAddr)
            {
                // We can make forward progress, we're done.
                *_outAddr = TO_CDADDR(outAddr);
                break;
            }

            // We need more processing.  As a protection
            // against infinite loops in corrupted or buggy
            // situations, we only allow this to happen a
            // small number of times.
            if (--loops == 0)
            {
                ZeroMemory(outBuffer, sizeof(*outBuffer));
                status = E_FAIL;
                break;
            }

            cycleBuf = *outBuffer;
            inBuffer = &cycleBuf;
        }
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status)) { EX_RETHROW; }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable:4297)
#endif // _MSC_VER

// Looks up a pending GC notification matching gcEvtArgs->typ in the host GC
// notification table and copies it back to the caller.
STDMETHODIMP
ClrDataAccess::GetGcNotification(GcEvtArgs* gcEvtArgs)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        if (gcEvtArgs->typ >= GC_EVENT_TYPE_MAX)
        {
            status = E_INVALIDARG;
        }
        else
        {
            GcNotifications gn(GetHostGcNotificationTable());
            if (!gn.IsActive())
            {
                status = E_OUTOFMEMORY;
            }
            else
            {
                GcEvtArgs *res = gn.GetNotification(*gcEvtArgs);
                if (res != NULL)
                {
                    *gcEvtArgs = *res;
                    status = S_OK;
                }
                else
                {
                    status = E_FAIL;
                }
            }
        }
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status)) { EX_RETHROW; }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

// Installs a GC notification into the host table and pushes the updated
// table out of process. (EX_CATCH continues on the next line of the file.)
STDMETHODIMP
ClrDataAccess::SetGcNotification(IN GcEvtArgs gcEvtArgs)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        if (gcEvtArgs.typ >= GC_EVENT_TYPE_MAX)
        {
            status = E_INVALIDARG;
        }
        else
        {
            GcNotifications gn(GetHostGcNotificationTable());
            if (!gn.IsActive())
            {
                status = E_OUTOFMEMORY;
            }
            else
            {
                if (gn.SetNotification(gcEvtArgs) && gn.UpdateOutOfProcTable())
                {
                    status = S_OK;
                }
                else
                {
                    status = E_FAIL;
                }
            }
        }
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

#ifdef _MSC_VER
#pragma warning(pop)
#endif // _MSC_VER

// One-time setup of the DAC: verifies host/target platform match, resolves
// the target runtime image base, and loads the DAC global table.
HRESULT
ClrDataAccess::Initialize(void)
{
    HRESULT hr;
    CLRDATA_ADDRESS base;

    //
    // We do not currently support cross-platform
    // debugging.  Verify that cross-platform is not
    // being attempted.
    //

    // Determine our platform based on the pre-processor macros set when we were built
#ifdef TARGET_UNIX
    #if defined(TARGET_X86)
        CorDebugPlatform hostPlatform = CORDB_PLATFORM_POSIX_X86;
    #elif defined(TARGET_AMD64)
        CorDebugPlatform hostPlatform = CORDB_PLATFORM_POSIX_AMD64;
    #elif defined(TARGET_ARM)
        CorDebugPlatform hostPlatform = CORDB_PLATFORM_POSIX_ARM;
    #elif defined(TARGET_ARM64)
        CorDebugPlatform hostPlatform = CORDB_PLATFORM_POSIX_ARM64;
    #else
        #error Unknown Processor.
    #endif
#else
    #if defined(TARGET_X86)
        CorDebugPlatform hostPlatform = CORDB_PLATFORM_WINDOWS_X86;
    #elif defined(TARGET_AMD64)
        CorDebugPlatform hostPlatform = CORDB_PLATFORM_WINDOWS_AMD64;
    #elif defined(TARGET_ARM)
        CorDebugPlatform hostPlatform = CORDB_PLATFORM_WINDOWS_ARM;
    #elif defined(TARGET_ARM64)
        CorDebugPlatform hostPlatform = CORDB_PLATFORM_WINDOWS_ARM64;
    #else
        #error Unknown Processor.
    #endif
#endif

    CorDebugPlatform targetPlatform;
    IfFailRet(m_pTarget->GetPlatform(&targetPlatform));

    if (targetPlatform != hostPlatform)
    {
        // DAC fatal error: Platform mismatch - the platform reported by the data target
        // is not what this version of mscordacwks.dll was built for.
        return CORDBG_E_UNCOMPATIBLE_PLATFORMS;
    }

    //
    // Get the current DLL base for mscorwks globals.
    // In case of multiple-CLRs, there may be multiple dlls named "mscorwks".
    // code:OpenVirtualProcess can take the base address (clrInstanceId) to select exactly
    // which CLR to is being target. If so, m_globalBase will already be set.
    //

    if (m_globalBase == 0)
    {
        // Caller didn't specify which CLR to debug, we should be using a legacy data target.
        if (m_pLegacyTarget == NULL)
        {
            DacError(E_INVALIDARG);
            UNREACHABLE();
        }

        ReleaseHolder<ICLRRuntimeLocator> pRuntimeLocator(NULL);
        if (m_pLegacyTarget->QueryInterface(__uuidof(ICLRRuntimeLocator),
                                            (void**)&pRuntimeLocator) != S_OK ||
            pRuntimeLocator->GetRuntimeBase(&base) != S_OK)
        {
            IfFailRet(m_pLegacyTarget->GetImageBase(TARGET_MAIN_CLR_DLL_NAME_W, &base));
        }

        m_globalBase = TO_TADDR(base);
    }

    // We don't need to try too hard to prevent
    // multiple initializations as each one will
    // copy the same data into the globals and so
    // cannot interfere with each other.
    if (!s_procInit)
    {
        IfFailRet(GetDacGlobals());
        IfFailRet(DacGetHostVtPtrs());
        s_procInit = true;
    }

    //
    // DAC is now setup and ready to use
    //

    // Do some validation
    IfFailRet(VerifyDlls());

    return S_OK;
}

// Walks the target thread store looking for a Thread whose id matches the
// given task id; returns NULL when there is no thread store or no match.
Thread*
ClrDataAccess::FindClrThreadByTaskId(ULONG64 taskId)
{
    Thread* thread = NULL;

    if (!ThreadStore::s_pThreadStore)
    {
        return NULL;
    }

    while ((thread = ThreadStore::GetAllThreadList(thread, 0, 0)))
    {
        if (thread->GetThreadId() == (DWORD)taskId)
        {
            return thread;
        }
    }

    return NULL;
}

// Cheap filter for code-address lookups: succeeds iff one byte at 'address'
// is readable in the target.
HRESULT
ClrDataAccess::IsPossibleCodeAddress(IN TADDR address)
{
    SUPPORTS_DAC;
    BYTE testRead;
    ULONG32 testDone;

    // First do a trivial check on the readability of the
    // address.  This makes for quick rejection of bogus
    // addresses that the debugger sends in when searching
    // stacks for return addresses.
    // XXX Microsoft - Will this cause problems in minidumps
    // where it's possible the stub is identifiable but
    // the stub code isn't present?  Yes, but the lack
    // of that code could confuse the walker on its own
    // if it does code analysis.
    if ((m_pTarget->ReadVirtual(address, &testRead, sizeof(testRead),
                                &testDone) != S_OK) ||
        !testDone)
    {
        return E_INVALIDARG;
    }

    return S_OK;
}

// Formats the fully-qualified name (namespace + signature + instantiation)
// of 'methodDesc' into 'symbol'; truncates if the buffer is too small and
// returns S_FALSE in that case. *symbolLen receives the required length
// including the terminator.
HRESULT
ClrDataAccess::GetFullMethodName(
    IN MethodDesc* methodDesc,
    IN ULONG32 symbolChars,
    OUT ULONG32* symbolLen,
    _Out_writes_to_opt_(symbolChars, *symbolLen) LPWSTR symbol
    )
{
    StackSString s;
#ifdef FEATURE_MINIMETADATA_IN_TRIAGEDUMPS
    PAL_CPP_TRY
    {
#endif // FEATURE_MINIMETADATA_IN_TRIAGEDUMPS

        TypeString::AppendMethodInternal(s, methodDesc, TypeString::FormatSignature|TypeString::FormatNamespace|TypeString::FormatFullInst);

#ifdef FEATURE_MINIMETADATA_IN_TRIAGEDUMPS
    }
    PAL_CPP_CATCH_ALL
    {
        // Metadata may be missing in a triage dump; fall back to the
        // mini-metadata EE-name cache before giving up.
        if (!MdCacheGetEEName(dac_cast<TADDR>(methodDesc), s))
        {
            PAL_CPP_RETHROW;
        }
    }
    PAL_CPP_ENDTRY
#endif // FEATURE_MINIMETADATA_IN_TRIAGEDUMPS

    if (symbol)
    {
        // Copy as much as we can and truncate the rest.
        wcsncpy_s(symbol, symbolChars, s.GetUnicode(), _TRUNCATE);
    }

    if (symbolLen)
        *symbolLen = s.GetCount() + 1;

    if (symbol != NULL && symbolChars < (s.GetCount() + 1))
        return S_FALSE;
    else
        return S_OK;
}

// Maps 'address' to the name of a JIT helper, or NULL if it is not a helper.
// When dynamicHelpersOnly is false the static helper table is searched too.
// (Continues on the next lines of the file.)
PCSTR
ClrDataAccess::GetJitHelperName(
    IN TADDR address,
    IN bool dynamicHelpersOnly /*=false*/
    )
{
    const static PCSTR s_rgHelperNames[] = {
#define JITHELPER(code,fn,sig) #code,
#include <jithelpers.h>
    };
    static_assert_no_msg(ARRAY_SIZE(s_rgHelperNames) == CORINFO_HELP_COUNT);

#ifdef TARGET_UNIX
    if (!dynamicHelpersOnly)
#else
    if (!dynamicHelpersOnly && g_runtimeLoadedBaseAddress <= address &&
            address < g_runtimeLoadedBaseAddress + g_runtimeVirtualSize)
#endif // TARGET_UNIX
    {
        // Read the whole table from the target in one shot for better performance
        VMHELPDEF * pTable = static_cast<VMHELPDEF *>(
            PTR_READ(dac_cast<TADDR>(&hlpFuncTable), CORINFO_HELP_COUNT * sizeof(VMHELPDEF)));

        for (int i = 0; i < CORINFO_HELP_COUNT; i++)
        {
            if (address == (TADDR)(pTable[i].pfnHelper))
                return s_rgHelperNames[i];
        }
    }

    // Check if its a dynamically generated JIT helper
    const static CorInfoHelpFunc s_rgDynamicHCallIds[] = {
#define DYNAMICJITHELPER(code, fn, sig) code,
#define JITHELPER(code, fn,sig)
#include <jithelpers.h>
    };

    // Read the whole table from the target in one shot for better performance
    VMHELPDEF * pDynamicTable = static_cast<VMHELPDEF *>(
        PTR_READ(dac_cast<TADDR>(&hlpDynamicFuncTable), DYNAMIC_CORINFO_HELP_COUNT * sizeof(VMHELPDEF)));
    for (unsigned d = 0; d < DYNAMIC_CORINFO_HELP_COUNT; d++)
    {
        if (address == (TADDR)(pDynamicTable[d].pfnHelper))
        {
            return s_rgHelperNames[s_rgDynamicHCallIds[d]];
        }
    }

    return NULL;
}

// Produces a symbolic name for an arbitrary runtime code address: a full
// method name for jitted code, a "CLRStub[...]@addr" form for stub-managed
// addresses, or a JIT helper name. Returns S_FALSE with a size estimate in
// *symbolLen when the buffer is too small, E_NOINTERFACE when the address
// is not runtime code.
HRESULT
ClrDataAccess::RawGetMethodName(
    /* [in] */ CLRDATA_ADDRESS address,
    /* [in] */ ULONG32 flags,
    /* [in] */ ULONG32 bufLen,
    /* [out] */ ULONG32 *symbolLen,
    /* [size_is][out] */ _Out_writes_bytes_opt_(bufLen) WCHAR symbolBuf[ ],
    /* [out] */ CLRDATA_ADDRESS* displacement)
{
#ifdef TARGET_ARM
    // Thumb addresses carry bit 0; strip it before code lookups.
    _ASSERTE((address & THUMB_CODE) == 0);
    address &= ~THUMB_CODE;
#endif

    const UINT k_cch64BitHexFormat = ARRAY_SIZE("1234567812345678");
    HRESULT status;

    if (flags != 0)
    {
        return E_INVALIDARG;
    }

    TADDR taddr;
    if( (status = TRY_CLRDATA_ADDRESS_TO_TADDR(address, &taddr)) != S_OK )
    {
        return status;
    }

    if ((status = IsPossibleCodeAddress(taddr)) != S_OK)
    {
        return status;
    }

    PTR_StubManager pStubManager;
    MethodDesc* methodDesc = NULL;

    {
        // Fast path: the address lies in jitted code.
        EECodeInfo codeInfo(TO_TADDR(address));
        if (codeInfo.IsValid())
        {
            if (displacement)
            {
                *displacement = codeInfo.GetRelOffset();
            }
            methodDesc = codeInfo.GetMethodDesc();
            goto NameFromMethodDesc;
        }
    }

    pStubManager = StubManager::FindStubManager(TO_TADDR(address));
    if (pStubManager != NULL)
    {
        if (displacement)
        {
            *displacement = 0;
        }

        //
        // Special-cased stub managers
        //
        if (pStubManager == PrecodeStubManager::g_pManager)
        {
            PCODE alignedAddress = AlignDown(TO_TADDR(address), PRECODE_ALIGNMENT);

#ifdef TARGET_ARM
            alignedAddress += THUMB_CODE;
#endif

            SIZE_T maxPrecodeSize = sizeof(StubPrecode);

#ifdef HAS_THISPTR_RETBUF_PRECODE
            maxPrecodeSize = max(maxPrecodeSize, sizeof(ThisPtrRetBufPrecode));
#endif

            // Scan backwards over every possible precode alignment slot;
            // bad slots throw, which we swallow and continue.
            for (SIZE_T i = 0; i < maxPrecodeSize / PRECODE_ALIGNMENT; i++)
            {
                EX_TRY
                {
                    // Try to find matching precode entrypoint
                    Precode* pPrecode = Precode::GetPrecodeFromEntryPoint(alignedAddress, TRUE);
                    if (pPrecode != NULL)
                    {
                        methodDesc = pPrecode->GetMethodDesc();
                        if (methodDesc != NULL)
                        {
                            if (DacValidateMD(methodDesc))
                            {
                                if (displacement)
                                {
                                    *displacement = TO_TADDR(address) - PCODEToPINSTR(alignedAddress);
                                }
                                goto NameFromMethodDesc;
                            }
                        }
                    }
                    alignedAddress -= PRECODE_ALIGNMENT;
                }
                EX_CATCH
                {
                }
                EX_END_CATCH(SwallowAllExceptions)
            }
        }
        else
        if (pStubManager == JumpStubStubManager::g_pManager)
        {
            // Resolve through the jump stub and name the real target.
            PCODE pTarget = decodeBackToBackJump(TO_TADDR(address));

            HRESULT hr = GetRuntimeNameByAddress(pTarget, flags, bufLen, symbolLen, symbolBuf, NULL);
            if (SUCCEEDED(hr))
            {
                return hr;
            }

            PCSTR pHelperName = GetJitHelperName(pTarget);
            if (pHelperName != NULL)
            {
                hr = ConvertUtf8(pHelperName, bufLen, symbolLen, symbolBuf);
                if (FAILED(hr))
                    return S_FALSE;

                return hr;
            }
        }

        static WCHAR s_wszFormatNameWithStubManager[] = W("CLRStub[%s]@%I64x");

        LPCWSTR wszStubManagerName = pStubManager->GetStubManagerName(TO_TADDR(address));
        _ASSERTE(wszStubManagerName != NULL);

        int result = _snwprintf_s(
            symbolBuf,
            bufLen,
            _TRUNCATE,
            s_wszFormatNameWithStubManager,
            wszStubManagerName,                                         // Arg 1 = stub name
            TO_TADDR(address));                                         // Arg 2 = stub hex address

        if (result != -1)
        {
            // Printf succeeded, so we have an exact char count to return
            if (symbolLen)
            {
                size_t cchSymbol = wcslen(symbolBuf) + 1;
                if (!FitsIn<ULONG32>(cchSymbol))
                    return COR_E_OVERFLOW;

                *symbolLen = (ULONG32) cchSymbol;
            }
            return S_OK;
        }

        // Printf failed.  Estimate a size that will be at least big enough to hold the name
        if (symbolLen)
        {
            size_t cchSymbol = ARRAY_SIZE(s_wszFormatNameWithStubManager) +
                wcslen(wszStubManagerName) +
                k_cch64BitHexFormat +
                1;

            if (!FitsIn<ULONG32>(cchSymbol))
                return COR_E_OVERFLOW;

            *symbolLen = (ULONG32) cchSymbol;
        }
        return S_FALSE;
    }

    // Do not waste time looking up name for static helper. Debugger can get the actual name from .pdb.
    //
        PCODE methodStart = methodDesc->GetNativeCode();
        if (!methodStart)
        {
            return E_NOINTERFACE;
        }

        EECodeInfo codeInfo(methodStart);
        _ASSERTE(codeInfo.IsValid());

        TADDR codeSize = codeInfo.GetCodeManager()->GetFunctionSize(codeInfo.GetGCInfoToken());

        *extents = new (nothrow) METH_EXTENTS;
        if (!*extents)
        {
            return E_OUTOFMEMORY;
        }

        // Single contiguous extent covering the whole native body.
        (*extents)->numExtents = 1;
        curExtent = (*extents)->extents;
        curExtent->startAddress = TO_CDADDR(methodStart);
        curExtent->endAddress =
            curExtent->startAddress + codeSize;
        curExtent++;
    }

    (*extents)->curExtent = 0;

    return S_OK;
}

// Allocator to pass to the debug-info-stores...
BYTE* DebugInfoStoreNew(void * pData, size_t cBytes)
{
    return new (nothrow) BYTE[cBytes];
}

// Retrieves native variable (locals/arguments) debug info for a method.
// When 'address' is non-NULL it selects the matching native code version;
// otherwise the method's current native code is used.
HRESULT
ClrDataAccess::GetMethodVarInfo(MethodDesc* methodDesc,
                                TADDR address,
                                ULONG32* numVarInfo,
                                ICorDebugInfo::NativeVarInfo** varInfo,
                                ULONG32* codeOffset)
{
    SUPPORTS_DAC;
    COUNT_T countNativeVarInfo;
    NewHolder<ICorDebugInfo::NativeVarInfo> nativeVars(NULL);

    TADDR nativeCodeStartAddr;
    if (address != NULL)
    {
        NativeCodeVersion requestedNativeCodeVersion = ExecutionManager::GetNativeCodeVersion(address);
        if (requestedNativeCodeVersion.IsNull() || requestedNativeCodeVersion.GetNativeCode() == NULL)
        {
            return E_INVALIDARG;
        }
        nativeCodeStartAddr = PCODEToPINSTR(requestedNativeCodeVersion.GetNativeCode());
    }
    else
    {
        nativeCodeStartAddr = PCODEToPINSTR(methodDesc->GetNativeCode());
    }

    DebugInfoRequest request;
    request.InitFromStartingAddr(methodDesc, nativeCodeStartAddr);

    BOOL success = DebugInfoManager::GetBoundariesAndVars(
        request,
        DebugInfoStoreNew, NULL, // allocator
        NULL, NULL,
        &countNativeVarInfo, &nativeVars);

    if (!success)
    {
        return E_FAIL;
    }

    if (!nativeVars || !countNativeVarInfo)
    {
        return E_NOINTERFACE;
    }

    *numVarInfo = countNativeVarInfo;
    *varInfo = nativeVars;
    nativeVars.SuppressRelease(); // To prevent NewHolder from releasing the memory

    if (codeOffset)
    {
        *codeOffset = (ULONG32)(address - nativeCodeStartAddr);
    }
    return S_OK;
}

// Builds the IL-to-native offset map for a method, converting the debug-info
// store's OffsetMapping records into DebuggerILToNativeMap entries. The map
// is heap-allocated; *mapAllocated is set so the caller knows to free it.
HRESULT
ClrDataAccess::GetMethodNativeMap(MethodDesc* methodDesc,
                                  TADDR address,
                                  ULONG32* numMap,
                                  DebuggerILToNativeMap** map,
                                  bool* mapAllocated,
                                  CLRDATA_ADDRESS* codeStart,
                                  ULONG32* codeOffset)
{
    _ASSERTE((codeOffset == NULL) || (address != NULL));

    // Use the DebugInfoStore to get IL->Native maps.
    // It doesn't matter whether we're jitted, ngenned etc.
    TADDR nativeCodeStartAddr;
    if (address != NULL)
    {
        NativeCodeVersion requestedNativeCodeVersion = ExecutionManager::GetNativeCodeVersion(address);
        if (requestedNativeCodeVersion.IsNull() || requestedNativeCodeVersion.GetNativeCode() == NULL)
        {
            return E_INVALIDARG;
        }
        nativeCodeStartAddr = PCODEToPINSTR(requestedNativeCodeVersion.GetNativeCode());
    }
    else
    {
        nativeCodeStartAddr = PCODEToPINSTR(methodDesc->GetNativeCode());
    }

    DebugInfoRequest request;
    request.InitFromStartingAddr(methodDesc, nativeCodeStartAddr);

    // Bounds info.
    ULONG32 countMapCopy;
    NewHolder<ICorDebugInfo::OffsetMapping> mapCopy(NULL);

    BOOL success = DebugInfoManager::GetBoundariesAndVars(
        request,
        DebugInfoStoreNew, NULL, // allocator
        &countMapCopy, &mapCopy,
        NULL, NULL);

    if (!success)
    {
        return E_FAIL;
    }

    // Need to convert map formats.
    *numMap = countMapCopy;

    *map = new (nothrow) DebuggerILToNativeMap[countMapCopy];
    if (!*map)
    {
        return E_OUTOFMEMORY;
    }

    // Each entry's native end offset is the next entry's start; the final
    // entry is capped at 0 below.
    ULONG32 i;
    for (i = 0; i < *numMap; i++)
    {
        (*map)[i].ilOffset = mapCopy[i].ilOffset;
        (*map)[i].nativeStartOffset = mapCopy[i].nativeOffset;
        if (i > 0)
        {
            (*map)[i - 1].nativeEndOffset = (*map)[i].nativeStartOffset;
        }
        (*map)[i].source = mapCopy[i].source;
    }
    if (*numMap >= 1)
    {
        (*map)[i - 1].nativeEndOffset = 0;
    }

    // Update various out params.
    if (codeStart)
    {
        *codeStart = TO_CDADDR(nativeCodeStartAddr);
    }
    if (codeOffset)
    {
        *codeOffset = (ULONG32)(address - nativeCodeStartAddr);
    }

    *mapAllocated = true;
    return S_OK;
}

// Get the MethodDesc for a function
// Arguments:
//    Input:
//        pModule   - pointer to the module for the function
//        memberRef - metadata token for the function
// Return Value:
//        MethodDesc for the function
MethodDesc * ClrDataAccess::FindLoadedMethodRefOrDef(Module* pModule,
                                                     mdToken memberRef)
{
    CONTRACT(MethodDesc *)
    {
        GC_NOTRIGGER;
        PRECONDITION(CheckPointer(pModule));
        POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
    }
    CONTRACT_END;

    // Must have a MemberRef or a MethodDef
    mdToken tkType = TypeFromToken(memberRef);
    _ASSERTE((tkType == mdtMemberRef) || (tkType == mdtMethodDef));

    if (tkType == mdtMemberRef)
    {
        RETURN pModule->LookupMemberRefAsMethod(memberRef);
    }

    RETURN pModule->LookupMethodDef(memberRef);
} // FindLoadedMethodRefOrDef

//
// ReportMem - report a region of memory for dump gathering
//
// If you specify that you expect success, any failure will cause ReportMem to
// return false.  If you do not expect success, true is always returned.
// This function only throws when all dump collection should be cancelled.
//
// Arguments:
//     addr           - the starting target address for the memory to report
//     size           - the length (in bytes) to report
//     fExpectSuccess - if true (the default), then we expect that this region of memory
//                      should be fully readable.  Any read errors indicate a corrupt target.
//
bool ClrDataAccess::ReportMem(TADDR addr, TSIZE_T size, bool fExpectSuccess /*= true*/)
{
    SUPPORTS_DAC_HOST_ONLY;

    // This block of code is to help debugging blocks that we report
    // to minidump/heapdump. You can set break point here to view the static
    // variable to figure out the size of blocks that we are reporting.
    // Most useful is set conditional break point to catch large chuck of
    // memory. We will leave it here for all builds.
// static TADDR debugAddr; static TSIZE_T debugSize; debugAddr = addr; debugSize = size; HRESULT status; if (!addr || addr == (TADDR)-1 || !size) { if (fExpectSuccess) return false; else return true; } // // Try and sanity-check the reported region of memory // #ifdef _DEBUG // in debug builds, sanity-check all reports const TSIZE_T k_minSizeToCheck = 1; #else // in retail builds, only sanity-check larger chunks which have the potential to waste a // lot of time and/or space. This avoids the overhead of checking for the majority of // memory regions (which are small). const TSIZE_T k_minSizeToCheck = 1024; #endif if (size >= k_minSizeToCheck) { if (!IsFullyReadable(addr, size)) { if (!fExpectSuccess) { // We know the read might fail (eg. we're trying to find mapped pages in // a module image), so just skip this block silently. // Note that the EnumMemoryRegion callback won't necessarily do anything if any part of // the region is unreadable, and so there is no point in calling it. For cases where we expect // the read might fail, but we want to report any partial blocks, we have to break up the region // into pages and try reporting each page anyway return true; } // We're reporting bogus memory, so the target must be corrupt (or there is a issue). We should abort // reporting and continue with the next data structure (where the exception is caught), // just like we would for a DAC read error (otherwise we might do something stupid // like get into an infinite loop, or otherwise waste time with corrupt data). TARGET_CONSISTENCY_CHECK(false, "Found unreadable memory while reporting memory regions for dump gathering"); return false; } } // Minidumps should never contain data structures that are anywhere near 4MB. If we see this, it's // probably due to memory corruption. To keep the dump small, we'll truncate the block. Note that // the size to which the block is truncated is pretty unique, so should be good evidence in a dump // that this has happened. 
// Note that it's hard to say what a good value would be here, or whether we should dump any of the // data structure at all. Hopefully experience will help guide this going forward. // @dbgtodo : Extend dump-gathering API to allow a dump-log to be included. const TSIZE_T kMaxMiniDumpRegion = 4*1024*1024 - 3; // 4MB-3 if( size > kMaxMiniDumpRegion && (m_enumMemFlags == CLRDATA_ENUM_MEM_MINI || m_enumMemFlags == CLRDATA_ENUM_MEM_TRIAGE)) { TARGET_CONSISTENCY_CHECK( false, "Dump target consistency failure - truncating minidump data structure"); size = kMaxMiniDumpRegion; } // track the total memory reported. m_cbMemoryReported += size; // ICLRData APIs take only 32-bit sizes. In practice this will almost always be sufficient, but // in theory we might have some >4GB ranges on large 64-bit processes doing a heap dump // (for example, the code:LoaderHeap). If necessary, break up the reporting into maximum 4GB // chunks so we can use the existing API. // @dbgtodo : ICorDebugDataTarget should probably use 64-bit sizes while (size) { ULONG32 enumSize; if (size > UINT32_MAX) { enumSize = UINT32_MAX; } else { enumSize = (ULONG32)size; } // Actually perform the memory reporting callback status = m_enumMemCb->EnumMemoryRegion(TO_CDADDR(addr), enumSize); if (status != S_OK) { // If dump generation was cancelled, allow us to throw upstack so we'll actually quit. if ((fExpectSuccess) && (status != COR_E_OPERATIONCANCELED)) return false; } // If the return value of EnumMemoryRegion is COR_E_OPERATIONCANCELED, // it means that user has requested that the minidump gathering be canceled. // To do this we throw an exception which is caught in EnumMemoryRegionsWrapper. 
if (status == COR_E_OPERATIONCANCELED) { ThrowHR(status); } // Move onto the next chunk (if any) size -= enumSize; addr += enumSize; } return true; } // // DacUpdateMemoryRegion - updates/poisons a region of memory of generated dump // // Parameters: // addr - target address of the beginning of the memory region // bufferSize - number of bytes to update/poison // buffer - data to be written at given target address // bool ClrDataAccess::DacUpdateMemoryRegion(TADDR addr, TSIZE_T bufferSize, BYTE* buffer) { SUPPORTS_DAC_HOST_ONLY; HRESULT status; if (!addr || addr == (TADDR)-1 || !bufferSize) { return false; } // track the total memory reported. m_cbMemoryReported += bufferSize; if (m_updateMemCb == NULL) { return false; } // Actually perform the memory updating callback status = m_updateMemCb->UpdateMemoryRegion(TO_CDADDR(addr), (ULONG32)bufferSize, buffer); if (status != S_OK) { return false; } return true; } // // Check whether a region of target memory is fully readable. // // Arguments: // addr The base target address of the region // size The size of the region to analyze // // Return value: // True if the entire regions appears to be readable, false otherwise. // // Notes: // The motivation here is that reporting large regions of unmapped address space to dbgeng can result in // it taking a long time trying to identify a valid subrange. This can happen when the target // memory is corrupt, and we enumerate a data structure with a dynamic size. Ideally we would just spec // the ICLRDataEnumMemoryRegionsCallback API to require the client to fail if it detects an unmapped // memory address in the region. However, we can't change the existing dbgeng code, so for now we'll // rely on this heuristic here. // @dbgtodo : Try and get the dbg team to change their EnumMemoryRegion behavior. 
See DevDiv Bugs 6265 // bool ClrDataAccess::IsFullyReadable(TADDR taBase, TSIZE_T dwSize) { // The only way we have to verify that a memory region is readable is to try reading it in it's // entirety. This is potentially expensive, so we'll rely on a heuristic that spot-checks various // points in the region. // Ensure we've got something to check if( dwSize == 0 ) return true; // Check for overflow TADDR taEnd = DacTAddrOffset(taBase, dwSize, 1); // Loop through using expontential growth, being sure to check both the first and last byte TADDR taCurr = taBase; TSIZE_T dwInc = 4096; bool bDone = false; while (!bDone) { // Try and read a byte from the target. Note that we don't use PTR_BYTE here because we don't want // the overhead of inserting entries into the DAC instance cache. BYTE b; ULONG32 dwBytesRead; HRESULT hr = m_pTarget->ReadVirtual(taCurr, &b, 1, &dwBytesRead); if( hr != S_OK || dwBytesRead < 1 ) { return false; } if (taEnd - taCurr <= 1) { // We just read the last byte so we're done _ASSERTE( taCurr = taEnd - 1 ); bDone = true; } else if (dwInc == 0 || dwInc >= taEnd - taCurr) { // we've reached the end of the exponential series, check the last byte taCurr = taEnd - 1; } else { // advance current pointer (subtraction above ensures this won't overflow) taCurr += dwInc; // double the increment for next time (or set to 0 if it's already the max) dwInc <<= 1; } } return true; } JITNotification* ClrDataAccess::GetHostJitNotificationTable() { if (m_jitNotificationTable == NULL) { m_jitNotificationTable = JITNotifications::InitializeNotificationTable(1000); } return m_jitNotificationTable; } GcNotification* ClrDataAccess::GetHostGcNotificationTable() { if (m_gcNotificationTable == NULL) { m_gcNotificationTable = GcNotifications::InitializeNotificationTable(128); } return m_gcNotificationTable; } /* static */ bool ClrDataAccess::GetMetaDataFileInfoFromPEFile(PEAssembly *pPEAssembly, DWORD &dwTimeStamp, DWORD &dwSize, DWORD &dwDataSize, DWORD &dwRvaHint, bool 
&isNGEN, _Out_writes_(cchFilePath) LPWSTR wszFilePath, const DWORD cchFilePath) { SUPPORTS_DAC_HOST_ONLY; PEImage *mdImage = NULL; PEImageLayout *layout; IMAGE_DATA_DIRECTORY *pDir = NULL; COUNT_T uniPathChars = 0; isNGEN = false; if (pDir == NULL || pDir->Size == 0) { mdImage = pPEAssembly->GetPEImage(); if (mdImage != NULL) { layout = mdImage->GetLoadedLayout(); pDir = &layout->GetCorHeader()->MetaData; // In IL image case, we do not have any hint to IL metadata since it is stored // in the corheader. // dwRvaHint = 0; dwDataSize = pDir->Size; } else { return false; } } // Do not fail if path can not be read. Triage dumps don't have paths and we want to fallback // on searching metadata from IL image. mdImage->GetPath().DacGetUnicode(cchFilePath, wszFilePath, &uniPathChars); if (!mdImage->HasNTHeaders() || !mdImage->HasCorHeader() || !mdImage->HasLoadedLayout() || (uniPathChars > cchFilePath)) { return false; } // It is possible that the module is in-memory. That is the wszFilePath here is empty. // We will try to use the module name instead in this case for hosting debugger // to find match. if (wcslen(wszFilePath) == 0) { mdImage->GetModuleFileNameHintForDAC().DacGetUnicode(cchFilePath, wszFilePath, &uniPathChars); if (uniPathChars > cchFilePath) { return false; } } dwTimeStamp = layout->GetTimeDateStamp(); dwSize = (ULONG32)layout->GetVirtualSize(); return true; } /* static */ bool ClrDataAccess::GetILImageInfoFromNgenPEFile(PEAssembly *pPEAssembly, DWORD &dwTimeStamp, DWORD &dwSize, _Out_writes_(cchFilePath) LPWSTR wszFilePath, const DWORD cchFilePath) { SUPPORTS_DAC_HOST_ONLY; DWORD dwWritten = 0; // use the IL File name if (!pPEAssembly->GetPath().DacGetUnicode(cchFilePath, wszFilePath, (COUNT_T *)(&dwWritten))) { // Use DAC hint to retrieve the IL name. 
pPEAssembly->GetModuleFileNameHint().DacGetUnicode(cchFilePath, wszFilePath, (COUNT_T *)(&dwWritten)); } dwTimeStamp = 0; dwSize = 0; return true; } void * ClrDataAccess::GetMetaDataFromHost(PEAssembly* pPEAssembly, bool* isAlternate) { DWORD imageTimestamp, imageSize, dataSize; void* buffer = NULL; WCHAR uniPath[MAX_LONGPATH] = {0}; bool isAlt = false; bool isNGEN = false; DAC_INSTANCE* inst = NULL; HRESULT hr = S_OK; DWORD ulRvaHint; // // We always ask for the IL image metadata, // as we expect that to be more // available than others. The drawback is that // there may be differences between the IL image // metadata and native image metadata, so we // have to mark such alternate metadata so that // we can fail unsupported usage of it. // // Microsoft - above comment seems to be an unimplemented thing. // The DAC_MD_IMPORT.isAlternate field gets ultimately set, but // on the searching I did, I cannot find any usage of it // other than in the ctor. Should we be doing something, or should // we remove this comment and the isAlternate field? // It's possible that test will want us to track whether we have // an IL image's metadata loaded against an NGEN'ed image // so the field remains for now. if (!ClrDataAccess::GetMetaDataFileInfoFromPEFile( pPEAssembly, imageTimestamp, imageSize, dataSize, ulRvaHint, isNGEN, uniPath, ARRAY_SIZE(uniPath))) { return NULL; } // try direct match for the image that is loaded into the managed process pPEAssembly->GetLoadedMetadata((COUNT_T *)(&dataSize)); DWORD allocSize = 0; if (!ClrSafeInt<DWORD>::addition(dataSize, sizeof(DAC_INSTANCE), allocSize)) { DacError(HRESULT_FROM_WIN32(ERROR_ARITHMETIC_OVERFLOW)); } inst = m_instances.Alloc(0, allocSize, DAC_DPTR); if (!inst) { DacError(E_OUTOFMEMORY); return NULL; } buffer = (void*)(inst + 1); // APIs implemented by hosting debugger. It can use the path/filename, timestamp, and // pPEAssembly size to find an exact match for the image. 
If that fails for an ngen'ed image, // we can request the IL image which it came from. if (m_legacyMetaDataLocator) { // Legacy API implemented by hosting debugger. hr = m_legacyMetaDataLocator->GetMetadata( uniPath, imageTimestamp, imageSize, NULL, // MVID - not used yet ulRvaHint, 0, // flags - reserved for future. dataSize, (BYTE*)buffer, NULL); } else { hr = m_target3->GetMetaData( uniPath, imageTimestamp, imageSize, NULL, // MVID - not used yet ulRvaHint, 0, // flags - reserved for future. dataSize, (BYTE*)buffer, NULL); } if (FAILED(hr) && isNGEN) { // We failed to locate the ngen'ed image. We should try to // find the matching IL image // isAlt = true; if (!ClrDataAccess::GetILImageInfoFromNgenPEFile( pPEAssembly, imageTimestamp, imageSize, uniPath, ARRAY_SIZE(uniPath))) { goto ErrExit; } const WCHAR* ilExtension = W("dll"); WCHAR ngenImageName[MAX_LONGPATH] = {0}; if (wcscpy_s(ngenImageName, ARRAY_SIZE(ngenImageName), uniPath) != 0) { goto ErrExit; } if (wcscpy_s(uniPath, ARRAY_SIZE(uniPath), ngenImageName) != 0) { goto ErrExit; } // RVA size in ngen image and IL image is the same. Because the only // different is in RVA. That is 4 bytes column fixed. // // try again if (m_legacyMetaDataLocator) { hr = m_legacyMetaDataLocator->GetMetadata( uniPath, imageTimestamp, imageSize, NULL, // MVID - not used yet 0, // pass zero hint here... important 0, // flags - reserved for future. dataSize, (BYTE*)buffer, NULL); } else { hr = m_target3->GetMetaData( uniPath, imageTimestamp, imageSize, NULL, // MVID - not used yet 0, // pass zero hint here... important 0, // flags - reserved for future. 
dataSize, (BYTE*)buffer, NULL); } } if (FAILED(hr)) { goto ErrExit; } *isAlternate = isAlt; m_instances.AddSuperseded(inst); return buffer; ErrExit: if (inst != NULL) { m_instances.ReturnAlloc(inst); } return NULL; } //++++++++++++++++++++++++++++++++++++++++++++++++++++++++ // // Given a PEAssembly or a ReflectionModule try to find the corresponding metadata // We will first ask debugger to locate it. If fail, we will try // to get it from the target process // //++++++++++++++++++++++++++++++++++++++++++++++++++++++++ IMDInternalImport* ClrDataAccess::GetMDImport(const PEAssembly* pPEAssembly, const ReflectionModule* reflectionModule, bool throwEx) { HRESULT status; PTR_CVOID mdBaseTarget = NULL; COUNT_T mdSize; IMDInternalImport* mdImport = NULL; PVOID mdBaseHost = NULL; bool isAlternate = false; _ASSERTE((pPEAssembly == NULL && reflectionModule != NULL) || (pPEAssembly != NULL && reflectionModule == NULL)); TADDR peAssemblyAddr = (pPEAssembly != NULL) ? dac_cast<TADDR>(pPEAssembly) : dac_cast<TADDR>(reflectionModule); // // Look for one we've already created. // mdImport = m_mdImports.Get(peAssemblyAddr); if (mdImport != NULL) { return mdImport; } if (pPEAssembly != NULL) { // Get the metadata size mdBaseTarget = const_cast<PEAssembly*>(pPEAssembly)->GetLoadedMetadata(&mdSize); } else if (reflectionModule != NULL) { // Get the metadata PTR_SBuffer metadataBuffer = reflectionModule->GetDynamicMetadataBuffer(); if (metadataBuffer != PTR_NULL) { mdBaseTarget = dac_cast<PTR_CVOID>((metadataBuffer->DacGetRawBuffer()).StartAddress()); mdSize = metadataBuffer->GetSize(); } else { if (throwEx) { DacError(E_FAIL); } return NULL; } } else { if (throwEx) { DacError(E_FAIL); } return NULL; } if (mdBaseTarget == PTR_NULL) { mdBaseHost = NULL; } else { // // Maybe the target process has the metadata // Find out where the metadata for the image is // in the target's memory. // // // Read the metadata into the host process. Make sure pass in false in the last // parameter. 
This is only matters when producing skinny mini-dump. This will // prevent metadata gets reported into mini-dump. // mdBaseHost = DacInstantiateTypeByAddressNoReport(dac_cast<TADDR>(mdBaseTarget), mdSize, false); } // Try to see if debugger can locate it if (pPEAssembly != NULL && mdBaseHost == NULL && (m_target3 || m_legacyMetaDataLocator)) { // We couldn't read the metadata from memory. Ask // the target for metadata as it may be able to // provide it from some alternate means. mdBaseHost = GetMetaDataFromHost(const_cast<PEAssembly *>(pPEAssembly), &isAlternate); } if (mdBaseHost == NULL) { // cannot locate metadata anywhere if (throwEx) { DacError(E_INVALIDARG); } return NULL; } // // Open the MD interface on the host copy of the metadata. // status = GetMDInternalInterface(mdBaseHost, mdSize, ofRead, IID_IMDInternalImport, (void**)&mdImport); if (status != S_OK) { if (throwEx) { DacError(status); } return NULL; } // // Remember the object for this module for // possible later use. // The m_mdImports list does get cleaned up by calls to ClrDataAccess::Flush, // i.e. every time the process changes state. if (m_mdImports.Add(peAssemblyAddr, mdImport, isAlternate) == NULL) { mdImport->Release(); DacError(E_OUTOFMEMORY); } return mdImport; } // // Set whether inconsistencies in the target should raise asserts. // This overrides the default initial setting. // // Arguments: // fEnableAsserts - whether ASSERTs in dacized code should be enabled // void ClrDataAccess::SetTargetConsistencyChecks(bool fEnableAsserts) { LIMITED_METHOD_DAC_CONTRACT; m_fEnableTargetConsistencyAsserts = fEnableAsserts; } // // Get whether inconsistencies in the target should raise asserts. // // Return value: // whether ASSERTs in dacized code should be enabled // // Notes: // The implementation of ASSERT accesses this via code:DacTargetConsistencyAssertsEnabled // // By default, this is disabled, unless COMPlus_DbgDACEnableAssert is set (see code:ClrDataAccess::ClrDataAccess). 
//     This is necessary for compatibility. For example, SOS expects to be able to scan for
//     valid MethodTables etc. (which may cause ASSERTs), and also doesn't want ASSERTs when working
//     with targets with corrupted memory.
//
//     Calling code:ClrDataAccess::SetTargetConsistencyChecks overrides the default setting.
//
bool ClrDataAccess::TargetConsistencyAssertsEnabled()
{
    LIMITED_METHOD_DAC_CONTRACT;
    return m_fEnableTargetConsistencyAsserts;
}

//
// VerifyDlls - Validate that the mscorwks in the target matches this version of mscordacwks
// Only done on Windows and Mac builds at the moment.
// See code:CordbProcess::CordbProcess#DBIVersionChecking for more information regarding version checking.
//
// Returns S_OK when the debug-directory timestamps match (or the check is
// skipped/unsupported), CORDBG_E_MISMATCHED_CORWKS_AND_DACWKS_DLLS on mismatch.
//
HRESULT ClrDataAccess::VerifyDlls()
{
#ifndef TARGET_UNIX
    // Provide a knob for disabling this check if we really want to try and proceed anyway with a
    // DAC mismatch. DAC behavior may be arbitrarily bad - globals probably won't be at the same
    // address, data structures may be laid out differently, etc.
    if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_DbgDACSkipVerifyDlls))
    {
        return S_OK;
    }

    // Read the debug directory timestamp from the target mscorwks image using DAC
    // Note that we don't use the PE timestamp because the PE pPEAssembly might be changed in ways
    // that don't effect the PDB (and therefore don't effect DAC). Specifically, we rebase
    // our DLLs at the end of a build, that changes the PE pPEAssembly, but not the PDB.
    // Note that if we wanted to be extra careful, we could read the CV contents (which includes
    // the GUID signature) and verify it matches. Using the timestamp is useful for helpful error
    // messages, and should be sufficient in any real scenario.
    DWORD timestamp = 0;
    HRESULT hr = S_OK;
    DAC_ENTER();
    EX_TRY
    {
        // Note that we don't need to worry about ensuring the image memory read by this code
        // is saved in a minidump. Managed minidump debugging already requires that you have
        // the full mscorwks.dll available at debug time (eg. windbg won't even load DAC without it).
        PEDecoder pedecoder(dac_cast<PTR_VOID>(m_globalBase));

        // We use the first codeview debug directory entry since this should always refer to the single
        // PDB for mscorwks.dll.
        const UINT k_maxDebugEntries = 32;    // a reasonable upper limit in case of corruption
        for( UINT i = 0; i < k_maxDebugEntries; i++)
        {
            PTR_IMAGE_DEBUG_DIRECTORY pDebugEntry = pedecoder.GetDebugDirectoryEntry(i);

            // If there are no more entries, then stop
            if (pDebugEntry == NULL)
                break;

            // Ignore non-codeview entries. Some scenarios (eg. optimized builds), there may be extra
            // debug directory entries at the end of some other type.
            if (pDebugEntry->Type == IMAGE_DEBUG_TYPE_CODEVIEW)
            {
                // Found a codeview entry - use it's timestamp for comparison
                timestamp = pDebugEntry->TimeDateStamp;
                break;
            }
        }
        char szMsgBuf[1024];
        _snprintf_s(szMsgBuf, sizeof(szMsgBuf), _TRUNCATE,
            "Failed to find any valid codeview debug directory entry in %s image",
            MAIN_CLR_MODULE_NAME_A);
        _ASSERTE_MSG(timestamp != 0, szMsgBuf);
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &hr))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)
    DAC_LEAVE();
    if (FAILED(hr))
    {
        return hr;
    }

    // Validate that we got a timestamp and it matches what the DAC table told us to expect
    if (timestamp == 0 || timestamp != g_dacTableInfo.dwID0)
    {
        // Timestamp mismatch. This means mscordacwks is being used with a version of
        // mscorwks other than the one it was built for. This will not work reliably.

#ifdef _DEBUG
        // Check if verbose asserts are enabled. The default is up to the specific instantiation of
        // ClrDataAccess, but can be overridden (in either direction) by a COMPlus_ knob.
        // Note that we check this knob every time because it may be handy to turn it on in
        // the environment mid-flight.
        DWORD dwAssertDefault = m_fEnableDllVerificationAsserts ? 1 : 0;
        if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_DbgDACAssertOnMismatch, dwAssertDefault))
        {
            // Output a nice error message that contains the timestamps in string format.
            time_t actualTime = timestamp;
            char szActualTime[30];
            ctime_s(szActualTime, sizeof(szActualTime), &actualTime);

            time_t expectedTime = g_dacTableInfo.dwID0;
            char szExpectedTime[30];
            ctime_s(szExpectedTime, sizeof(szExpectedTime), &expectedTime);

            // Create a nice detailed message for the assert dialog.
            // Note that the strings returned by ctime_s have terminating newline characters.
            // This is technically a TARGET_CONSISTENCY_CHECK because a corrupt target could,
            // in-theory, have a corrupt mscrowks PE header and cause this check to fail
            // unnecessarily. However, this check occurs during startup, before we know
            // whether target consistency checks should be enabled, so it's always enabled
            // at the moment.
            char szMsgBuf[1024];
            _snprintf_s(szMsgBuf, sizeof(szMsgBuf), _TRUNCATE,
                "DAC fatal error: %s/mscordacwks.dll version mismatch\n\n"\
                "The debug directory timestamp of the loaded %s does not match the\n"\
                "version mscordacwks.dll was built for.\n"\
                "Expected %s timestamp: %s"\
                "Actual %s timestamp: %s\n"\
                "DAC will now fail to initialize with a CORDBG_E_MISMATCHED_CORWKS_AND_DACWKS_DLLS\n"\
                "error. If you really want to try and use the mimatched DLLs, you can disable this\n"\
                "check by setting COMPlus_DbgDACSkipVerifyDlls=1. However, using a mismatched DAC\n"\
                "DLL will usually result in arbitrary debugger failures.\n",
                TARGET_MAIN_CLR_DLL_NAME_A,
                TARGET_MAIN_CLR_DLL_NAME_A,
                TARGET_MAIN_CLR_DLL_NAME_A,
                szExpectedTime,
                TARGET_MAIN_CLR_DLL_NAME_A,
                szActualTime);
            _ASSERTE_MSG(false, szMsgBuf);
        }
#endif

        // Return a specific hresult indicating this problem
        return CORDBG_E_MISMATCHED_CORWKS_AND_DACWKS_DLLS;
    }
#endif // TARGET_UNIX
    return S_OK;
}

#ifdef FEATURE_MINIMETADATA_IN_TRIAGEDUMPS

// Prepares the mini-metadata stream manager for writing during triage/mini
// dump generation; on any failure the manager is discarded (best-effort).
void ClrDataAccess::InitStreamsForWriting(IN CLRDataEnumMemoryFlags flags)
{
    // enforce this should only be called when generating triage and mini-dumps
    if (flags != CLRDATA_ENUM_MEM_MINI && flags != CLRDATA_ENUM_MEM_TRIAGE)
        return;

    EX_TRY
    {
        if (m_streams == NULL)
            m_streams = new DacStreamManager(g_MiniMetaDataBuffAddress, g_MiniMetaDataBuffMaxSize);

        if (!m_streams->PrepareStreamsForWriting())
        {
            delete m_streams;
            m_streams = NULL;
        }
    }
    EX_CATCH
    {
        if (m_streams != NULL)
        {
            delete m_streams;
            m_streams = NULL;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)
}

// Caches the name of an EE structure into the mini-metadata stream.
// Best-effort: returns false on any failure or when streams are not set up.
bool ClrDataAccess::MdCacheAddEEName(TADDR taEEStruct, const SString& name)
{
    bool result = false;
    EX_TRY
    {
        if (m_streams != NULL)
            result = m_streams->MdCacheAddEEName(taEEStruct, name);
    }
    EX_CATCH
    {
        result = false;
    }
    EX_END_CATCH(SwallowAllExceptions)

    return result;
}

// Reports the mini-metadata stream memory during triage/mini dump enumeration.
// Exceptions are deliberately swallowed: stream enumeration is best-effort.
void ClrDataAccess::EnumStreams(IN CLRDataEnumMemoryFlags flags)
{
    // enforce this should only be called when generating triage and mini-dumps
    if (flags != CLRDATA_ENUM_MEM_MINI && flags != CLRDATA_ENUM_MEM_TRIAGE)
        return;

    EX_TRY
    {
        if (m_streams != NULL)
            m_streams->EnumStreams(flags);
    }
    EX_CATCH
    {
    }
    EX_END_CATCH(SwallowAllExceptions)
}

// Looks up a previously cached EE structure name from the mini-metadata
// stream, lazily creating the stream manager. Returns false on any failure.
bool ClrDataAccess::MdCacheGetEEName(TADDR taEEStruct, SString & eeName)
{
    bool result = false;
    EX_TRY
    {
        if (m_streams == NULL)
            m_streams = new DacStreamManager(g_MiniMetaDataBuffAddress, g_MiniMetaDataBuffMaxSize);

        result = m_streams->MdCacheGetEEName(taEEStruct, eeName);
    }
    EX_CATCH
    {
        result = false;
    }
    EX_END_CATCH(SwallowAllExceptions)

    return result;
}

#endif //
// FEATURE_MINIMETADATA_IN_TRIAGEDUMPS

// Needed for RT_RCDATA.
#define MAKEINTRESOURCE(v) MAKEINTRESOURCEW(v)

// this funny looking double macro forces x to be macro expanded before L is prepended
#define _WIDE(x) _WIDE2(x)
#define _WIDE2(x) W(x)

// Computes the address of the DAC table in the target. On non-Unix targets
// this is a no-op (S_OK); on Unix it uses either a fixed RVA or the exported
// "g_dacTable" symbol, depending on build configuration.
HRESULT GetDacTableAddress(ICorDebugDataTarget* dataTarget, ULONG64 baseAddress, PULONG64 dacTableAddress)
{
#ifdef TARGET_UNIX
#ifdef USE_DAC_TABLE_RVA
#ifdef DAC_TABLE_SIZE
    if (DAC_TABLE_SIZE != sizeof(g_dacGlobals))
    {
        return E_INVALIDARG;
    }
#endif
    // On MacOS, FreeBSD or NetBSD use the RVA include file
    *dacTableAddress = baseAddress + DAC_TABLE_RVA;
#else
    // On Linux/MacOS try to get the dac table address via the export symbol
    if (!TryGetSymbol(dataTarget, baseAddress, "g_dacTable", dacTableAddress))
    {
        return CORDBG_E_MISSING_DEBUGGER_EXPORTS;
    }
#endif
#endif
    return S_OK;
}

// Loads the DAC global table (g_dacGlobals) from the target runtime image.
// On Unix it is read directly from the target at the exported table address;
// on Windows it is extracted from an RT_RCDATA resource embedded in the
// runtime DLL and validated against the sizes this mscordacwks was built with.
HRESULT
ClrDataAccess::GetDacGlobals()
{
#ifdef TARGET_UNIX
    ULONG64 dacTableAddress;
    HRESULT hr = GetDacTableAddress(m_pTarget, m_globalBase, &dacTableAddress);
    if (FAILED(hr))
    {
        return hr;
    }
    if (FAILED(ReadFromDataTarget(m_pTarget, dacTableAddress, (BYTE*)&g_dacGlobals, sizeof(g_dacGlobals))))
    {
        return CORDBG_E_MISSING_DEBUGGER_EXPORTS;
    }
    if (g_dacGlobals.ThreadStore__s_pThreadStore == NULL)
    {
        return CORDBG_E_UNSUPPORTED;
    }
    return S_OK;
#else
    HRESULT status = E_FAIL;
    DWORD rsrcRVA = 0;
    LPVOID rsrcData = NULL;
    DWORD rsrcSize = 0;

    DWORD resourceSectionRVA = 0;

    if (FAILED(status = GetMachineAndResourceSectionRVA(m_pTarget, m_globalBase, NULL, &resourceSectionRVA)))
    {
        _ASSERTE_MSG(false, "DAC fatal error: can't locate resource section in " TARGET_MAIN_CLR_DLL_NAME_A);
        return CORDBG_E_MISSING_DEBUGGER_EXPORTS;
    }

    if (FAILED(status = GetResourceRvaFromResourceSectionRvaByName(m_pTarget, m_globalBase,
        resourceSectionRVA, (DWORD)(size_t)RT_RCDATA, _WIDE(DACCESS_TABLE_RESOURCE), 0,
        &rsrcRVA, &rsrcSize)))
    {
        _ASSERTE_MSG(false, "DAC fatal error: can't locate DAC table resource in " TARGET_MAIN_CLR_DLL_NAME_A);
        return CORDBG_E_MISSING_DEBUGGER_EXPORTS;
    }

    // NOTE(review): rsrcData is never freed on the paths below - presumably
    // acceptable for process-lifetime DAC init, but confirm.
    rsrcData = new (nothrow) BYTE[rsrcSize];
    if (rsrcData == NULL)
        return E_OUTOFMEMORY;

    if (FAILED(status = ReadFromDataTarget(m_pTarget, m_globalBase + rsrcRVA, (BYTE*)rsrcData, rsrcSize)))
    {
        _ASSERTE_MSG(false, "DAC fatal error: can't load DAC table resource from " TARGET_MAIN_CLR_DLL_NAME_A);
        return CORDBG_E_MISSING_DEBUGGER_EXPORTS;
    }

    PBYTE rawData = (PBYTE)rsrcData;
    DWORD bytesLeft = rsrcSize;

    // Read the header
    struct DacTableHeader header;

    // We currently expect the header to be 2 32-bit values and 1 16-byte value,
    // make sure there is no packing going on or anything.
    static_assert_no_msg(sizeof(DacTableHeader) == 2 * 4 + 16);

    if (bytesLeft < sizeof(DacTableHeader))
    {
        _ASSERTE_MSG(false, "DAC fatal error: DAC table too small for header.");
        goto Exit;
    }
    memcpy(&header, rawData, sizeof(DacTableHeader));
    rawData += sizeof(DacTableHeader);
    bytesLeft -= sizeof(DacTableHeader);

    // Save the table info for later use
    g_dacTableInfo = header.info;

    // Sanity check that the DAC table is the size we expect.
    // This could fail if a different version of dacvars.h or vptr_list.h was used when building
    // mscordacwks.dll than when running DacTableGen.
    if (offsetof(DacGlobals, EEJitManager__vtAddr) != header.numGlobals * sizeof(ULONG))
    {
#ifdef _DEBUG
        char szMsgBuf[1024];
        _snprintf_s(szMsgBuf, sizeof(szMsgBuf), _TRUNCATE,
            "DAC fatal error: mismatch in number of globals in DAC table. Read from file: %d, expected: %zd.",
            header.numGlobals,
            (size_t)offsetof(DacGlobals, EEJitManager__vtAddr) / sizeof(ULONG));
        _ASSERTE_MSG(false, szMsgBuf);
#endif // _DEBUG

        status = E_INVALIDARG;
        goto Exit;
    }

    if (sizeof(DacGlobals) != (header.numGlobals + header.numVptrs) * sizeof(ULONG))
    {
#ifdef _DEBUG
        char szMsgBuf[1024];
        _snprintf_s(szMsgBuf, sizeof(szMsgBuf), _TRUNCATE,
            "DAC fatal error: mismatch in number of vptrs in DAC table. Read from file: %d, expected: %zd.",
            header.numVptrs,
            (size_t)(sizeof(DacGlobals) - offsetof(DacGlobals, EEJitManager__vtAddr)) / sizeof(ULONG));
        _ASSERTE_MSG(false, szMsgBuf);
#endif // _DEBUG

        status = E_INVALIDARG;
        goto Exit;
    }

    // Copy the DAC table into g_dacGlobals
    if (bytesLeft < sizeof(DacGlobals))
    {
        _ASSERTE_MSG(false, "DAC fatal error: DAC table resource too small for DacGlobals.");
        status = E_UNEXPECTED;
        goto Exit;
    }
    memcpy(&g_dacGlobals, rawData, sizeof(DacGlobals));
    rawData += sizeof(DacGlobals);
    bytesLeft -= sizeof(DacGlobals);

    status = S_OK;

Exit:
    return status;
#endif
}

#undef MAKEINTRESOURCE

//----------------------------------------------------------------------------
//
// IsExceptionFromManagedCode - report if pExceptionRecord points to an exception belonging to the current runtime
//
// Arguments:
//    pExceptionRecord - the exception record
//
// Return Value:
//    TRUE if it is
//    Otherwise, FALSE
//
//----------------------------------------------------------------------------
BOOL ClrDataAccess::IsExceptionFromManagedCode(EXCEPTION_RECORD* pExceptionRecord)
{
    DAC_ENTER();

    BOOL flag = FALSE;

    // Delegates to the global (non-member) IsExceptionFromManagedCode under the DAC lock.
    if (::IsExceptionFromManagedCode(pExceptionRecord))
    {
        flag = TRUE;
    }

    DAC_LEAVE();

    return flag;
}

#ifndef TARGET_UNIX

//----------------------------------------------------------------------------
//
// GetWatsonBuckets - retrieve Watson buckets from the specified thread
//
// Arguments:
//    dwThreadId - the thread ID
//    pGM - pointer to the space to store retrieved Watson buckets
//
// Return Value:
//    S_OK if the operation is successful.
//    or S_FALSE if Watson buckets cannot be found
//    else detailed error code.
// //---------------------------------------------------------------------------- HRESULT ClrDataAccess::GetWatsonBuckets(DWORD dwThreadId, GenericModeBlock * pGM) { _ASSERTE((dwThreadId != 0) && (pGM != NULL)); if ((dwThreadId == 0) || (pGM == NULL)) { return E_INVALIDARG; } DAC_ENTER(); Thread * pThread = DacGetThread(dwThreadId); _ASSERTE(pThread != NULL); HRESULT hr = E_UNEXPECTED; if (pThread != NULL) { hr = GetClrWatsonBucketsWorker(pThread, pGM); } DAC_LEAVE(); return hr; } #endif // TARGET_UNIX //---------------------------------------------------------------------------- // // CLRDataAccessCreateInstance - create and initialize a ClrDataAccess object // // Arguments: // pLegacyTarget - data target object // pClrDataAccess - ClrDataAccess object // // Return Value: // S_OK on success, else detailed error code. // //---------------------------------------------------------------------------- STDAPI CLRDataAccessCreateInstance(ICLRDataTarget * pLegacyTarget, ClrDataAccess ** pClrDataAccess) { if ((pLegacyTarget == NULL) || (pClrDataAccess == NULL)) { return E_INVALIDARG; } *pClrDataAccess = NULL; // Create an adapter which implements the new ICorDebugDataTarget interfaces using // a legacy implementation of ICLRDataTarget // ClrDataAccess will take a take a ref on this and delete it when it's released. DataTargetAdapter * pDtAdapter = new (nothrow) DataTargetAdapter(pLegacyTarget); if (!pDtAdapter) { return E_OUTOFMEMORY; } ClrDataAccess* dacClass = new (nothrow) ClrDataAccess(pDtAdapter, pLegacyTarget); if (!dacClass) { delete pDtAdapter; return E_OUTOFMEMORY; } HRESULT hr = dacClass->Initialize(); if (FAILED(hr)) { dacClass->Release(); return hr; } *pClrDataAccess = dacClass; return S_OK; } //---------------------------------------------------------------------------- // // CLRDataCreateInstance. // Creates the IXClrData object // This is the legacy entrypoint to DAC, used by dbgeng/dbghelp (windbg, SOS, watson, etc). 
// //---------------------------------------------------------------------------- STDAPI DLLEXPORT CLRDataCreateInstance(REFIID iid, ICLRDataTarget * pLegacyTarget, void ** iface) { if ((pLegacyTarget == NULL) || (iface == NULL)) { return E_INVALIDARG; } *iface = NULL; ClrDataAccess * pClrDataAccess; HRESULT hr = CLRDataAccessCreateInstance(pLegacyTarget, &pClrDataAccess); if (hr != S_OK) { return hr; } hr = pClrDataAccess->QueryInterface(iid, iface); pClrDataAccess->Release(); return hr; } //---------------------------------------------------------------------------- // // OutOfProcessExceptionEventGetProcessIdAndThreadId - get ProcessID and ThreadID // // Arguments: // hProcess - process handle // hThread - thread handle // pPId - pointer to DWORD to store ProcessID // pThreadId - pointer to DWORD to store ThreadID // // Return Value: // TRUE if the operation is successful. // FALSE if it fails // //---------------------------------------------------------------------------- BOOL OutOfProcessExceptionEventGetProcessIdAndThreadId(HANDLE hProcess, HANDLE hThread, DWORD * pPId, DWORD * pThreadId) { _ASSERTE((pPId != NULL) && (pThreadId != NULL)); #ifdef TARGET_UNIX // UNIXTODO: mikem 1/13/15 Need appropriate PAL functions for getting ids *pPId = (DWORD)(SIZE_T)hProcess; *pThreadId = (DWORD)(SIZE_T)hThread; #else *pPId = GetProcessIdOfThread(hThread); *pThreadId = GetThreadId(hThread); #endif // TARGET_UNIX return TRUE; } // WER_RUNTIME_EXCEPTION_INFORMATION will be available from Win7 SDK once Win7 SDK is released. 
#if !defined(WER_RUNTIME_EXCEPTION_INFORMATION)
// Local fallback definition for pre-Win7 SDKs; layout must match the OS
// structure passed by WER to the runtime exception helper exports below.
typedef struct _WER_RUNTIME_EXCEPTION_INFORMATION
{
    DWORD dwSize;
    HANDLE hProcess;
    HANDLE hThread;
    EXCEPTION_RECORD exceptionRecord;
    CONTEXT context;
} WER_RUNTIME_EXCEPTION_INFORMATION, * PWER_RUNTIME_EXCEPTION_INFORMATION;
#endif // !defined(WER_RUNTIME_EXCEPTION_INFORMATION)


#ifndef TARGET_UNIX

//----------------------------------------------------------------------------
//
// OutOfProcessExceptionEventGetWatsonBucket - retrieve Watson buckets if it is
//      a managed exception
//
// Arguments:
//    pContext - the context passed at helper module registration
//    pExceptionInformation - structure that contains information about the crash
//    pGMB - pointer to the space to store retrieved Watson buckets
//
// Return Value:
//    S_OK if the operation is successful.
//    or S_FALSE if it is not a managed exception or Watson buckets cannot be found
//    else detailed error code.
//
//----------------------------------------------------------------------------
STDAPI OutOfProcessExceptionEventGetWatsonBucket(_In_ PDWORD pContext,
                                                 _In_ const PWER_RUNTIME_EXCEPTION_INFORMATION pExceptionInformation,
                                                 _Out_ GenericModeBlock * pGMB)
{
    HANDLE hProcess = pExceptionInformation->hProcess;
    HANDLE hThread  = pExceptionInformation->hThread;
    DWORD PId, ThreadId;

    if (!OutOfProcessExceptionEventGetProcessIdAndThreadId(hProcess, hThread, &PId, &ThreadId))
    {
        return E_FAIL;
    }

    // pContext is the runtime module base address that was registered with WER
    // as the helper context.
    CLRDATA_ADDRESS baseAddressOfRuntime = (CLRDATA_ADDRESS)pContext;
    NewHolder<LiveProcDataTarget> dataTarget(NULL);
    dataTarget = new (nothrow) LiveProcDataTarget(hProcess, PId, baseAddressOfRuntime);
    if (dataTarget == NULL)
    {
        return E_OUTOFMEMORY;
    }

    // The holders release the DAC instance / data target on every exit path.
    NewHolder<ClrDataAccess> pClrDataAccess(NULL);
    HRESULT hr = CLRDataAccessCreateInstance(dataTarget, &pClrDataAccess);
    if (hr != S_OK)
    {
        // Normalize an unexpected S_FALSE into a hard failure; S_FALSE is
        // reserved below for "not a managed exception".
        if (hr == S_FALSE)
        {
            return E_FAIL;
        }
        else
        {
            return hr;
        }
    }

    if (!pClrDataAccess->IsExceptionFromManagedCode(&pExceptionInformation->exceptionRecord))
    {
        return S_FALSE;
    }

    return pClrDataAccess->GetWatsonBuckets(ThreadId, pGMB);
}

//----------------------------------------------------------------------------
//
// OutOfProcessExceptionEventCallback - claim the ownership of this event if current
//                                      runtime threw the unhandled exception
//
// Arguments:
//    pContext - the context passed at helper module registration
//    pExceptionInformation - structure that contains information about the crash
//    pbOwnershipClaimed - output parameter for claiming the ownership of this event
//    pwszEventName - name of the event.  If this is NULL, pchSize cannot be NULL.
//                    This parameter is valid only if * pbOwnershipClaimed is TRUE.
//    pchSize - the size of the buffer pointed by pwszEventName
//    pdwSignatureCount - the count of signature parameters.  Valid values range from
//                        0 to 10.  If the value returned is greater than 10, only the
//                        1st 10 parameters are used for bucketing parameters.  This
//                        parameter is valid only if * pbOwnershipClaimed is TRUE.
//
// Return Value:
//    S_OK on success, else detailed error code.
//
// Note:
//    This is the 1st function that is called into by WER.  This API through its out
//    parameters, tells WER as to whether or not it is claiming the crash.  If it does
//    claim the crash, WER uses the event name specified in the string pointed to by
//    pwszEventName for error reporting.  WER then proceed to call the
//    OutOfProcessExceptionEventSignatureCallback to get the bucketing parameters from
//    the helper dll.
//
//    This function follows the multiple call paradigms.  WER may call into this function
//    with *pwszEventName pointer set to NULL.  This is to indicate to the function, that
//    WER wants to know the buffer size needed by the function to populate the string
//    into the buffer.  The function should return E_INSUFFICIENTBUFFER with the needed
//    buffer size in *pchSize.  WER shall then allocate a buffer of size *pchSize for
//    pwszEventName and then call this function again at which point the function should
//    populate the string and return S_OK.
//
//    Note that *pdOwnershipClaimed should be set to TRUE everytime this function is called
//    for the helper dll to claim ownership of bucketing.
//
//    The Win7 WER spec is at
//    http://windows/windows7/docs/COSD%20Documents/Fundamentals/Feedback%20Services%20and%20Platforms/WER-CLR%20Integration%20Dev%20Spec.docx
//
//    !!!READ THIS!!!
//    Since this is called by external modules it's important that we don't let any exceptions leak out (see Win8 95224).
//
//----------------------------------------------------------------------------
STDAPI OutOfProcessExceptionEventCallback(_In_ PDWORD pContext,
                                          _In_ const PWER_RUNTIME_EXCEPTION_INFORMATION pExceptionInformation,
                                          _Out_ BOOL * pbOwnershipClaimed,
                                          _Out_writes_(*pchSize) PWSTR pwszEventName,
                                          __inout PDWORD pchSize,
                                          _Out_ PDWORD pdwSignatureCount)
{
    SUPPORTS_DAC_HOST_ONLY;

    if ((pContext == NULL) ||
        (pExceptionInformation == NULL) ||
        (pExceptionInformation->dwSize < sizeof(WER_RUNTIME_EXCEPTION_INFORMATION)) ||
        (pbOwnershipClaimed == NULL) ||
        (pchSize == NULL) ||
        (pdwSignatureCount == NULL))
    {
        return E_INVALIDARG;
    }

    *pbOwnershipClaimed = FALSE;

    GenericModeBlock gmb;
    HRESULT hr = E_FAIL;

    // EX_TRY keeps any C++ exception from escaping to WER (see note above).
    EX_TRY
    {
        // get Watson buckets if it is a managed exception
        hr = OutOfProcessExceptionEventGetWatsonBucket(pContext, pExceptionInformation, &gmb);
    }
    EX_CATCH_HRESULT(hr);

    if (hr != S_OK)
    {
        // S_FALSE means either it is not a managed exception or we do not have Watson buckets.
        // Since we have set pbOwnershipClaimed to FALSE, we return S_OK to WER.
        if (hr == S_FALSE)
        {
            hr = S_OK;
        }

        return hr;
    }

    // Multiple-call protocol: report the required buffer size when the caller's
    // buffer is absent or too small.
    if ((pwszEventName == NULL) || (*pchSize <= wcslen(gmb.wzEventTypeName)))
    {
        *pchSize = static_cast<DWORD>(wcslen(gmb.wzEventTypeName)) + 1;
        return HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER);
    }

    // copy custom event name
    wcscpy_s(pwszEventName, *pchSize, gmb.wzEventTypeName);
    *pdwSignatureCount = GetCountBucketParamsForEvent(gmb.wzEventTypeName);
    *pbOwnershipClaimed = TRUE;

    return S_OK;
}

//----------------------------------------------------------------------------
//
// OutOfProcessExceptionEventCallback - provide custom Watson buckets
//
// Arguments:
//    pContext - the context passed at helper module registration
//    pExceptionInformation - structure that contains information about the crash
//    dwIndex - the index of the bucketing parameter being requested.  Valid values are
//              from 0 to 9
//    pwszName - pointer to the name of the bucketing parameter
//    pchName - pointer to character count of the pwszName buffer.  If pwszName points to
//              null, *pchName represents the buffer size (represented in number of characters)
//              needed to populate the name in pwszName.
//    pwszValue - pointer to the value of the pwszName bucketing parameter
//    pchValue - pointer to the character count of the pwszValue buffer.  If pwszValue points
//               to null, *pchValue represents the buffer size (represented in number of
//               characters) needed to populate the value in pwszValue.
//
// Return Value:
//    S_OK on success, else detailed error code.
//
// Note:
//    This function is called by WER only if the call to OutOfProcessExceptionEventCallback()
//    was successful and the value of *pbOwnershipClaimed was TRUE.  This function is called
//    pdwSignatureCount times to collect the bucketing parameters from the helper dll.
//
//    This function also follows the multiple call paradigm as described for the
//    OutOfProcessExceptionEventCallback() function.  The buffer sizes needed for
//    this function are of the pwszName and pwszValue buffers.
//
//    !!!READ THIS!!!
//    Since this is called by external modules it's important that we don't let any exceptions leak out (see Win8 95224).
//
//----------------------------------------------------------------------------
STDAPI OutOfProcessExceptionEventSignatureCallback(_In_ PDWORD pContext,
                                                   _In_ const PWER_RUNTIME_EXCEPTION_INFORMATION pExceptionInformation,
                                                   _In_ DWORD dwIndex,
                                                   _Out_writes_(*pchName) PWSTR pwszName,
                                                   __inout PDWORD pchName,
                                                   _Out_writes_(*pchValue) PWSTR pwszValue,
                                                   __inout PDWORD pchValue)
{
    SUPPORTS_DAC_HOST_ONLY;

    if ((pContext == NULL) ||
        (pExceptionInformation == NULL) ||
        (pExceptionInformation->dwSize < sizeof(WER_RUNTIME_EXCEPTION_INFORMATION)) ||
        (pchName == NULL) ||
        (pchValue == NULL))
    {
        return E_INVALIDARG;
    }

    // Multiple-call protocol for the name buffer: we only ever return an empty
    // name (see below), so one character is always the required size.
    if ((pwszName == NULL) || (*pchName == 0))
    {
        *pchName = 1;
        return HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER);
    }

    GenericModeBlock gmb;
    // Aliases of the ten bucket-parameter fields, indexable by dwIndex.  These
    // point into gmb, which is filled in by the call below.
    const PWSTR pwszBucketValues[] = {gmb.wzP1, gmb.wzP2, gmb.wzP3, gmb.wzP4, gmb.wzP5,
                                      gmb.wzP6, gmb.wzP7, gmb.wzP8, gmb.wzP9, gmb.wzP10};

    HRESULT hr = E_FAIL;

    // EX_TRY keeps any C++ exception from escaping to WER (see note above).
    EX_TRY
    {
        // get Watson buckets if it is a managed exception
        hr = OutOfProcessExceptionEventGetWatsonBucket(pContext, pExceptionInformation, &gmb);
    }
    EX_CATCH_HRESULT(hr);

    // it's possible for the OS to kill
    // the faulting process before WER crash reporting has completed.
    _ASSERTE(hr == S_OK || hr == CORDBG_E_READVIRTUAL_FAILURE);
    if (hr != S_OK)
    {
        // S_FALSE means either it is not a managed exception or we do not have Watson buckets.
        // Either case is a logic error because this function is called by WER only if the call
        // to OutOfProcessExceptionEventCallback() was successful and the value of
        // *pbOwnershipClaimed was TRUE.
        if (hr == S_FALSE)
        {
            hr = E_FAIL;
        }

        return hr;
    }

    DWORD paramCount = GetCountBucketParamsForEvent(gmb.wzEventTypeName);

    if (dwIndex >= paramCount)
    {
        _ASSERTE(!"dwIndex is out of range");
        return E_INVALIDARG;
    }

    // Return pwszName as an empty string to let WER use localized version of "Parameter n"
    *pwszName = W('\0');

    // Multiple-call protocol for the value buffer.
    if ((pwszValue == NULL) || (*pchValue <= wcslen(pwszBucketValues[dwIndex])))
    {
        *pchValue = static_cast<DWORD>(wcslen(pwszBucketValues[dwIndex]))+ 1;
        return HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER);
    }

    // copy custom Watson bucket value
    wcscpy_s(pwszValue, *pchValue, pwszBucketValues[dwIndex]);

    return S_OK;
}

#endif // TARGET_UNIX

//----------------------------------------------------------------------------
//
// OutOfProcessExceptionEventCallback - provide custom debugger launch string
//
// Arguments:
//    pContext - the context passed at helper module registration
//    pExceptionInformation - structure that contains information about the crash
//    pbCustomDebuggerNeeded - pointer to a BOOL.  If this BOOL is set to TRUE, then
//                             a custom debugger launch option is needed by the
//                             process.  In that case, the subsequent parameters will
//                             be meaningfully used.  If this is FALSE, the subsequent
//                             parameters will be ignored.
//    pwszDebuggerLaunch - pointer to a string that will be used to launch the debugger,
//                         if the debugger is launched.  The value of this string overrides
//                         the default debugger launch string used by WER.
//    pchSize - pointer to the character count of the pwszDebuggerLaunch buffer.  If
//              pwszDebuggerLaunch points to null, *pchSize represents the buffer size
//              (represented in number of characters) needed to populate the debugger
//              launch string in pwszDebuggerLaunch.
//    pbAutoLaunchDebugger - pointer to a BOOL.  If this BOOL is set to TRUE, WER will
//                           directly launch the debugger.  If set to FALSE, WER will show
//                           the debug option to the user in the WER UI.
//
// Return Value:
//    S_OK on success, else detailed error code.
//
// Note:
//    This function is called into by WER only if the call to OutOfProcessExceptionEventCallback()
//    was successful and the value of *pbOwnershipClaimed was TRUE.  This function allows the helper
//    dll to customize the debugger launch options including the launch string.
//
//    This function also follows the multiple call paradigm as described for the
//    OutOfProcessExceptionEventCallback() function.  The buffer sizes needed for
//    this function are of the pwszName and pwszValue buffers.
//
//----------------------------------------------------------------------------
STDAPI OutOfProcessExceptionEventDebuggerLaunchCallback(_In_ PDWORD pContext,
                                                        _In_ const PWER_RUNTIME_EXCEPTION_INFORMATION pExceptionInformation,
                                                        _Out_ BOOL * pbCustomDebuggerNeeded,
                                                        _Out_writes_opt_(*pchSize) PWSTR pwszDebuggerLaunch,
                                                        __inout PDWORD pchSize,
                                                        _Out_ BOOL * pbAutoLaunchDebugger)
{
    SUPPORTS_DAC_HOST_ONLY;

    if ((pContext == NULL) ||
        (pExceptionInformation == NULL) ||
        (pExceptionInformation->dwSize < sizeof(WER_RUNTIME_EXCEPTION_INFORMATION)) ||
        (pbCustomDebuggerNeeded == NULL) ||
        (pwszDebuggerLaunch == NULL) ||
        (pchSize == NULL) ||
        (pbAutoLaunchDebugger == NULL))
    {
        return E_INVALIDARG;
    }

    // Starting from CLRv4 managed debugger string and setting are unified with native debuggers.
    // There is no need to provide custom debugger string for WER.
    *pbCustomDebuggerNeeded = FALSE;

    return S_OK;
}

// DacHandleEnum

#include "comcallablewrapper.h"

// Default-construct a handle walker; all cursor state starts empty and the
// generation filter is disabled (-1 means "walk all generations").
DacHandleWalker::DacHandleWalker()
    : mDac(0), m_instanceAge(0), mMap(0), mIndex(0),
      mTypeMask(0), mGenerationFilter(-1), mChunkIndex(0), mCurr(0),
      mIteratorIndex(0)
{
    SUPPORTS_DAC;
}

// Free the chain of handle chunks accumulated during the walk.  mHead itself
// is an embedded member, so only the heap-allocated Next chain is deleted.
DacHandleWalker::~DacHandleWalker()
{
    SUPPORTS_DAC;

    HandleChunkHead *curr = mHead.Next;

    while (curr)
    {
        HandleChunkHead *tmp = curr;
        curr = curr->Next;
        delete tmp;
    }
}

// Initialize the walker with a DAC instance and an array of handle-type ids
// (each must be < 32); the types are folded into a bitmask.
HRESULT DacHandleWalker::Init(ClrDataAccess *dac, UINT types[], UINT typeCount)
{
    SUPPORTS_DAC;

    if (dac == NULL || types == NULL)
        return E_POINTER;

    mDac = dac;
    m_instanceAge = dac->m_instanceAge;

    return Init(BuildTypemask(types, typeCount));
}

// As above, but additionally restrict the walk to handles in GC generation
// 'gen' (0 .. max_gen).
HRESULT DacHandleWalker::Init(ClrDataAccess *dac, UINT types[], UINT typeCount, int gen)
{
    SUPPORTS_DAC;

    if (gen < 0 || gen > (int)*g_gcDacGlobals->max_gen)
        return E_INVALIDARG;

    mGenerationFilter = gen;

    return Init(dac, types, typeCount);
}

// Core init: position the cursor at the first handle-table map and record the
// type mask to walk.
HRESULT DacHandleWalker::Init(UINT32 typemask)
{
    SUPPORTS_DAC;

    mMap = g_gcDacGlobals->handle_table_map;
    mTypeMask = typemask;

    return S_OK;
}

// Fold an array of handle-type ids into a 32-bit mask (bit i set means type i
// is included in the walk).
UINT32 DacHandleWalker::BuildTypemask(UINT types[], UINT typeCount)
{
    SUPPORTS_DAC;

    UINT32 mask = 0;

    for (UINT i = 0; i < typeCount; ++i)
    {
        _ASSERTE(types[i] < 32);
        mask |= (1 << types[i]);
    }

    return mask;
}

// ISOSHandleEnum::Next - fetch up to 'celt' handle records.
// NOTE(review): SOSHelperEnter/SOSHelperLeave are macros defined elsewhere;
// they appear to declare the local 'hr' and bracket the DAC call — confirm.
HRESULT DacHandleWalker::Next(unsigned int celt, SOSHandleData handles[], unsigned int *pceltFetched)
{
    SUPPORTS_DAC;

    if (handles == NULL || pceltFetched == NULL)
        return E_POINTER;

    SOSHelperEnter();

    hr = DoHandleWalk<SOSHandleData, unsigned int, DacHandleWalker::EnumCallbackSOS>(celt, handles, pceltFetched);

    SOSHelperLeave();

    return hr;
}

// Advance the walk over the target's handle tables until at least one chunk of
// handle data has been produced (via 'callback'), or the tables are exhausted.
// Returns false when there is nothing more to enumerate.
bool DacHandleWalker::FetchMoreHandles(HANDLESCANPROC callback)
{
    SUPPORTS_DAC;

    // The table slots are based on the number of GC heaps in the process.
    int max_slots = 1;

#ifdef FEATURE_SVR_GC
    if (GCHeapUtilities::IsServerHeap())
        max_slots = GCHeapCount();
#endif // FEATURE_SVR_GC

    // Reset the Count on all cached chunks.  We reuse chunks after allocating
    // them, and the count is the only thing which needs resetting.
    for (HandleChunkHead *curr = &mHead; curr; curr = curr->Next)
        curr->Count = 0;

    DacHandleWalkerParam param(&mHead);

    do
    {
        // Have we advanced past the end of the current bucket?
        if (mMap && mIndex >= INITIAL_HANDLE_TABLE_ARRAY_SIZE)
        {
            mIndex = 0;
            mMap = mMap->pNext;
        }

        // Have we walked the entire handle table map?
        if (mMap == NULL)
        {
            mCurr = NULL;
            return false;
        }

        if (mMap->pBuckets[mIndex] != NULL)
        {
            for (int i = 0; i < max_slots; ++i)
            {
                DPTR(dac_handle_table) hTable = mMap->pBuckets[mIndex]->pTable[i];
                if (hTable)
                {
                    // Yikes!  The handle table callbacks don't produce the handle type or
                    // the AppDomain that we need, and it's too difficult to propagate out
                    // these things (especially the type) without worrying about performance
                    // implications for the GC.  Instead we'll have the callback walk each
                    // type individually.  There are only a few handle types, and the handle
                    // table has a fast-path for only walking a single type anyway.
                    UINT32 handleType = 0;
                    for (UINT32 mask = mTypeMask; mask; mask >>= 1, handleType++)
                    {
                        if (mask & 1)
                        {
                            dac_handle_table *pTable = hTable;
                            PTR_AppDomain pDomain = AppDomain::GetCurrentDomain();
                            param.AppDomain = TO_CDADDR(pDomain.GetAddr());
                            param.Type = handleType;

                            // Either enumerate the handles regularly, or walk the handle
                            // table as the GC does if a generation filter was requested.
                            if (mGenerationFilter != -1)
                                HndScanHandlesForGC(hTable, callback, (LPARAM)&param, 0, &handleType, 1, mGenerationFilter, *g_gcDacGlobals->max_gen, 0);
                            else
                                HndEnumHandles(hTable, &handleType, 1, callback, (LPARAM)&param, 0, FALSE);
                        }
                    }
                }
            }
        }

        // Stop looping as soon as we have found data.  We also stop if we have a failed HRESULT during
        // the callback (this should indicate OOM).
        mIndex++;
    } while (mHead.Count == 0 && SUCCEEDED(param.Result));

    mCurr = mHead.Next;
    return true;
}

// ISOSHandleEnum::Skip - not supported for handle enumeration.
HRESULT DacHandleWalker::Skip(unsigned int celt)
{
    return E_NOTIMPL;
}

// ISOSHandleEnum::Reset - not supported for handle enumeration.
HRESULT DacHandleWalker::Reset()
{
    return E_NOTIMPL;
}

// ISOSHandleEnum::GetCount - not supported for handle enumeration.
HRESULT DacHandleWalker::GetCount(unsigned int *pcelt)
{
    return E_NOTIMPL;
}

// Fill in the ref-count-related fields reported for a handle.  Only
// HNDTYPE_REFCOUNTED handles backed by a CCW carry real counts; everything
// else reports zeros/FALSE.  Jupiter/pegging data is always reported as 0 and
// FALSE here.
void DacHandleWalker::GetRefCountedHandleInfo(
    OBJECTREF oref, unsigned int uType,
    unsigned int *pRefCount, unsigned int *pJupiterRefCount, BOOL *pIsPegged, BOOL *pIsStrong)
{
    SUPPORTS_DAC;

    if (pJupiterRefCount)
        *pJupiterRefCount = 0;

    if (pIsPegged)
        *pIsPegged = FALSE;

#if defined(FEATURE_COMINTEROP) || defined(FEATURE_COMWRAPPERS) || defined(FEATURE_OBJCMARSHAL)
    if (uType == HNDTYPE_REFCOUNTED)
    {
#if defined(FEATURE_COMINTEROP)
        // get refcount from the CCW
        PTR_ComCallWrapper pWrap = ComCallWrapper::GetWrapperForObject(oref);
        if (pWrap != NULL)
        {
            if (pRefCount)
                *pRefCount = (unsigned int)pWrap->GetRefCount();

            if (pIsStrong)
                *pIsStrong = pWrap->IsWrapperActive();

            return;
        }
#endif
#if defined(FEATURE_OBJCMARSHAL)
        // [TODO] FEATURE_OBJCMARSHAL
#endif // FEATURE_OBJCMARSHAL
    }
#endif // FEATURE_COMINTEROP || FEATURE_COMWRAPPERS || FEATURE_OBJCMARSHAL

    if (pRefCount)
        *pRefCount = 0;

    if (pIsStrong)
        *pIsStrong = FALSE;
}

// Handle-table scan callback: records one SOSHandleData entry per handle into
// the chunk list carried in param1 (a DacHandleWalkerParam*).
void CALLBACK DacHandleWalker::EnumCallbackSOS(PTR_UNCHECKED_OBJECTREF handle, uintptr_t *pExtraInfo, uintptr_t param1, uintptr_t param2)
{
    SUPPORTS_DAC;

    DacHandleWalkerParam *param = (DacHandleWalkerParam *)param1;
    HandleChunkHead *curr = param->Curr;

    // If we failed on a previous call (OOM) don't keep trying to allocate, it's not going to work.
    if (FAILED(param->Result))
        return;

    // We've moved past the size of the current chunk.  We'll allocate a new chunk
    // and stuff the handles there.  These are cleaned up by the destructor.
    if (curr->Count >= (curr->Size/sizeof(SOSHandleData)))
    {
        if (curr->Next == NULL)
        {
            HandleChunk *next = new (nothrow) HandleChunk;
            if (next != NULL)
            {
                curr->Next = next;
            }
            else
            {
                param->Result = E_OUTOFMEMORY;
                return;
            }
        }

        curr = param->Curr = param->Curr->Next;
    }

    // Fill the current handle.
    SOSHandleData *dataArray = (SOSHandleData*)curr->pData;
    SOSHandleData &data = dataArray[curr->Count++];

    data.Handle = TO_CDADDR(handle.GetAddr());
    data.Type = param->Type;
    // Secondary carries type-specific extra data: the dependent-handle target,
    // or (COM interop) the weak-native-COM extra info.
    if (param->Type == HNDTYPE_DEPENDENT)
        data.Secondary = GetDependentHandleSecondary(handle.GetAddr()).GetAddr();
#ifdef FEATURE_COMINTEROP
    else if (param->Type == HNDTYPE_WEAK_NATIVE_COM)
        data.Secondary = HndGetHandleExtraInfo(handle.GetAddr());
#endif // FEATURE_COMINTEROP
    else
        data.Secondary = 0;
    data.AppDomain = param->AppDomain;
    GetRefCountedHandleInfo((OBJECTREF)*handle, param->Type, &data.RefCount, &data.JupiterRefCount, &data.IsPegged, &data.StrongReference);
    data.StrongReference |= (BOOL)IsAlwaysStrongReference(param->Type);
}

// Construct a stack-reference walker for the thread with the given OS thread
// id; mThread stays NULL if no such managed thread exists.
DacStackReferenceWalker::DacStackReferenceWalker(ClrDataAccess *dac, DWORD osThreadID)
    : mDac(dac), m_instanceAge(dac ? dac->m_instanceAge : 0), mThread(0), mErrors(0),
      mEnumerated(false), mChunkIndex(0), mCurr(0), mIteratorIndex(0)
{
    Thread *curr = NULL;

    for (curr = ThreadStore::GetThreadList(curr);
         curr;
         curr = ThreadStore::GetThreadList(curr))
    {
        if (curr->GetOSThreadId() == osThreadID)
        {
            mThread = curr;
            break;
        }
    }
}

// Free the heap-allocated chunk chain (mHead itself is embedded).
// NOTE(review): mErrors (SOSStackErrorList) is not freed here — presumably
// owned elsewhere (e.g. by DacStackReferenceErrorEnum); confirm.
DacStackReferenceWalker::~DacStackReferenceWalker()
{
    StackRefChunkHead *curr = mHead.next;

    while (curr)
    {
        StackRefChunkHead *tmp = curr;
        curr = curr->next;
        delete tmp;
    }
}

// Validate that the target thread was found and prepare the heap helper.
HRESULT DacStackReferenceWalker::Init()
{
    if (!mThread)
        return E_INVALIDARG;
    return mHeap.Init();
}

// ISOSStackRefEnum::Skip - not supported.
HRESULT STDMETHODCALLTYPE DacStackReferenceWalker::Skip(unsigned int count)
{
    return E_NOTIMPL;
}

// ISOSStackRefEnum::Reset - not supported.
HRESULT STDMETHODCALLTYPE DacStackReferenceWalker::Reset()
{
    return E_NOTIMPL;
}

// ISOSStackRefEnum::GetCount - walk the stack once (lazily) and total the
// entries accumulated in the chunk list.
HRESULT DacStackReferenceWalker::GetCount(unsigned int *pCount)
{
    if (!pCount)
        return E_POINTER;

    SOSHelperEnter();

    if (!mEnumerated)
    {
        // Fill out our data structures.
        WalkStack<unsigned int, SOSStackRefData>(0, NULL, DacStackReferenceWalker::GCReportCallbackSOS, DacStackReferenceWalker::GCEnumCallbackSOS);
    }

    unsigned int count = 0;
    for(StackRefChunkHead *curr = &mHead; curr; curr = curr->next)
        count += curr->count;

    *pCount = count;

    SOSHelperLeave();
    return hr;
}

// ISOSStackRefEnum::Next - fetch up to 'count' stack-reference records.
HRESULT DacStackReferenceWalker::Next(unsigned int count,
                                      SOSStackRefData stackRefs[],
                                      unsigned int *pFetched)
{
    if (stackRefs == NULL || pFetched == NULL)
        return E_POINTER;

    SOSHelperEnter();

    hr = DoStackWalk<unsigned int, SOSStackRefData,
                     DacStackReferenceWalker::GCReportCallbackSOS,
                     DacStackReferenceWalker::GCEnumCallbackSOS>
                    (count, stackRefs, pFetched);

    SOSHelperLeave();

    return hr;
}

// Hand out an enumerator over the errors collected during the stack walk
// (forcing the walk first if a thread is attached).
HRESULT DacStackReferenceWalker::EnumerateErrors(ISOSStackRefErrorEnum **ppEnum)
{
    if (!ppEnum)
        return E_POINTER;

    SOSHelperEnter();

    if (mThread)
    {
        // Fill out our data structures.
        WalkStack<unsigned int, SOSStackRefData>(0, NULL, DacStackReferenceWalker::GCReportCallbackSOS, DacStackReferenceWalker::GCEnumCallbackSOS);
    }

    DacStackReferenceErrorEnum *pEnum = new DacStackReferenceErrorEnum(this, mErrors);
    hr = pEnum->QueryInterface(__uuidof(ISOSStackRefErrorEnum), (void**)ppEnum);

    SOSHelperLeave();
    return hr;
}

// Read a pointer-sized value from the target process; returns ~0 on failure
// (callers treat that as "unreadable").
CLRDATA_ADDRESS DacStackReferenceWalker::ReadPointer(TADDR addr)
{
    ULONG32 bytesRead = 0;
    TADDR result = 0;
    HRESULT hr = mDac->m_pTarget->ReadVirtual(addr, (BYTE*)&result, sizeof(TADDR), &bytesRead);

    if (FAILED(hr) || (bytesRead != sizeof(TADDR)))
        return (CLRDATA_ADDRESS)~0;

    return TO_CDADDR(result);
}

// GC-info enumeration callback for frameless (JITted) frames: records one
// SOSStackRefData entry for the reported slot.
void DacStackReferenceWalker::GCEnumCallbackSOS(LPVOID hCallback, OBJECTREF *pObject, uint32_t flags, DacSlotLocation loc)
{
    GCCONTEXT *gcctx = (GCCONTEXT *)hCallback;
    DacScanContext *dsc = (DacScanContext*)gcctx->sc;

    // Yuck.  The GcInfoDecoder reports a local pointer for registers (as it's reading out of the REGDISPLAY
    // in the stack walk), and it reports a TADDR for stack locations.  This is architecturally difficulty
    // to fix, so we are leaving it for now.
    TADDR addr = 0;
    TADDR obj = 0;

    if (loc.targetPtr)
    {
        addr = (TADDR)pObject;
        obj = TO_TADDR(dsc->pWalker->ReadPointer((CORDB_ADDRESS)addr));
    }
    else
    {
        obj = pObject->GetAddr();
    }

    if (flags & GC_CALL_INTERIOR)
    {
        // Normalize an interior pointer to the start of its containing object.
        CORDB_ADDRESS fixed_obj = 0;
        HRESULT hr = dsc->pWalker->mHeap.ListNearObjects((CORDB_ADDRESS)obj, NULL, &fixed_obj, NULL);

        // If we failed...oh well, SOS won't mind.  We'll just report the interior pointer as is.
        if (SUCCEEDED(hr))
            obj = TO_TADDR(fixed_obj);
    }

    SOSStackRefData *data = dsc->pWalker->GetNextObject<SOSStackRefData>(dsc);
    if (data != NULL)
    {
        // Report where the object and where it was found.
        data->HasRegisterInformation = true;
        data->Register = loc.reg;
        data->Offset = loc.regOffset;
        data->Address = TO_CDADDR(addr);
        data->Object = TO_CDADDR(obj);
        data->Flags = flags;

        // Report the frame that the data came from.
        data->StackPointer = TO_CDADDR(dsc->sp);

        if (dsc->pFrame)
        {
            data->SourceType = SOS_StackSourceFrame;
            data->Source = dac_cast<PTR_Frame>(dsc->pFrame).GetAddr();
        }
        else
        {
            data->SourceType = SOS_StackSourceIP;
            data->Source = TO_CDADDR(dsc->pc);
        }
    }
}

// GC root-report callback for explicit Frames: records one SOSStackRefData
// entry (no register information available on this path).
void DacStackReferenceWalker::GCReportCallbackSOS(PTR_PTR_Object ppObj, ScanContext *sc, uint32_t flags)
{
    DacScanContext *dsc = (DacScanContext*)sc;
    CLRDATA_ADDRESS obj = dsc->pWalker->ReadPointer(ppObj.GetAddr());

    if (flags & GC_CALL_INTERIOR)
    {
        // Normalize an interior pointer to the start of its containing object.
        CORDB_ADDRESS fixed_addr = 0;
        HRESULT hr = dsc->pWalker->mHeap.ListNearObjects((CORDB_ADDRESS)obj, NULL, &fixed_addr, NULL);

        // If we failed...oh well, SOS won't mind.  We'll just report the interior pointer as is.
        if (SUCCEEDED(hr))
            obj = TO_CDADDR(fixed_addr);
    }

    SOSStackRefData *data = dsc->pWalker->GetNextObject<SOSStackRefData>(dsc);
    if (data != NULL)
    {
        data->HasRegisterInformation = false;
        data->Register = 0;
        data->Offset = 0;
        data->Address = ppObj.GetAddr();
        data->Object = obj;
        data->Flags = flags;
        data->StackPointer = TO_CDADDR(dsc->sp);

        if (dsc->pFrame)
        {
            data->SourceType = SOS_StackSourceFrame;
            data->Source = dac_cast<PTR_Frame>(dsc->pFrame).GetAddr();
        }
        else
        {
            data->SourceType = SOS_StackSourceIP;
            data->Source = TO_CDADDR(dsc->pc);
        }
    }
}

// Per-frame stack-walk callback: drives GC-reference enumeration for each
// crawl frame and collects any read failures into the walker's error list.
StackWalkAction DacStackReferenceWalker::Callback(CrawlFrame *pCF, VOID *pData)
{
    //
    // KEEP IN SYNC WITH GcStackCrawlCallBack in vm\gcscan.cpp
    //
    GCCONTEXT *gcctx = (GCCONTEXT*)pData;
    DacScanContext *dsc = (DacScanContext*)gcctx->sc;

    MethodDesc *pMD = pCF->GetFunction();
    gcctx->sc->pMD = pMD;

    PREGDISPLAY pRD = pCF->GetRegisterSet();
    dsc->sp = (TADDR)GetRegdisplaySP(pRD);;
    dsc->pc = PCODEToPINSTR(GetControlPC(pRD));

    ResetPointerHolder<CrawlFrame*> rph(&gcctx->cf);
    gcctx->cf = pCF;

    bool fReportGCReferences = true;
#if defined(FEATURE_EH_FUNCLETS)
    // On Win64 and ARM, we may have unwound this crawlFrame and thus, shouldn't report the invalid
    // references it may contain.
    // todo.
    fReportGCReferences = pCF->ShouldCrawlframeReportGCReferences();
#endif // defined(FEATURE_EH_FUNCLETS)

    Frame *pFrame = ((DacScanContext*)gcctx->sc)->pFrame = pCF->GetFrame();

    EX_TRY
    {
        if (fReportGCReferences)
        {
            if (pCF->IsFrameless())
            {
                ICodeManager * pCM = pCF->GetCodeManager();
                _ASSERTE(pCM != NULL);

                unsigned flags = pCF->GetCodeManagerFlags();

                pCM->EnumGcRefs(pCF->GetRegisterSet(),
                                pCF->GetCodeInfo(),
                                flags,
                                dsc->pEnumFunc,
                                pData);
            }
            else
            {
                pFrame->GcScanRoots(gcctx->f, gcctx->sc);
            }
        }
    }
    EX_CATCH
    {
        // Record the failing frame (or IP) at the tail of the error list.
        SOSStackErrorList *err = new SOSStackErrorList;
        err->pNext = NULL;

        if (pFrame)
        {
            err->error.SourceType = SOS_StackSourceFrame;
            err->error.Source = dac_cast<PTR_Frame>(pFrame).GetAddr();
        }
        else
        {
            err->error.SourceType = SOS_StackSourceIP;
            err->error.Source = TO_CDADDR(dsc->pc);
        }

        if (dsc->pWalker->mErrors == NULL)
        {
            dsc->pWalker->mErrors = err;
        }
        else
        {
            // This exception case should be non-existent.  It only happens when there is either
            // a clr!Frame on the callstack which is not properly dac-ized, or when a call down
            // EnumGcRefs causes a data read exception.  Since this is so rare, we don't worry
            // about making this code very efficient.
            SOSStackErrorList *curr = dsc->pWalker->mErrors;
            while (curr->pNext)
                curr = curr->pNext;

            curr->pNext = err;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

#if 0
    // todo

    // If we're executing a LCG dynamic method then we must promote the associated resolver to ensure it
    // doesn't get collected and yank the method code out from under us).

    // Be careful to only promote the reference -- we can also be called to relocate the reference and
    // that can lead to all sorts of problems since we could be racing for the relocation with the long
    // weak handle we recover the reference from. Promoting the reference is enough, the handle in the
    // reference will be relocated properly as long as we keep it alive till the end of the collection
    // as long as the reference is actually maintained by the long weak handle.
    if (pMD)
    {
        BOOL fMaybeCollectibleMethod = TRUE;

        // If this is a frameless method then the jitmanager can answer the question of whether
        // or not this is LCG simply by looking at the heap where the code lives, however there
        // is also the prestub case where we need to explicitly look at the MD for stuff that isn't
        // ngen'd
        if (pCF->IsFrameless() && pMD->IsLCGMethod())
        {
            fMaybeCollectibleMethod = ExecutionManager::IsCollectibleMethod(pCF->GetMethodToken());
        }

        if (fMaybeCollectibleMethod && pMD->IsLCGMethod())
        {
            PTR_Object obj = OBJECTREFToObject(pMD->AsDynamicMethodDesc()->GetLCGMethodResolver()->GetManagedResolver());
            dsc->pWalker->ReportObject(obj);
        }
        else
        {
            if (fMaybeCollectibleMethod)
            {
                PTR_Object obj = pMD->GetLoaderAllocator()->GetExposedObject();
                dsc->pWalker->ReportObject(obj);
            }

            if (fReportGCReferences)
            {
                GenericParamContextType paramContextType = GENERIC_PARAM_CONTEXT_NONE;

                if (pCF->IsFrameless())
                {
                    // We need to grab the Context Type here because there are cases where the MethodDesc
                    // is shared, and thus indicates there should be an instantiation argument, but the JIT
                    // was still allowed to optimize it away and we won't grab it below because we're not
                    // reporting any references from this frame.
                    paramContextType = pCF->GetCodeManager()->GetParamContextType(pCF->GetRegisterSet(), pCF->GetCodeInfo());
                }
                else
                {
                    if (pMD->RequiresInstMethodDescArg())
                        paramContextType = GENERIC_PARAM_CONTEXT_METHODDESC;
                    else if (pMD->RequiresInstMethodTableArg())
                        paramContextType = GENERIC_PARAM_CONTEXT_METHODTABLE;
                }

                // Handle the case where the method is a static shared generic method and we need to keep the type of the generic parameters alive
                if (paramContextType == GENERIC_PARAM_CONTEXT_METHODDESC)
                {
                    MethodDesc *pMDReal = dac_cast<PTR_MethodDesc>(pCF->GetParamTypeArg());
                    _ASSERTE((pMDReal != NULL) || !pCF->IsFrameless());
                    if (pMDReal != NULL)
                    {
                        PTR_Object obj = pMDReal->GetLoaderAllocator()->GetExposedObject();
                        dsc->pWalker->ReportObject(obj);
                    }
                }
                else if (paramContextType == GENERIC_PARAM_CONTEXT_METHODTABLE)
                {
                    MethodTable *pMTReal = dac_cast<PTR_MethodTable>(pCF->GetParamTypeArg());
                    _ASSERTE((pMTReal != NULL) || !pCF->IsFrameless());
                    if (pMTReal != NULL)
                    {
                        PTR_Object obj = pMTReal->GetLoaderAllocator()->GetExposedObject();
                        dsc->pWalker->ReportObject(obj);
                    }
                }
            }
        }
    }
#endif

    return SWA_CONTINUE;
}

// Enumerator over the error list produced by a DacStackReferenceWalker.
// Holds a ref on the walker (which owns the list) while a non-empty list is
// being enumerated.
DacStackReferenceErrorEnum::DacStackReferenceErrorEnum(DacStackReferenceWalker *pEnum, SOSStackErrorList *pErrors)
    : mEnum(pEnum), mHead(pErrors), mCurr(pErrors)
{
    _ASSERTE(mEnum);

    if (mHead != NULL)
        mEnum->AddRef();
}

DacStackReferenceErrorEnum::~DacStackReferenceErrorEnum()
{
    // Only release if the constructor took a ref (i.e. the list was non-empty).
    if (mHead)
        mEnum->Release();
}

// Advance the cursor by 'count' entries; S_FALSE if the list ran out first.
HRESULT DacStackReferenceErrorEnum::Skip(unsigned int count)
{
    unsigned int i = 0;
    for (i = 0; i < count && mCurr; ++i)
        mCurr = mCurr->pNext;

    return i < count ? S_FALSE : S_OK;
}

// Rewind the cursor to the head of the list.
HRESULT DacStackReferenceErrorEnum::Reset()
{
    mCurr = mHead;

    return S_OK;
}

// Count the entries in the full list (independent of the cursor position).
HRESULT DacStackReferenceErrorEnum::GetCount(unsigned int *pCount)
{
    SOSStackErrorList *curr = mHead;
    unsigned int count = 0;

    while (curr)
    {
        curr = curr->pNext;
        count++;
    }

    *pCount = count;
    return S_OK;
}

// Copy up to 'count' errors from the cursor; S_FALSE when fewer were available.
HRESULT DacStackReferenceErrorEnum::Next(unsigned int count, SOSStackRefError ref[], unsigned int *pFetched)
{
    if (pFetched == NULL || ref == NULL)
        return E_POINTER;

    unsigned int i;
    for (i = 0; i < count && mCurr; ++i, mCurr = mCurr->pNext)
        ref[i] = mCurr->error;

    *pFetched = i;
    return i < count ? S_FALSE : S_OK;
}
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//*****************************************************************************
// File: daccess.cpp
//
//
// ClrDataAccess implementation.
//
//*****************************************************************************
#include "stdafx.h"
#include <clrdata.h>
#include "typestring.h"
#include "holder.h"
#include "debuginfostore.h"
#include "peimagelayout.inl"
#include "datatargetadapter.h"
#include "readonlydatatargetfacade.h"
#include "metadataexports.h"
#include "excep.h"
#include "debugger.h"
#include "dwreport.h"
#include "primitives.h"
#include "dbgutil.h"
#ifdef TARGET_UNIX
#ifdef USE_DAC_TABLE_RVA
#include <dactablerva.h>
#else
extern "C" bool TryGetSymbol(ICorDebugDataTarget* dataTarget, uint64_t baseAddress, const char* symbolName, uint64_t* symbolAddress);
#endif
#endif
#include "dwbucketmanager.hpp"
#include "gcinterface.dac.h"
// To include definiton of IsThrowableThreadAbortException
//
#include <exstatecommon.h>
// Process-wide DAC critical section and the current ClrDataAccess instance
// pointer (initialized/torn down in DllMain below).
CRITICAL_SECTION g_dacCritSec;
ClrDataAccess* g_dacImpl;
EXTERN_C
#ifdef TARGET_UNIX
DLLEXPORT // For Win32 PAL LoadLibrary emulation
#endif
// DllMain: process attach/detach entry point for the DAC module.  On attach it
// initializes the PAL (Unix hosts) and g_dacCritSec exactly once; on detach it
// deletes the critical section if attach completed.
BOOL WINAPI DllMain(HANDLE instance, DWORD reason, LPVOID reserved) { static bool g_procInitialized = false; switch(reason) { case DLL_PROCESS_ATTACH: { if (g_procInitialized) {
#ifdef HOST_UNIX
// Double initialization can happen on Unix
// in case of manual load of DAC shared lib and calling DllMain
// not a big deal, we just ignore it.
return TRUE;
#else
return FALSE;
#endif
}
#ifdef HOST_UNIX
int err = PAL_InitializeDLL(); if(err != 0) { return FALSE; }
#endif
InitializeCriticalSection(&g_dacCritSec); g_procInitialized = true; break; } case DLL_PROCESS_DETACH:
// It's possible for this to be called without ATTACH completing (eg. if it failed)
if (g_procInitialized) { DeleteCriticalSection(&g_dacCritSec); } g_procInitialized = false; break; } return TRUE; }
// ConvertUtf8: UTF-8 -> UTF-16 conversion.  Optionally reports the required
// length (in wide chars, including the terminator) through 'nameLen', and/or
// fills 'buffer' (of 'bufLen' wide chars) when one is supplied.
HRESULT ConvertUtf8(_In_ LPCUTF8 utf8, ULONG32 bufLen, ULONG32* nameLen, _Out_writes_to_opt_(bufLen, *nameLen) PWSTR buffer) { if (nameLen) { *nameLen = WszMultiByteToWideChar(CP_UTF8, 0, utf8, -1, NULL, 0); if (!*nameLen) { return HRESULT_FROM_GetLastError(); } } if (buffer && bufLen) { if (!WszMultiByteToWideChar(CP_UTF8, 0, utf8, -1, buffer, bufLen)) { return HRESULT_FROM_GetLastError(); } } return S_OK; }
// AllocUtf8: UTF-16 -> newly allocated UTF-8 string.  'srcChars' may be
// (ULONG32)-1 meaning "NUL-terminated input"; otherwise one extra byte is
// reserved (overflow-checked) and a terminator is stored explicitly.  Caller
// owns *utf8 and must free it with delete [].
HRESULT AllocUtf8(_In_opt_ LPCWSTR wstr, ULONG32 srcChars, _Outptr_ LPUTF8* utf8) { ULONG32 chars = WszWideCharToMultiByte(CP_UTF8, 0, wstr, srcChars, NULL, 0, NULL, NULL); if (!chars) { return HRESULT_FROM_GetLastError(); }
// Make sure the converted string is always terminated.
if (srcChars != (ULONG32)-1) { if (!ClrSafeInt<ULONG32>::addition(chars, 1, chars)) { return HRESULT_FROM_WIN32(ERROR_ARITHMETIC_OVERFLOW); } } char* mem = new (nothrow) char[chars]; if (!mem) { return E_OUTOFMEMORY; } if (!WszWideCharToMultiByte(CP_UTF8, 0, wstr, srcChars, mem, chars, NULL, NULL)) { HRESULT hr = HRESULT_FROM_GetLastError(); delete [] mem; return hr; } if (srcChars != (ULONG32)-1) { mem[chars - 1] = 0; } *utf8 = mem; return S_OK; }
// GetFullClassNameFromMetadata: formats "namespace.name" for 'classToken' into
// the caller-supplied buffer via ns::MakePath.  (Definition continues on the
// following file line.)
HRESULT GetFullClassNameFromMetadata(IMDInternalImport* mdImport, mdTypeDef classToken, ULONG32 bufferChars, _Inout_updates_(bufferChars) LPUTF8 buffer) { HRESULT hr; LPCUTF8 baseName, namespaceName; IfFailRet(mdImport->GetNameOfTypeDef(classToken, &baseName, &namespaceName)); return ns::MakePath(buffer, bufferChars, namespaceName, baseName) ?
S_OK : E_OUTOFMEMORY; }
// GetFullMethodNameFromMetadata: formats "namespace.type.method" for
// 'methodToken' into the caller's buffer.  The class-path prefix is emitted
// first (when a parent type exists), then a separator, then the method name.
// Returns E_OUTOFMEMORY when the buffer is too small at any step.
HRESULT GetFullMethodNameFromMetadata(IMDInternalImport* mdImport, mdMethodDef methodToken, ULONG32 bufferChars, _Inout_updates_(bufferChars) LPUTF8 buffer) { HRESULT status; HRESULT hr; mdTypeDef classToken; size_t len; if (mdImport->GetParentToken(methodToken, &classToken) == S_OK) { if ((status = GetFullClassNameFromMetadata(mdImport, classToken, bufferChars, buffer)) != S_OK) { return status; } len = strlen(buffer); buffer += len; bufferChars -= static_cast<ULONG32>(len) + 1; if (!bufferChars) { return E_OUTOFMEMORY; } *buffer++ = NAMESPACE_SEPARATOR_CHAR; } LPCUTF8 methodName; IfFailRet(mdImport->GetNameOfMethodDef(methodToken, &methodName)); len = strlen(methodName); if (len >= bufferChars) { return E_OUTOFMEMORY; } strcpy_s(buffer, bufferChars, methodName); return S_OK; }
// SplitFullName: splits "namespace.type.member(params)" into up to four newly
// allocated UTF-8 pieces (each owned by the caller, delete []), driven by
// 'syntax' (type-only vs. method vs. field) and 'memberDots' (number of dots
// to fold into the member name, for members whose names contain dots).
HRESULT SplitFullName(_In_z_ PCWSTR fullName, SplitSyntax syntax, ULONG32 memberDots, _Outptr_opt_ LPUTF8* namespaceName, _Outptr_opt_ LPUTF8* typeName, _Outptr_opt_ LPUTF8* memberName, _Outptr_opt_ LPUTF8* params) { HRESULT status; PCWSTR paramsStart, memberStart, memberEnd, typeStart; if (!*fullName) { return E_INVALIDARG; }
//
// Split off parameters.
//
paramsStart = wcschr(fullName, W('(')); if (paramsStart) { if (syntax != SPLIT_METHOD || paramsStart == fullName) { return E_INVALIDARG; } if ((status = AllocUtf8(paramsStart, (ULONG32)-1, params)) != S_OK) { return status; } memberEnd = paramsStart - 1; } else { *params = NULL; memberEnd = fullName + (wcslen(fullName) - 1); } if (syntax != SPLIT_TYPE) {
//
// Split off member name.
//
memberStart = memberEnd; for (;;) { while (memberStart >= fullName && *memberStart != W('.')) { memberStart--; }
// Some member names (e.g. .ctor and .dtor) have
// dots, so go back to the first dot.
while (memberStart > fullName && memberStart[-1] == W('.')) { memberStart--; } if (memberStart <= fullName) { if (memberDots > 0) {
// Caller expected dots in the
// member name and they weren't found.
status = E_INVALIDARG; goto DelParams; } break; } else if (memberDots == 0) { break; } memberStart--; memberDots--; } memberStart++; if (memberStart > memberEnd) { status = E_INVALIDARG; goto DelParams; } if ((status = AllocUtf8(memberStart, (ULONG32) (memberEnd - memberStart) + 1, memberName)) != S_OK) { goto DelParams; } } else { *memberName = NULL; memberStart = memberEnd + 2; }
//
// Split off type name.
//
if (memberStart > fullName) {
// Must have at least one character for the type
// name.  If there was a member name, there must
// also be a separator.
if (memberStart < fullName + 2) { status = E_INVALIDARG; goto DelMember; } typeStart = memberStart - 2; while (typeStart >= fullName && *typeStart != W('.')) { typeStart--; } typeStart++; if ((status = AllocUtf8(typeStart, (ULONG32) (memberStart - typeStart) - 1, typeName)) != S_OK) { goto DelMember; } } else { *typeName = NULL; typeStart = fullName; }
//
// Namespace must be the rest.
//
if (typeStart > fullName) { if ((status = AllocUtf8(fullName, (ULONG32) (typeStart - fullName) - 1, namespaceName)) != S_OK) { goto DelType; } } else { *namespaceName = NULL; }
return S_OK;
// Error unwind: free whatever pieces were already allocated, outermost last.
DelType: delete [] (*typeName);
DelMember: delete [] (*memberName);
DelParams: delete [] (*params);
return status; }
// CompareUtf8: strcmp-style comparison of two UTF-8 strings, case-insensitive
// when CLRDATA_BYNAME_CASE_INSENSITIVE is set in 'nameFlags'.
int CompareUtf8(_In_ LPCUTF8 str1, _In_ LPCUTF8 str2, _In_ ULONG32 nameFlags) { if (nameFlags & CLRDATA_BYNAME_CASE_INSENSITIVE) {
// XXX Microsoft - Convert to Unicode?
return SString::_stricmp(str1, str2); } return strcmp(str1, str2); }
//----------------------------------------------------------------------------
//
// MetaEnum.
//
//----------------------------------------------------------------------------
// Start: opens a metadata enumeration of 'kind' (typedefs, methods, or fields)
// on 'mdImport'; 'container' scopes method/field enumeration to a type.
HRESULT MetaEnum::Start(IMDInternalImport* mdImport, ULONG32 kind, mdToken container) { HRESULT status; switch(kind) { case mdtTypeDef: status = mdImport->EnumTypeDefInit(&m_enum); break; case mdtMethodDef: case mdtFieldDef: status = mdImport->EnumInit(kind, container, &m_enum); break; default: return E_INVALIDARG; } if (status != S_OK) { return status; } m_mdImport = mdImport; m_kind = kind; return S_OK; }
// End: closes the underlying metadata enum (if one was started) and resets all
// state via Clear().
void MetaEnum::End(void) { if (!m_mdImport) { return; } switch(m_kind) { case mdtTypeDef: case mdtMethodDef: case mdtFieldDef: m_mdImport->EnumClose(&m_enum); break; } Clear(); }
// NextToken: fetches the next token of the enumerated kind; optionally returns
// the token's name/namespace (namespace is only meaningful for typedefs and is
// reported as NULL for methods and fields).  S_FALSE signals end of enum.
HRESULT MetaEnum::NextToken(mdToken* token, _Outptr_opt_result_maybenull_ LPCUTF8* namespaceName, _Outptr_opt_result_maybenull_ LPCUTF8* name) { HRESULT hr; if (!m_mdImport) { return E_INVALIDARG; } switch(m_kind) { case mdtTypeDef: if (!m_mdImport->EnumNext(&m_enum, token)) { return S_FALSE; } m_lastToken = *token; if (namespaceName || name) { LPCSTR _name, _namespaceName; IfFailRet(m_mdImport->GetNameOfTypeDef(*token, &_name, &_namespaceName)); if (namespaceName) { *namespaceName = _namespaceName; } if (name) { *name = _name; } } return S_OK; case mdtMethodDef: if (!m_mdImport->EnumNext(&m_enum, token)) { return S_FALSE; } m_lastToken = *token; if (namespaceName) { *namespaceName = NULL; } if (name != NULL) { IfFailRet(m_mdImport->GetNameOfMethodDef(*token, name)); } return S_OK; case mdtFieldDef: if (!m_mdImport->EnumNext(&m_enum, token)) { return S_FALSE; } m_lastToken = *token; if (namespaceName) { *namespaceName = NULL; } if (name != NULL) { IfFailRet(m_mdImport->GetNameOfFieldDef(*token, name)); } return S_OK; default: return E_INVALIDARG; } }
// NextDomainToken: like NextToken but yields each token once per app domain
// (the same token is re-reported while the domain iterator advances).  If a
// specific app domain was set on this enum, only that domain is used.
HRESULT MetaEnum::NextDomainToken(AppDomain** appDomain, mdToken* token) { HRESULT status; if (m_appDomain) {
// Use only the caller-provided app domain.
*appDomain = m_appDomain; return NextToken(token, NULL, NULL); }
//
// Splay tokens across all app domains.
//
for (;;) { if (m_lastToken == mdTokenNil) {
// Need to fetch a token.
if ((status = NextToken(token, NULL, NULL)) != S_OK) { return status; } m_domainIter.Init(); } if (m_domainIter.Next()) { break; } m_lastToken = mdTokenNil; } *appDomain = m_domainIter.GetDomain(); *token = m_lastToken; return S_OK; }
// NextTokenByName: scans forward for the next token whose namespace/name match
// the given filters (NULL filter matches anything); comparison honors
// 'nameFlags' case sensitivity.
HRESULT MetaEnum::NextTokenByName(_In_opt_ LPCUTF8 namespaceName, _In_opt_ LPCUTF8 name, ULONG32 nameFlags, mdToken* token) { HRESULT status; LPCUTF8 tokNamespace, tokName; for (;;) { if ((status = NextToken(token, &tokNamespace, &tokName)) != S_OK) { return status; } if (namespaceName && (!tokNamespace || CompareUtf8(namespaceName, tokNamespace, nameFlags) != 0)) { continue; } if (name && (!tokName || CompareUtf8(name, tokName, nameFlags) != 0)) { continue; } return S_OK; } }
// NextDomainTokenByName: name-filtered variant of NextDomainToken; same
// one-result-per-domain splaying behavior.
HRESULT MetaEnum::NextDomainTokenByName(_In_opt_ LPCUTF8 namespaceName, _In_opt_ LPCUTF8 name, ULONG32 nameFlags, AppDomain** appDomain, mdToken* token) { HRESULT status; if (m_appDomain) {
// Use only the caller-provided app domain.
*appDomain = m_appDomain; return NextTokenByName(namespaceName, name, nameFlags, token); }
//
// Splay tokens across all app domains.
//
for (;;) { if (m_lastToken == mdTokenNil) {
// Need to fetch a token.
if ((status = NextTokenByName(namespaceName, name, nameFlags, token)) != S_OK) { return status; } m_domainIter.Init(); } if (m_domainIter.Next()) { break; } m_lastToken = mdTokenNil; } *appDomain = m_domainIter.GetDomain(); *token = m_lastToken; return S_OK; }
// New: heap-allocates a MetaEnum over 'mod's metadata and starts it; returns
// it via 'metaEnumRet' and/or as an opaque CLRDATA_ENUM cookie via 'handle'.
// An optional public app-domain pins enumeration to that domain.
HRESULT MetaEnum::New(Module* mod, ULONG32 kind, mdToken container, IXCLRDataAppDomain* pubAppDomain, MetaEnum** metaEnumRet, CLRDATA_ENUM* handle) { HRESULT status; MetaEnum* metaEnum; if (handle) { *handle = TO_CDENUM(NULL); } metaEnum = new (nothrow) MetaEnum; if (!metaEnum) { return E_OUTOFMEMORY; } if ((status = metaEnum-> Start(mod->GetMDImport(), kind, container)) != S_OK) { delete metaEnum; return status; } if (pubAppDomain) { metaEnum->m_appDomain = ((ClrDataAppDomain*)pubAppDomain)->GetAppDomain(); } if (metaEnumRet) { *metaEnumRet = metaEnum; } if (handle) { *handle = TO_CDENUM(metaEnum); } return S_OK; }
//----------------------------------------------------------------------------
//
// SplitName.
//
//----------------------------------------------------------------------------
// Constructor: records the split syntax/flags and zeroes all state.
SplitName::SplitName(SplitSyntax syntax, ULONG32 nameFlags, ULONG32 memberDots) { m_syntax = syntax; m_nameFlags = nameFlags; m_memberDots = memberDots; Clear(); }
// Delete: frees the four owned name strings (allocated by SplitFullName) and
// NULLs the pointers.
void SplitName::Delete(void) { delete [] m_namespaceName; m_namespaceName = NULL; delete [] m_typeName; m_typeName = NULL; delete [] m_memberName; m_memberName = NULL; delete [] m_params; m_params = NULL; }
// Clear: resets all members to their empty values WITHOUT freeing — use
// Delete() first if the strings are owned.
void SplitName::Clear(void) { m_namespaceName = NULL; m_typeName = NULL; m_typeToken = mdTypeDefNil; m_memberName = NULL; m_memberToken = mdTokenNil; m_params = NULL; m_tlsThread = NULL; m_metaEnum.m_appDomain = NULL; m_module = NULL; m_lastField = NULL; }
// SplitString: validates 'fullName' against the configured syntax (must be
// NULL for SPLIT_NO_NAME, non-NULL otherwise) and performs the split.
HRESULT SplitName::SplitString(_In_opt_ PCWSTR fullName) { if (m_syntax == SPLIT_NO_NAME) { if (fullName) { return E_INVALIDARG; } return S_OK; } else if (!fullName) { return E_INVALIDARG; } return SplitFullName(fullName, m_syntax, m_memberDots, &m_namespaceName, &m_typeName, &m_memberName, &m_params); }
// wcrscan: reverse scan for 'ch' in [beg, end]; returns a pointer to the last
// occurrence, or a pointer one before 'beg' if not found.
FORCEINLINE WCHAR* wcrscan(LPCWSTR beg, LPCWSTR end, WCHAR ch) {
//_ASSERTE(beg <= end);
WCHAR *p; for (p = (WCHAR*)end; p >= beg; --p) { if (*p == ch) break; } return p; }
// This functions allocates a new UTF8 string that contains the classname
// lying between the current sepName and the previous sepName. E.g. for a
// class name of "Outer+middler+inner" when sepName points to the NULL
// terminator this function will return "inner" in pResult and will update
// sepName to point to the second '+' character in the string. When sepName
// points to the first '+' character this function will return "Outer" in
// pResult and sepName will point one WCHAR before fullName.
HRESULT NextEnclosingClasName(LPCWSTR fullName, _Outref_ LPWSTR& sepName, _Outptr_ LPUTF8 *pResult) { if (sepName < fullName) { return E_FAIL; }
//_ASSERTE(*sepName == W('\0') || *sepName == W('+') || *sepName == W('/'));
LPWSTR origInnerName = sepName-1; if ((sepName = wcrscan(fullName, origInnerName, W('+'))) < fullName) { sepName = wcrscan(fullName, origInnerName, W('/')); } return AllocUtf8(sepName+1, static_cast<ULONG32>(origInnerName-sepName), pResult); }
// FindType: resolves m_typeName (+ optional nesting path using '+'/'/'
// separators) to a typedef token in 'mdInternal', caching it in m_typeToken.
// Walks candidate inner-class tokens outward via GetNestedClassProps and
// verifies each enclosing name, retrying on mismatch.
bool SplitName::FindType(IMDInternalImport* mdInternal) { if (m_typeToken != mdTypeDefNil) { return true; } if (!m_typeName) { return false; } if ((m_namespaceName == NULL || m_namespaceName[0] == '\0') && (CompareUtf8(COR_MODULE_CLASS, m_typeName, m_nameFlags)==0)) { m_typeToken = TokenFromRid(1, mdtTypeDef); // <Module> class always has a RID of 1.
return true; } MetaEnum metaEnum; if (metaEnum.Start(mdInternal, mdtTypeDef, mdTypeDefNil) != S_OK) { return false; } LPUTF8 curClassName; ULONG32 length; WCHAR wszName[MAX_CLASS_NAME]; if (ConvertUtf8(m_typeName, MAX_CLASS_NAME, &length, wszName) != S_OK) { return false; } WCHAR *pHead;
Retry: pHead = wszName + length; if (FAILED(NextEnclosingClasName(wszName, pHead, &curClassName))) { return false; }
// an inner class has an empty namespace associated with it
HRESULT hr = metaEnum.NextTokenByName((pHead < wszName) ? m_namespaceName : "", curClassName, m_nameFlags, &m_typeToken); delete[] curClassName; if (hr != S_OK) {
// if we didn't find a token with the given name
return false; } else if (pHead < wszName) {
// if we did find a token, *and* the class name given
// does not specify any enclosing class, that's it
return true; } else {
// restart with innermost class
pHead = wszName + length; mdTypeDef tkInner = m_typeToken; mdTypeDef tkOuter; BOOL bRetry = FALSE; LPUTF8 utf8Name; while ( !bRetry && SUCCEEDED(NextEnclosingClasName(wszName, pHead, &utf8Name)) ) { if (mdInternal->GetNestedClassProps(tkInner, &tkOuter) != S_OK) tkOuter = mdTypeDefNil; LPCSTR szName, szNS; if (FAILED(mdInternal->GetNameOfTypeDef(tkInner, &szName, &szNS))) { return false; } bRetry = (CompareUtf8(utf8Name, szName, m_nameFlags) != 0); if (!bRetry) {
// if this is outermost class we need to compare namespaces too
if (tkOuter == mdTypeDefNil) {
// is this the outermost in the class name, too?
if (pHead < wszName && CompareUtf8(m_namespaceName ? m_namespaceName : "", szNS, m_nameFlags) == 0) { delete[] utf8Name; return true; } else { bRetry = TRUE; } } } delete[] utf8Name; tkInner = tkOuter; } goto Retry; } }
// FindMethod: resolves m_memberName to a methoddef token on the previously
// resolved m_typeToken, caching it in m_memberToken.
bool SplitName::FindMethod(IMDInternalImport* mdInternal) { if (m_memberToken != mdTokenNil) { return true; } if (m_typeToken == mdTypeDefNil || !m_memberName) { return false; } ULONG32 EmptySig = 0;
// XXX Microsoft - Compare using signature when available.
if (mdInternal->FindMethodDefUsingCompare(m_typeToken, m_memberName, (PCCOR_SIGNATURE)&EmptySig, sizeof(EmptySig), NULL, NULL, &m_memberToken) != S_OK) { m_memberToken = mdTokenNil; return false; } return true; }
// FindField: resolves m_memberName to a fielddef token on m_typeToken, caching
// it in m_memberToken.  Rejects names carrying a parameter list.
bool SplitName::FindField(IMDInternalImport* mdInternal) { if (m_memberToken != mdTokenNil) { return true; } if (m_typeToken == mdTypeDefNil || !m_memberName || m_params) {
// Can't have params with a field.
return false; } MetaEnum metaEnum; if (metaEnum.Start(mdInternal, mdtFieldDef, m_typeToken) != S_OK) { return false; } return metaEnum.NextTokenByName(NULL, m_memberName, m_nameFlags, &m_memberToken) == S_OK; }
// AllocAndSplitString: validates 'nameFlags', heap-allocates a SplitName, and
// splits 'fullName' into it.  On failure the SplitName is freed.
HRESULT SplitName::AllocAndSplitString(_In_opt_ PCWSTR fullName, SplitSyntax syntax, ULONG32 nameFlags, ULONG32 memberDots, SplitName** split) { HRESULT status; if (nameFlags & ~(CLRDATA_BYNAME_CASE_SENSITIVE | CLRDATA_BYNAME_CASE_INSENSITIVE)) { return E_INVALIDARG; } *split = new (nothrow) SplitName(syntax, nameFlags, memberDots); if (!*split) { return E_OUTOFMEMORY; } if ((status = (*split)->SplitString(fullName)) != S_OK) { delete (*split); return status; } return S_OK; }
// CdStartMethod: begins a by-name method enumeration handle (CLRDATA_ENUM).
// If the type is not pre-resolved, it is looked up from the split name; on
// lookup failure the member-dot count is bumped and the split retried, to
// cope with explicitly implemented methods whose names contain dots.
HRESULT SplitName::CdStartMethod(_In_opt_ PCWSTR fullName, ULONG32 nameFlags, Module* mod, mdTypeDef typeToken, AppDomain* appDomain, IXCLRDataAppDomain* pubAppDomain, SplitName** splitRet, CLRDATA_ENUM* handle) { HRESULT status; SplitName* split; ULONG methDots = 0; *handle = TO_CDENUM(NULL);
Retry: if ((status = SplitName:: AllocAndSplitString(fullName, SPLIT_METHOD, nameFlags, methDots, &split)) != S_OK) { return status; } if (typeToken == mdTypeDefNil) { if (!split->FindType(mod->GetMDImport())) { bool hasNamespace = split->m_namespaceName != NULL; delete split;
//
// We may have a case where there's an
// explicitly implemented method which
// has dots in the name.  If it's possible
// to move the method name dot split
// back, go ahead and retry that way.
//
if (hasNamespace) { methDots++; goto Retry; } return E_INVALIDARG; } typeToken = split->m_typeToken; } else { if (split->m_namespaceName || split->m_typeName) { delete split; return E_INVALIDARG; } } if ((status = split->m_metaEnum. Start(mod->GetMDImport(), mdtMethodDef, typeToken)) != S_OK) { delete split; return status; } split->m_metaEnum.m_appDomain = appDomain; if (pubAppDomain) { split->m_metaEnum.m_appDomain = ((ClrDataAppDomain*)pubAppDomain)->GetAppDomain(); } split->m_module = mod; *handle = TO_CDENUM(split); if (splitRet) { *splitRet = split; } return S_OK; }
// CdNextMethod: advances a CdStartMethod handle to the next methoddef whose
// name matches the split's member name.
HRESULT SplitName::CdNextMethod(CLRDATA_ENUM* handle, mdMethodDef* token) { SplitName* split = FROM_CDENUM(SplitName, *handle); if (!split) { return E_INVALIDARG; } return split->m_metaEnum. NextTokenByName(NULL, split->m_memberName, split->m_nameFlags, token); }
// CdNextDomainMethod: per-app-domain variant of CdNextMethod (see
// MetaEnum::NextDomainTokenByName for the splaying behavior).
HRESULT SplitName::CdNextDomainMethod(CLRDATA_ENUM* handle, AppDomain** appDomain, mdMethodDef* token) { SplitName* split = FROM_CDENUM(SplitName, *handle); if (!split) { return E_INVALIDARG; } return split->m_metaEnum. NextDomainTokenByName(NULL, split->m_memberName, split->m_nameFlags, appDomain, token); }
// CdStartField: begins a field enumeration handle.  Resolves the containing
// type (from an explicit TypeHandle, an explicit typedef token, or the split
// name), then initializes the field iterator.  'objBase'/TLS thread give the
// context later used to produce field values.
HRESULT SplitName::CdStartField(_In_opt_ PCWSTR fullName, ULONG32 nameFlags, ULONG32 fieldFlags, IXCLRDataTypeInstance* fromTypeInst, TypeHandle typeHandle, Module* mod, mdTypeDef typeToken, ULONG64 objBase, Thread* tlsThread, IXCLRDataTask* pubTlsThread, AppDomain* appDomain, IXCLRDataAppDomain* pubAppDomain, SplitName** splitRet, CLRDATA_ENUM* handle) { HRESULT status; SplitName* split; *handle = TO_CDENUM(NULL); if ((status = SplitName:: AllocAndSplitString(fullName, fullName ?
SPLIT_FIELD : SPLIT_NO_NAME, nameFlags, 0, &split)) != S_OK) { return status; } if (typeHandle.IsNull()) { if (typeToken == mdTypeDefNil) { if (!split->FindType(mod->GetMDImport())) { status = E_INVALIDARG; goto Fail; } typeToken = split->m_typeToken; } else { if (split->m_namespaceName || split->m_typeName) { status = E_INVALIDARG; goto Fail; } }
// With phased class loading, this may return a partially-loaded type
// @todo : does this matter?
typeHandle = mod->LookupTypeDef(split->m_typeToken); if (typeHandle.IsNull()) { status = E_UNEXPECTED; goto Fail; } } if ((status = InitFieldIter(&split->m_fieldEnum, typeHandle, true, fieldFlags, fromTypeInst)) != S_OK) { goto Fail; } split->m_objBase = objBase; split->m_tlsThread = tlsThread; if (pubTlsThread) { split->m_tlsThread = ((ClrDataTask*)pubTlsThread)->GetThread(); } split->m_metaEnum.m_appDomain = appDomain; if (pubAppDomain) { split->m_metaEnum.m_appDomain = ((ClrDataAppDomain*)pubAppDomain)->GetAppDomain(); } split->m_module = mod; *handle = TO_CDENUM(split); if (splitRet) { *splitRet = split; } return S_OK;
Fail: delete split; return status; }
// CdNextField: advances a CdStartField handle to the next matching FieldDesc
// and reports whichever outputs the caller asked for: flags, UTF-16 name,
// token/scope, field type definition, or a materialized IXCLRDataValue.
// Returns S_FALSE when the iterator is exhausted.
HRESULT SplitName::CdNextField(ClrDataAccess* dac, CLRDATA_ENUM* handle, IXCLRDataTypeDefinition** fieldType, ULONG32* fieldFlags, IXCLRDataValue** value, ULONG32 nameBufRetLen, ULONG32* nameLenRet, _Out_writes_to_opt_(nameBufRetLen, *nameLenRet) WCHAR nameBufRet[ ], IXCLRDataModule** tokenScopeRet, mdFieldDef* tokenRet) { HRESULT status; SplitName* split = FROM_CDENUM(SplitName, *handle); if (!split) { return E_INVALIDARG; } FieldDesc* fieldDesc; while ((fieldDesc = split->m_fieldEnum.Next())) { if (split->m_syntax != SPLIT_NO_NAME) { LPCUTF8 fieldName; if (FAILED(fieldDesc->GetName_NoThrow(&fieldName)) || (split->Compare(split->m_memberName, fieldName) != 0)) { continue; } } split->m_lastField = fieldDesc; if (fieldFlags != NULL) { *fieldFlags = GetTypeFieldValueFlags(fieldDesc->GetFieldTypeHandleThrowing(), fieldDesc, split->m_fieldEnum. IsFieldFromParentClass() ?
CLRDATA_FIELD_IS_INHERITED : 0, false); } if ((nameBufRetLen != 0) || (nameLenRet != NULL)) { LPCUTF8 szFieldName; status = fieldDesc->GetName_NoThrow(&szFieldName); if (status != S_OK) { return status; } status = ConvertUtf8( szFieldName, nameBufRetLen, nameLenRet, nameBufRet); if (status != S_OK) { return status; } } if (tokenScopeRet && !value) { *tokenScopeRet = new (nothrow) ClrDataModule(dac, fieldDesc->GetModule()); if (!*tokenScopeRet) { return E_OUTOFMEMORY; } } if (tokenRet) { *tokenRet = fieldDesc->GetMemberDef(); } if (fieldType) { TypeHandle fieldTypeHandle = fieldDesc->GetFieldTypeHandleThrowing(); *fieldType = new (nothrow) ClrDataTypeDefinition(dac, fieldTypeHandle.GetModule(), fieldTypeHandle.GetMethodTable()->GetCl(), fieldTypeHandle); if (!*fieldType && tokenScopeRet) { delete (ClrDataModule*)*tokenScopeRet; } return *fieldType ? S_OK : E_OUTOFMEMORY; } if (value) { return ClrDataValue:: NewFromFieldDesc(dac, split->m_metaEnum.m_appDomain, split->m_fieldEnum.IsFieldFromParentClass() ? CLRDATA_VALUE_IS_INHERITED : 0, fieldDesc, split->m_objBase, split->m_tlsThread, NULL, value, nameBufRetLen, nameLenRet, nameBufRet, tokenScopeRet, tokenRet); } return S_OK; } return S_FALSE; }
// CdNextDomainField: per-app-domain variant of CdNextField — yields the same
// field once per app domain, building the value in the iterated domain.
HRESULT SplitName::CdNextDomainField(ClrDataAccess* dac, CLRDATA_ENUM* handle, IXCLRDataValue** value) { HRESULT status; SplitName* split = FROM_CDENUM(SplitName, *handle); if (!split) { return E_INVALIDARG; } if (split->m_metaEnum.m_appDomain) {
// Use only the caller-provided app domain.
return CdNextField(dac, handle, NULL, NULL, value, 0, NULL, NULL, NULL, NULL); }
//
// Splay fields across all app domains.
//
for (;;) { if (!split->m_lastField) {
// Need to fetch a field.
if ((status = CdNextField(dac, handle, NULL, NULL, NULL, 0, NULL, NULL, NULL, NULL)) != S_OK) { return status; } split->m_metaEnum.m_domainIter.Init(); } if (split->m_metaEnum.m_domainIter.Next()) { break; } split->m_lastField = NULL; } return ClrDataValue:: NewFromFieldDesc(dac, split->m_metaEnum.m_domainIter.GetDomain(), split->m_fieldEnum.IsFieldFromParentClass() ? CLRDATA_VALUE_IS_INHERITED : 0, split->m_lastField, split->m_objBase, split->m_tlsThread, NULL, value, 0, NULL, NULL, NULL, NULL); }
// CdStartType: begins a by-name typedef enumeration handle over 'mod's
// metadata, optionally pinned to one app domain.
HRESULT SplitName::CdStartType(_In_opt_ PCWSTR fullName, ULONG32 nameFlags, Module* mod, AppDomain* appDomain, IXCLRDataAppDomain* pubAppDomain, SplitName** splitRet, CLRDATA_ENUM* handle) { HRESULT status; SplitName* split; *handle = TO_CDENUM(NULL); if ((status = SplitName:: AllocAndSplitString(fullName, SPLIT_TYPE, nameFlags, 0, &split)) != S_OK) { return status; } if ((status = split->m_metaEnum. Start(mod->GetMDImport(), mdtTypeDef, mdTokenNil)) != S_OK) { delete split; return status; } split->m_metaEnum.m_appDomain = appDomain; if (pubAppDomain) { split->m_metaEnum.m_appDomain = ((ClrDataAppDomain*)pubAppDomain)->GetAppDomain(); } split->m_module = mod; *handle = TO_CDENUM(split); if (splitRet) { *splitRet = split; } return S_OK; }
// CdNextType: advances a CdStartType handle to the next typedef matching the
// split namespace/type name.
HRESULT SplitName::CdNextType(CLRDATA_ENUM* handle, mdTypeDef* token) { SplitName* split = FROM_CDENUM(SplitName, *handle); if (!split) { return E_INVALIDARG; } return split->m_metaEnum. NextTokenByName(split->m_namespaceName, split->m_typeName, split->m_nameFlags, token); }
// CdNextDomainType: per-app-domain variant of CdNextType.
HRESULT SplitName::CdNextDomainType(CLRDATA_ENUM* handle, AppDomain** appDomain, mdTypeDef* token) { SplitName* split = FROM_CDENUM(SplitName, *handle); if (!split) { return E_INVALIDARG; } return split->m_metaEnum. NextDomainTokenByName(split->m_namespaceName, split->m_typeName, split->m_nameFlags, appDomain, token); }
//----------------------------------------------------------------------------
//
// DacInstanceManager.
//
// Data retrieved from the target process is cached for two reasons:
//
// 1. It may be necessary to map from the host address back to the target
//    address.  For example, if any code uses a 'this' pointer or
//    takes the address of a field the address has to be translated from
//    host to target.  This requires instances to be held as long
//    as they may be referenced.
//
// 2. Data is often referenced multiple times so caching is an important
//    performance advantage.
//
// Ideally we'd like to implement a simple page cache but this is
// complicated by the fact that user minidump memory can have
// arbitrary granularity and also that the member operator (->)
// needs to return a pointer to an object.  That means that all of
// the data for an object must be sequential and cannot be split
// at page boundaries.
//
// Data can also be accessed with different sizes.  For example,
// a base struct can be accessed, then cast to a derived struct and
// accessed again with the larger derived size.  The cache must
// be able to replace data to maintain the largest amount of data
// touched.
//
// We keep track of each access and the recovered memory for it.
// A hash on target address allows quick access to instance data
// by target address.  The data for each access has a header on it
// for bookkeeping purposes, so host address to target address translation
// is just a matter of backing up to the header and pulling the target
// address from it.  Keeping each access separately allows easy
// replacement by larger accesses.
//
//----------------------------------------------------------------------------
// Constructor: no saved block yet; zero all bookkeeping via InitEmpty().
DacInstanceManager::DacInstanceManager(void) : m_unusedBlock(NULL) { InitEmpty(); }
DacInstanceManager::~DacInstanceManager(void) {
// We are stopping debugging in this case, so don't save any block of memory.
// Otherwise, there will be a memory leak.
Flush(false); }
#if defined(DAC_HASHTABLE)
// Add: inserts 'inst' into the home-grown hash keyed by target address.
// Buckets are chains of HashInstanceKeyBlock filled from the back
// (firstElement counts down); a fresh block is pushed at the chain head when
// the current one is full.  Returns 'inst' on success, NULL on alloc failure.
DAC_INSTANCE* DacInstanceManager::Add(DAC_INSTANCE* inst) {
// Assert that we don't add NULL instances. This allows us to assert that found instances
// are not NULL in DacInstanceManager::Find
_ASSERTE(inst != NULL);
DWORD nHash = DAC_INSTANCE_HASH(inst->addr); HashInstanceKeyBlock* block = m_hash[nHash]; if (!block || block->firstElement == 0) { HashInstanceKeyBlock* newBlock; if (block) { newBlock = (HashInstanceKeyBlock*) new (nothrow) BYTE[HASH_INSTANCE_BLOCK_ALLOC_SIZE]; } else {
// We allocate one big memory chunk that has a block for every index of the hash table to
// improve data locality and reduce the number of allocs. In most cases, a hash bucket will
// use only one block, so improving data locality across blocks (i.e. keeping the buckets of the
// hash table together) should help.
newBlock = (HashInstanceKeyBlock*) ClrVirtualAlloc(NULL, HASH_INSTANCE_BLOCK_ALLOC_SIZE*ARRAY_SIZE(m_hash), MEM_COMMIT, PAGE_READWRITE); } if (!newBlock) { return NULL; } if (block) {
// We add the newest block to the start of the list assuming that most accesses are for
// recently added elements.
newBlock->next = block; m_hash[nHash] = newBlock;
// The previously allocated block
newBlock->firstElement = HASH_INSTANCE_BLOCK_NUM_ELEMENTS; block = newBlock; } else { for (DWORD j = 0; j < ARRAY_SIZE(m_hash); j++) { m_hash[j] = newBlock; newBlock->next = NULL;
// The previously allocated block
newBlock->firstElement = HASH_INSTANCE_BLOCK_NUM_ELEMENTS; newBlock = (HashInstanceKeyBlock*) (((BYTE*) newBlock) + HASH_INSTANCE_BLOCK_ALLOC_SIZE); } block = m_hash[nHash]; } } _ASSERTE(block->firstElement > 0); block->firstElement--; block->instanceKeys[block->firstElement].addr = inst->addr; block->instanceKeys[block->firstElement].instance = inst; inst->next = NULL; return inst; }
#else //DAC_HASHTABLE
// Add (STL-map variant): inserts 'inst' keyed by target address; on a key
// collision the new instance is chained in front (Supersede semantics).
DAC_INSTANCE* DacInstanceManager::Add(DAC_INSTANCE* inst) { _ASSERTE(inst != NULL);
#ifdef _DEBUG
bool isInserted = (m_hash.find(inst->addr) == m_hash.end());
#endif //_DEBUG
DAC_INSTANCE *(&target) = m_hash[inst->addr]; _ASSERTE(!isInserted || target == NULL); if( target != NULL ) {
//This is necessary to preserve the semantics of Supersede, however, it
//is more or less dead code.
inst->next = target; target = inst;
//verify descending order
_ASSERTE(inst->size >= target->size); } else { target = inst; } return inst; }
#endif // #if defined(DAC_HASHTABLE)
// Alloc: carves an aligned DAC_INSTANCE (header + 'size' data bytes) out of a
// large VirtualAlloc'd block, growing the block list on demand and reusing the
// single saved unused block when it is big enough.
DAC_INSTANCE* DacInstanceManager::Alloc(TADDR addr, ULONG32 size, DAC_USAGE_TYPE usage) { SUPPORTS_DAC_HOST_ONLY; DAC_INSTANCE_BLOCK* block; DAC_INSTANCE* inst; ULONG32 fullSize; static_assert_no_msg(sizeof(DAC_INSTANCE_BLOCK) <= DAC_INSTANCE_ALIGN); static_assert_no_msg((sizeof(DAC_INSTANCE) & (DAC_INSTANCE_ALIGN - 1)) == 0);
//
// All allocated instances must be kept alive as long
// as anybody may have a host pointer for one of them.
// This means that we cannot delete an arbitrary instance
// unless we are sure no pointers exist, which currently
// is not possible to determine, thus we just hold everything
// until a Flush.
// This greatly simplifies instance allocation
// as we can then just sweep through large blocks rather
// than having to use a real allocator.  The only
// complication is that we need to keep all instance
// data aligned.  We have guaranteed that the header will
// preserve alignment of the data following if the header
// is aligned, so as long as we round up all allocations
// to a multiple of the alignment size everything just works.
//
fullSize = (size + DAC_INSTANCE_ALIGN - 1) & ~(DAC_INSTANCE_ALIGN - 1); _ASSERTE(fullSize && fullSize <= 0xffffffff - 2 * sizeof(*inst)); fullSize += sizeof(*inst);
//
// Check for an existing block with space.
//
for (block = m_blocks; block; block = block->next) { if (fullSize <= block->bytesFree) { break; } } if (!block) {
//
// No existing block has enough space, so allocate a new
// one if necessary and link it in.  We know we're allocating large
// blocks so directly VirtualAlloc.  We save one block through a
// flush so that we spend less time allocating/deallocating.
//
ULONG32 blockSize = fullSize + DAC_INSTANCE_ALIGN; if (blockSize < DAC_INSTANCE_BLOCK_ALLOCATION) { blockSize = DAC_INSTANCE_BLOCK_ALLOCATION; }
// If we have a saved block and it's large enough, use it.
block = m_unusedBlock; if ((block != NULL) && ((block->bytesUsed + block->bytesFree) >= blockSize)) { m_unusedBlock = NULL;
// Right now, we're locked to DAC_INSTANCE_BLOCK_ALLOCATION but
// that might change in the future if we decide to do something
// else with the size guarantee in code:DacInstanceManager::FreeAllBlocks
blockSize = block->bytesUsed + block->bytesFree; } else { block = (DAC_INSTANCE_BLOCK*) ClrVirtualAlloc(NULL, blockSize, MEM_COMMIT, PAGE_READWRITE); } if (!block) { return NULL; }
// Keep the first aligned unit for the block header.
block->bytesUsed = DAC_INSTANCE_ALIGN; block->bytesFree = blockSize - DAC_INSTANCE_ALIGN; block->next = m_blocks; m_blocks = block; m_blockMemUsage += blockSize; } inst = (DAC_INSTANCE*)((PBYTE)block + block->bytesUsed); block->bytesUsed += fullSize; _ASSERTE(block->bytesFree >= fullSize); block->bytesFree -= fullSize; inst->next = NULL; inst->addr = addr; inst->size = size; inst->sig = DAC_INSTANCE_SIG; inst->usage = usage; inst->enumMem = 0; inst->MDEnumed = 0; m_numInst++; m_instMemUsage += fullSize; return inst; }
// ReturnAlloc: undoes the most recent Alloc from whichever block 'inst' sits
// at the end of (used on error paths before the instance was published).
void DacInstanceManager::ReturnAlloc(DAC_INSTANCE* inst) { SUPPORTS_DAC_HOST_ONLY; DAC_INSTANCE_BLOCK* block; DAC_INSTANCE_BLOCK * pPrevBlock; ULONG32 fullSize;
//
// This special routine handles cleanup in
// cases where an instance has been allocated
// but must be returned due to a following error.
// The given instance must be the last instance
// in an existing block.
//
fullSize = ((inst->size + DAC_INSTANCE_ALIGN - 1) & ~(DAC_INSTANCE_ALIGN - 1)) + sizeof(*inst); pPrevBlock = NULL; for (block = m_blocks; block; pPrevBlock = block, block = block->next) { if ((PBYTE)inst == (PBYTE)block + (block->bytesUsed - fullSize)) { break; } } if (!block) { return; } block->bytesUsed -= fullSize; block->bytesFree += fullSize; m_numInst--; m_instMemUsage -= fullSize;
// If the block is empty after returning the specified instance, that means this block was newly created
// when this instance was allocated.  We have seen cases where we are asked to allocate a
// large chunk of memory only to fail to read the memory from a dump later on, i.e. when both the target
// address and the size are invalid.  If we keep the allocation, we'll grow the VM size unnecessarily.
// Thus, release a block if it's empty and if it's not the default size (to avoid thrashing memory).
// See Dev10 Dbug 812112 for more information.
if ((block->bytesUsed == DAC_INSTANCE_ALIGN) && ((block->bytesFree + block->bytesUsed) != DAC_INSTANCE_BLOCK_ALLOCATION)) {
// The empty block is at the beginning of the list.
if (pPrevBlock == NULL) { m_blocks = block->next; } else { _ASSERTE(pPrevBlock->next == block); pPrevBlock->next = block->next; } ClrVirtualFree(block, 0, MEM_RELEASE); } }
#if defined(DAC_HASHTABLE)
// Find: looks up the cached instance for target address 'addr' in the
// hand-rolled hash (bucket chains scanned newest-block-first).  Returns NULL
// when the address has never been cached.
DAC_INSTANCE* DacInstanceManager::Find(TADDR addr) {
#if defined(DAC_MEASURE_PERF)
unsigned _int64 nStart, nEnd; g_nFindCalls++; nStart = GetCycleCount();
#endif // #if defined(DAC_MEASURE_PERF)
HashInstanceKeyBlock* block = m_hash[DAC_INSTANCE_HASH(addr)];
#if defined(DAC_MEASURE_PERF)
nEnd = GetCycleCount(); g_nFindHashTotalTime += nEnd - nStart;
#endif // #if defined(DAC_MEASURE_PERF)
while (block) { DWORD nIndex = block->firstElement; for (; nIndex < HASH_INSTANCE_BLOCK_NUM_ELEMENTS; nIndex++) { if (block->instanceKeys[nIndex].addr == addr) {
#if defined(DAC_MEASURE_PERF)
nEnd = GetCycleCount(); g_nFindHits++; g_nFindTotalTime += nEnd - nStart; if (g_nStackWalk) g_nFindStackTotalTime += nEnd - nStart;
#endif // #if defined(DAC_MEASURE_PERF)
DAC_INSTANCE* inst = block->instanceKeys[nIndex].instance;
// inst should not be NULL even if the address was superseded. We search
// the entries in the reverse order they were added. So we should have
// found the superseding entry before this one. (Of course, if a NULL instance
// has been added, this assert is meaningless. DacInstanceManager::Add
// asserts that NULL instances aren't added.)
_ASSERTE(inst != NULL); return inst; } } block = block->next; }
#if defined(DAC_MEASURE_PERF)
nEnd = GetCycleCount(); g_nFindFails++; g_nFindTotalTime += nEnd - nStart; if (g_nStackWalk) g_nFindStackTotalTime += nEnd - nStart;
#endif // #if defined(DAC_MEASURE_PERF)
return NULL; }
#else //DAC_HASHTABLE
// Find (STL-map variant): direct map lookup by target address.
DAC_INSTANCE* DacInstanceManager::Find(TADDR addr) { DacInstanceHashIterator iter = m_hash.find(addr); if( iter == m_hash.end() ) { return NULL; } else { return iter->second; } }
#endif // if defined(DAC_HASHTABLE)
// Write: flushes a cached instance's data back to the target; for DAC_VPTR
// instances the first pointer-sized slot (the host-side vtable) is skipped.
HRESULT DacInstanceManager::Write(DAC_INSTANCE* inst, bool throwEx) { HRESULT status; if (inst->usage == DAC_VPTR) {
// Skip over the host-side vtable pointer when
// writing back.
status = DacWriteAll(inst->addr + sizeof(TADDR), (PBYTE)(inst + 1) + sizeof(PVOID), inst->size - sizeof(TADDR), throwEx); } else {
// Write the whole instance back.
status = DacWriteAll(inst->addr, inst + 1, inst->size, throwEx); } return status; }
#if defined(DAC_HASHTABLE)
// Supersede: detaches 'inst' from the hash (its slot is NULLed, not removed)
// and parks it on the superseded list because live host pointers may still
// reference it; a larger instance for the same address will replace it.
void DacInstanceManager::Supersede(DAC_INSTANCE* inst) { _ASSERTE(inst != NULL);
//
// This instance has been superseded by a larger
// one and so must be removed from the hash.  However,
// code may be holding the instance pointer so it
// can't just be deleted.  Put it on a list for
// later cleanup.
//
HashInstanceKeyBlock* block = m_hash[DAC_INSTANCE_HASH(inst->addr)]; while (block) { DWORD nIndex = block->firstElement; for (; nIndex < HASH_INSTANCE_BLOCK_NUM_ELEMENTS; nIndex++) { if (block->instanceKeys[nIndex].instance == inst) { block->instanceKeys[nIndex].instance = NULL; break; } } if (nIndex < HASH_INSTANCE_BLOCK_NUM_ELEMENTS) { break; } block = block->next; } AddSuperseded(inst); }
#else //DAC_HASHTABLE
// Supersede (STL-map variant): same contract as above.  (Definition continues
// past this chunk; code reproduced byte-identical.)
void DacInstanceManager::Supersede(DAC_INSTANCE* inst) { _ASSERTE(inst != NULL);
//
// This instance has been superseded by a larger
// one and so must be removed from the hash.  However,
// code may be holding the instance pointer so it
// can't just be deleted.  Put it on a list for
// later cleanup.
// DacInstanceHashIterator iter = m_hash.find(inst->addr); if( iter == m_hash.end() ) return; DAC_INSTANCE** bucket = &(iter->second); DAC_INSTANCE* cur = *bucket; DAC_INSTANCE* prev = NULL; //walk through the chain looking for this particular instance while (cur) { if (cur == inst) { if (!prev) { *bucket = inst->next; } else { prev->next = inst->next; } break; } prev = cur; cur = cur->next; } AddSuperseded(inst); } #endif // if defined(DAC_HASHTABLE) // This is the default Flush() called when the DAC cache is invalidated, // e.g. when we continue the debuggee process. In this case, we want to // save one block of memory to avoid thrashing. See the usage of m_unusedBlock // for more information. void DacInstanceManager::Flush(void) { Flush(true); } void DacInstanceManager::Flush(bool fSaveBlock) { SUPPORTS_DAC_HOST_ONLY; // // All allocated memory is in the block // list, so just free the blocks and // forget all the internal pointers. // for (;;) { FreeAllBlocks(fSaveBlock); DAC_INSTANCE_PUSH* push = m_instPushed; if (!push) { break; } m_instPushed = push->next; m_blocks = push->blocks; delete push; } // If we are not saving any memory blocks, then clear the saved buffer block (if any) as well. 
if (!fSaveBlock) { if (m_unusedBlock != NULL) { ClrVirtualFree(m_unusedBlock, 0, MEM_RELEASE); m_unusedBlock = NULL; } } #if defined(DAC_HASHTABLE) for (int i = STRING_LENGTH(m_hash); i >= 0; i--) { HashInstanceKeyBlock* block = m_hash[i]; HashInstanceKeyBlock* next; while (block) { next = block->next; if (next) { delete [] block; } else if (i == 0) { ClrVirtualFree(block, 0, MEM_RELEASE); } block = next; } } #else //DAC_HASHTABLE m_hash.clear(); #endif //DAC_HASHTABLE InitEmpty(); } #if defined(DAC_HASHTABLE) void DacInstanceManager::ClearEnumMemMarker(void) { ULONG i; DAC_INSTANCE* inst; for (i = 0; i < ARRAY_SIZE(m_hash); i++) { HashInstanceKeyBlock* block = m_hash[i]; while (block) { DWORD j; for (j = block->firstElement; j < HASH_INSTANCE_BLOCK_NUM_ELEMENTS; j++) { inst = block->instanceKeys[j].instance; if (inst != NULL) { inst->enumMem = 0; } } block = block->next; } } for (inst = m_superseded; inst; inst = inst->next) { inst->enumMem = 0; } } #else //DAC_HASHTABLE void DacInstanceManager::ClearEnumMemMarker(void) { ULONG i; DAC_INSTANCE* inst; DacInstanceHashIterator end = m_hash.end(); /* REVISIT_TODO Fri 10/20/2006 * This might have an issue, since it might miss chained entries off of * ->next. However, ->next is going away, and for all intents and * purposes, this never happens. */ for( DacInstanceHashIterator cur = m_hash.begin(); cur != end; ++cur ) { cur->second->enumMem = 0; } for (inst = m_superseded; inst; inst = inst->next) { inst->enumMem = 0; } } #endif // if defined(DAC_HASHTABLE) #if defined(DAC_HASHTABLE) // // // Iterating through all of the hash entry and report the memory // instance to minidump // // This function returns the total number of bytes that it reported. 
// // UINT DacInstanceManager::DumpAllInstances( ICLRDataEnumMemoryRegionsCallback *pCallBack) // memory report call back { ULONG i; DAC_INSTANCE* inst; UINT cbTotal = 0; #if defined(DAC_MEASURE_PERF) FILE* fp = fopen("c:\\dumpLog.txt", "a"); int total = 0; #endif // #if defined(DAC_MEASURE_PERF) for (i = 0; i < ARRAY_SIZE(m_hash); i++) { #if defined(DAC_MEASURE_PERF) int numInBucket = 0; #endif // #if defined(DAC_MEASURE_PERF) HashInstanceKeyBlock* block = m_hash[i]; while (block) { DWORD j; for (j = block->firstElement; j < HASH_INSTANCE_BLOCK_NUM_ELEMENTS; j++) { inst = block->instanceKeys[j].instance; // Only report those we intended to. // So far, only metadata is excluded! // if (inst && inst->noReport == 0) { cbTotal += inst->size; HRESULT hr = pCallBack->EnumMemoryRegion(TO_CDADDR(inst->addr), inst->size); if (hr == COR_E_OPERATIONCANCELED) { ThrowHR(hr); } } #if defined(DAC_MEASURE_PERF) if (inst) { numInBucket++; } #endif // #if defined(DAC_MEASURE_PERF) } block = block->next; } #if defined(DAC_MEASURE_PERF) fprintf(fp, "%4d: %4d%s", i, numInBucket, (i+1)%5? "; " : "\n"); total += numInBucket; #endif // #if defined(DAC_MEASURE_PERF) } #if defined(DAC_MEASURE_PERF) fprintf(fp, "\n\nTotal entries: %d\n\n", total); fclose(fp); #endif // #if defined(DAC_MEASURE_PERF) return cbTotal; } #else //DAC_HASHTABLE // // // Iterating through all of the hash entry and report the memory // instance to minidump // // This function returns the total number of bytes that it reported. 
// // UINT DacInstanceManager::DumpAllInstances( ICLRDataEnumMemoryRegionsCallback *pCallBack) // memory report call back { SUPPORTS_DAC_HOST_ONLY; DAC_INSTANCE* inst; UINT cbTotal = 0; #if defined(DAC_MEASURE_PERF) FILE* fp = fopen("c:\\dumpLog.txt", "a"); #endif // #if defined(DAC_MEASURE_PERF) #if defined(DAC_MEASURE_PERF) int numInBucket = 0; #endif // #if defined(DAC_MEASURE_PERF) DacInstanceHashIterator end = m_hash.end(); for (DacInstanceHashIterator cur = m_hash.begin(); end != cur; ++cur) { inst = cur->second; // Only report those we intended to. // So far, only metadata is excluded! // if (inst->noReport == 0) { cbTotal += inst->size; HRESULT hr = pCallBack->EnumMemoryRegion(TO_CDADDR(inst->addr), inst->size); if (hr == COR_E_OPERATIONCANCELED) { ThrowHR(hr); } } #if defined(DAC_MEASURE_PERF) numInBucket++; #endif // #if defined(DAC_MEASURE_PERF) } #if defined(DAC_MEASURE_PERF) fprintf(fp, "\n\nTotal entries: %d\n\n", numInBucket); fclose(fp); #endif // #if defined(DAC_MEASURE_PERF) return cbTotal; } #endif // if defined(DAC_HASHTABLE) DAC_INSTANCE_BLOCK* DacInstanceManager::FindInstanceBlock(DAC_INSTANCE* inst) { for (DAC_INSTANCE_BLOCK* block = m_blocks; block; block = block->next) { if ((PBYTE)inst >= (PBYTE)block && (PBYTE)inst < (PBYTE)block + block->bytesUsed) { return block; } } return NULL; } // If fSaveBlock is false, free all blocks of allocated memory. Otherwise, // free all blocks except the one we save to avoid thrashing memory. // Callers very frequently flush repeatedly with little memory needed in DAC // so this avoids wasteful repeated allocations/deallocations. // There is a very unlikely case that we'll have allocated an extremely large // block; if this is the only block we will save none since this block will // remain allocated. 
void DacInstanceManager::FreeAllBlocks(bool fSaveBlock) { DAC_INSTANCE_BLOCK* block; while ((block = m_blocks)) { m_blocks = block->next; // If we haven't saved our single block yet and this block is the default size // then we will save it instead of freeing it. This avoids saving an unnecessarily large // memory block. // Do *NOT* trash the byte counts. code:DacInstanceManager::Alloc // depends on them being correct when checking to see if a block is large enough. if (fSaveBlock && (m_unusedBlock == NULL) && ((block->bytesFree + block->bytesUsed) == DAC_INSTANCE_BLOCK_ALLOCATION)) { // Just to avoid confusion, since we're keeping it around. block->next = NULL; m_unusedBlock = block; } else { ClrVirtualFree(block, 0, MEM_RELEASE); } } } //---------------------------------------------------------------------------- // // DacStreamManager. // //---------------------------------------------------------------------------- #ifdef FEATURE_MINIMETADATA_IN_TRIAGEDUMPS namespace serialization { namespace bin { //======================================================================== // Support functions for binary serialization of simple types to a buffer: // - raw_size() returns the size in bytes of the binary representation // of a value. // - raw_serialize() copies the binary representation of a value into a // buffer. // - raw_deserialize() generates a value from its binary representation // in a buffer. // Beyond simple types the APIs below support SString instances. SStrings // are stored as UTF8 strings. 
//======================================================================== static const size_t ErrOverflow = (size_t)(-1); #ifndef TARGET_UNIX // Template class is_blittable template <typename _Ty, typename Enable = void> struct is_blittable : std::false_type { // determines whether _Ty is blittable }; template <typename _Ty> struct is_blittable<_Ty, typename std::enable_if<std::is_arithmetic<_Ty>::value>::type> : std::true_type { // determines whether _Ty is blittable }; // allow types to declare themselves blittable by including a static bool // member "is_blittable". template <typename _Ty> struct is_blittable<_Ty, typename std::enable_if<_Ty::is_blittable>::type> : std::true_type { // determines whether _Ty is blittable }; //======================================================================== // serialization::bin::Traits<T> enables binary serialization and // deserialization of instances of T. //======================================================================== // // General specialization for non-blittable types - must be overridden // for each specific non-blittable type. // template <typename T, typename Enable = void> class Traits { public: static FORCEINLINE size_t raw_size(const T & val) { static_assert(false, "Non-blittable types need explicit specializations"); } }; // // General type trait supporting serialization/deserialization of blittable // type arguments (as defined by the is_blittable<> type traits above). // template <typename T> class Traits<T, typename std::enable_if<is_blittable<T>::value>::type> { #else // TARGET_UNIX template <typename T> class Traits { #endif // !TARGET_UNIX public: // // raw_size() returns the size in bytes of the binary representation of a // value. // static FORCEINLINE size_t raw_size(const T & val) { return sizeof(T); } // // raw_serialize() copies the binary representation of a value into a // "dest" buffer that has "destSize" bytes available. 
// Returns raw_size(val), or ErrOverflow if the buffer does not have // enough space to accommodate "val". // static FORCEINLINE size_t raw_serialize(BYTE* dest, size_t destSize, const T & val) { size_t cnt = raw_size(val); if (destSize < cnt) { return ErrOverflow; } memcpy_s(dest, destSize, &val, cnt); return cnt; } // // raw_deserialize() generates a value "val" from its binary // representation in a buffer "src". // Returns raw_size(val), or ErrOverflow if the buffer does not have // enough space to accommodate "val". // static FORCEINLINE size_t raw_deserialize(T & val, const BYTE* src, size_t srcSize) { size_t cnt = raw_size(*(T*)src); if (srcSize < cnt) { return ErrOverflow; } memcpy_s(&val, cnt, src, cnt); return cnt; } }; // // Specialization for UTF8 strings // template<> class Traits<LPCUTF8> { public: static FORCEINLINE size_t raw_size(const LPCUTF8 & val) { return strlen(val) + 1; } static FORCEINLINE size_t raw_serialize(BYTE* dest, size_t destSize, const LPCUTF8 & val) { size_t cnt = raw_size(val); if (destSize < cnt) { return ErrOverflow; } memcpy_s(dest, destSize, &val, cnt); return cnt; } static FORCEINLINE size_t raw_deserialize(LPCUTF8 & val, const BYTE* src, size_t srcSize) { size_t cnt = strnlen((LPCUTF8)src, srcSize) + 1; // assert we found a NULL terminated string at "src" if (srcSize < cnt) { return ErrOverflow; } // we won't allocate another buffer for this string val = (LPCUTF8)src; return cnt; } }; // // Specialization for SString. // SString serialization/deserialization is performed to/from a UTF8 // string. // template<> class Traits<SString> { public: static FORCEINLINE size_t raw_size(const SString & val) { StackSString s; val.ConvertToUTF8(s); // make sure to include the NULL terminator return s.GetCount() + 1; } static FORCEINLINE size_t raw_serialize(BYTE* dest, size_t destSize, const SString & val) { // instead of calling raw_size() we inline it here, so we can reuse // the UTF8 string obtained below as an argument to memcpy. 
StackSString s; val.ConvertToUTF8(s); // make sure to include the NULL terminator size_t cnt = s.GetCount() + 1; if (destSize < cnt) { return ErrOverflow; } memcpy_s(dest, destSize, s.GetUTF8NoConvert(), cnt); return cnt; } static FORCEINLINE size_t raw_deserialize(SString & val, const BYTE* src, size_t srcSize) { size_t cnt = strnlen((LPCUTF8)src, srcSize) + 1; // assert we found a NULL terminated string at "src" if (srcSize < cnt) { return ErrOverflow; } // a literal SString avoids a new allocation + copy SString sUtf8(SString::Utf8Literal, (LPCUTF8) src); sUtf8.ConvertToUnicode(val); return cnt; } }; #ifndef TARGET_UNIX // // Specialization for SString-derived classes (like SStrings) // template<typename T> class Traits<T, typename std::enable_if<std::is_base_of<SString, T>::value>::type> : public Traits<SString> { }; #endif // !TARGET_UNIX // // Convenience functions to allow argument type deduction // template <typename T> FORCEINLINE size_t raw_size(const T & val) { return Traits<T>::raw_size(val); } template <typename T> FORCEINLINE size_t raw_serialize(BYTE* dest, size_t destSize, const T & val) { return Traits<T>::raw_serialize(dest, destSize, val); } template <typename T> FORCEINLINE size_t raw_deserialize(T & val, const BYTE* src, size_t srcSize) { return Traits<T>::raw_deserialize(val, src, srcSize); } enum StreamBuffState { sbsOK, sbsUnrecoverable, sbsOOM = sbsUnrecoverable, }; // // OStreamBuff - Manages writing to an output buffer // class OStreamBuff { public: OStreamBuff(BYTE * _buff, size_t _buffsize) : buffsize(_buffsize) , buff(_buff) , crt(0) , sbs(sbsOK) { } template <typename T> OStreamBuff& operator << (const T & val) { if (sbs >= sbsUnrecoverable) return *this; size_t cnt = raw_serialize(buff+crt, buffsize-crt, val); if (cnt == ErrOverflow) { sbs = sbsOOM; } else { crt += cnt; } return *this; } inline size_t GetPos() const { return crt; } inline BOOL operator!() const { return sbs >= sbsUnrecoverable; } inline StreamBuffState State() const 
{ return sbs; } private: size_t buffsize; // size of buffer BYTE* buff; // buffer to stream to size_t crt; // current offset in buffer StreamBuffState sbs; // current state }; // // OStreamBuff - Manages reading from an input buffer // class IStreamBuff { public: IStreamBuff(const BYTE* _buff, size_t _buffsize) : buffsize(_buffsize) , buff(_buff) , crt(0) , sbs(sbsOK) { } template <typename T> IStreamBuff& operator >> (T & val) { if (sbs >= sbsUnrecoverable) return *this; size_t cnt = raw_deserialize(val, buff+crt, buffsize-crt); if (cnt == ErrOverflow) { sbs = sbsOOM; } else { crt += cnt; } return *this; } inline size_t GetPos() const { return crt; } inline BOOL operator!() const { return sbs >= sbsUnrecoverable; } inline StreamBuffState State() const { return sbs; } private: size_t buffsize; // size of buffer const BYTE * buff; // buffer to read from size_t crt; // current offset in buffer StreamBuffState sbs; // current state }; } } using serialization::bin::StreamBuffState; using serialization::bin::IStreamBuff; using serialization::bin::OStreamBuff; // Callback function type used by DacStreamManager to coordinate // amount of available memory between multiple streamable data // structures (e.g. 
DacEENamesStreamable) typedef bool (*Reserve_Fnptr)(DWORD size, void * writeState); // // DacEENamesStreamable // Stores EE struct* -> Name mappings and streams them to a // streambuf when asked // class DacEENamesStreamable { private: // the hash map storing the interesting mappings of EE* -> Names MapSHash< TADDR, SString, NoRemoveSHashTraits < NonDacAwareSHashTraits< MapSHashTraits <TADDR, SString> > > > m_hash; Reserve_Fnptr m_reserveFn; void *m_writeState; private: // signature value in the header in stream static const DWORD sig = 0x614e4545; // "EENa" - EE Name // header in stream struct StreamHeader { DWORD sig; // 0x614e4545 == "EENa" DWORD cnt; // count of entries static const bool is_blittable = true; }; public: DacEENamesStreamable() : m_reserveFn(NULL) , m_writeState(NULL) {} // Ensures the instance is ready for caching data and later writing // its map entries to an OStreamBuff. bool PrepareStreamForWriting(Reserve_Fnptr pfn, void * writeState) { _ASSERTE(pfn != NULL && writeState != NULL); m_reserveFn = pfn; m_writeState = writeState; DWORD size = (DWORD) sizeof(StreamHeader); // notify owner to reserve space for a StreamHeader return m_reserveFn(size, m_writeState); } // Adds a new mapping from an EE struct pointer (e.g. MethodDesc*) to // its name bool AddEEName(TADDR taEE, const SString & eeName) { _ASSERTE(m_reserveFn != NULL && m_writeState != NULL); // as a micro-optimization convert to Utf8 here as both raw_size and // raw_serialize are optimized for Utf8... StackSString seeName; eeName.ConvertToUTF8(seeName); DWORD size = (DWORD)(serialization::bin::raw_size(taEE) + serialization::bin::raw_size(seeName)); // notify owner of the amount of space needed in the buffer if (m_reserveFn(size, m_writeState)) { // if there's still space cache the entry in m_hash m_hash.AddOrReplace(KeyValuePair<TADDR, SString>(taEE, seeName)); return true; } else { return false; } } // Finds an EE name from a target address of an EE struct (e.g. 
// MethodDesc*) bool FindEEName(TADDR taEE, SString & eeName) const { return m_hash.Lookup(taEE, &eeName) == TRUE; } void Clear() { m_hash.RemoveAll(); } // Writes a header and the hash entries to an OStreamBuff HRESULT StreamTo(OStreamBuff &out) const { StreamHeader hdr; hdr.sig = sig; hdr.cnt = (DWORD) m_hash.GetCount(); out << hdr; auto end = m_hash.End(); for (auto cur = m_hash.Begin(); end != cur; ++cur) { out << cur->Key() << cur->Value(); if (!out) return E_FAIL; } return S_OK; } // Reads a header and the hash entries from an IStreamBuff HRESULT StreamFrom(IStreamBuff &in) { StreamHeader hdr; in >> hdr; // in >> hdr.sig >> hdr.cnt; if (hdr.sig != sig) return E_FAIL; for (size_t i = 0; i < hdr.cnt; ++i) { TADDR taEE; SString eeName; in >> taEE >> eeName; if (!in) return E_FAIL; m_hash.AddOrReplace(KeyValuePair<TADDR, SString>(taEE, eeName)); } return S_OK; } }; //================================================================================ // This class enables two scenarios: // 1. When debugging a triage/mini-dump the class is initialized with a valid // buffer in taMiniMetaDataBuff. Afterwards one can call MdCacheGetEEName to // retrieve the name associated with a MethodDesc*. // 2. When generating a dump one must follow this sequence: // a. Initialize the DacStreamManager passing a valid (if the current // debugging target is a triage/mini-dump) or empty buffer (if the // current target is a live processa full or a heap dump) // b. Call PrepareStreamsForWriting() before starting enumerating any memory // c. Call MdCacheAddEEName() anytime we enumerate an EE structure of interest // d. Call EnumStreams() as the last action in the memory enumeration method. // class DacStreamManager { public: enum eReadOrWrite { eNone, // the stream doesn't exist (target is a live process/full/heap dump) eRO, // the stream exists and we've read it (target is triage/mini-dump) eWO, // the stream doesn't exist but we're creating it // (e.g. 
to save a minidump from the current debugging session) eRW // the stream exists but we're generating another triage/mini-dump }; static const DWORD sig = 0x6d727473; // 'strm' struct StreamsHeader { DWORD dwSig; // 0x6d727473 == "strm" DWORD dwTotalSize; // total size in bytes DWORD dwCntStreams; // number of streams (currently 1) static const bool is_blittable = true; }; DacStreamManager(TADDR miniMetaDataBuffAddress, DWORD miniMetaDataBuffSizeMax) : m_MiniMetaDataBuffAddress(miniMetaDataBuffAddress) , m_MiniMetaDataBuffSizeMax(miniMetaDataBuffSizeMax) , m_rawBuffer(NULL) , m_cbAvailBuff(0) , m_rw(eNone) , m_bStreamsRead(FALSE) , m_EENames() { Initialize(); } ~DacStreamManager() { if (m_rawBuffer != NULL) { delete [] m_rawBuffer; } } bool PrepareStreamsForWriting() { if (m_rw == eNone) m_rw = eWO; else if (m_rw == eRO) m_rw = eRW; else if (m_rw == eRW) /* nothing */; else // m_rw == eWO { // this is a second invocation from a possibly live process // clean up the map since the callstacks/exceptions may be different m_EENames.Clear(); } // update available count based on the header and footer sizes if (m_MiniMetaDataBuffSizeMax < sizeof(StreamsHeader)) return false; m_cbAvailBuff = m_MiniMetaDataBuffSizeMax - sizeof(StreamsHeader); // update available count based on each stream's initial needs if (!m_EENames.PrepareStreamForWriting(&ReserveInBuffer, this)) return false; return true; } bool MdCacheAddEEName(TADDR taEEStruct, const SString& name) { // don't cache unless we enabled "W"riting from a target that does not // already have a stream yet if (m_rw != eWO) return false; m_EENames.AddEEName(taEEStruct, name); return true; } HRESULT EnumStreams(IN CLRDataEnumMemoryFlags flags) { _ASSERTE(flags == CLRDATA_ENUM_MEM_MINI || flags == CLRDATA_ENUM_MEM_TRIAGE); _ASSERTE(m_rw == eWO || m_rw == eRW); DWORD cbWritten = 0; if (m_rw == eWO) { // only dump the stream is it wasn't already present in the target DumpAllStreams(&cbWritten); } else { cbWritten = 
m_MiniMetaDataBuffSizeMax; } DacEnumMemoryRegion(m_MiniMetaDataBuffAddress, cbWritten, false); DacUpdateMemoryRegion(m_MiniMetaDataBuffAddress, cbWritten, m_rawBuffer); return S_OK; } bool MdCacheGetEEName(TADDR taEEStruct, SString & eeName) { if (!m_bStreamsRead) { ReadAllStreams(); } if (m_rw == eNone || m_rw == eWO) { return false; } return m_EENames.FindEEName(taEEStruct, eeName); } private: HRESULT Initialize() { _ASSERTE(m_rw == eNone); _ASSERTE(m_rawBuffer == NULL); HRESULT hr = S_OK; StreamsHeader hdr; DacReadAll(dac_cast<TADDR>(m_MiniMetaDataBuffAddress), &hdr, sizeof(hdr), true); // when the DAC looks at a triage dump or minidump generated using // a "minimetadata" enabled DAC, buff will point to a serialized // representation of a methoddesc->method name hashmap. if (hdr.dwSig == sig) { m_rw = eRO; m_MiniMetaDataBuffSizeMax = hdr.dwTotalSize; hr = S_OK; } else // when the DAC initializes this for the case where the target is // (a) a live process, or (b) a full dump, buff will point to a // zero initialized memory region (allocated w/ VirtualAlloc) if (hdr.dwSig == 0 && hdr.dwTotalSize == 0 && hdr.dwCntStreams == 0) { hr = S_OK; } // otherwise we may have some memory corruption. 
treat this as // a liveprocess/full dump else { hr = S_FALSE; } BYTE * buff = new BYTE[m_MiniMetaDataBuffSizeMax]; DacReadAll(dac_cast<TADDR>(m_MiniMetaDataBuffAddress), buff, m_MiniMetaDataBuffSizeMax, true); m_rawBuffer = buff; return hr; } HRESULT DumpAllStreams(DWORD * pcbWritten) { _ASSERTE(m_rw == eWO); HRESULT hr = S_OK; OStreamBuff out(m_rawBuffer, m_MiniMetaDataBuffSizeMax); // write header StreamsHeader hdr; hdr.dwSig = sig; hdr.dwTotalSize = m_MiniMetaDataBuffSizeMax-m_cbAvailBuff; // will update hdr.dwCntStreams = 1; out << hdr; // write MethodDesc->Method name map hr = m_EENames.StreamTo(out); // wrap up the buffer whether we ecountered an error or not size_t cbWritten = out.GetPos(); cbWritten = ALIGN_UP(cbWritten, sizeof(size_t)); // patch the dwTotalSize field blitted at the beginning of the buffer ((StreamsHeader*)m_rawBuffer)->dwTotalSize = (DWORD) cbWritten; if (pcbWritten) *pcbWritten = (DWORD) cbWritten; return hr; } HRESULT ReadAllStreams() { _ASSERTE(!m_bStreamsRead); if (m_rw == eNone || m_rw == eWO) { // no streams to read... 
m_bStreamsRead = TRUE; return S_FALSE; } HRESULT hr = S_OK; IStreamBuff in(m_rawBuffer, m_MiniMetaDataBuffSizeMax); // read header StreamsHeader hdr; in >> hdr; _ASSERTE(hdr.dwSig == sig); _ASSERTE(hdr.dwCntStreams == 1); // read EE struct pointer -> EE name map m_EENames.Clear(); hr = m_EENames.StreamFrom(in); m_bStreamsRead = TRUE; return hr; } static bool ReserveInBuffer(DWORD size, void * writeState) { DacStreamManager * pThis = reinterpret_cast<DacStreamManager*>(writeState); if (size > pThis->m_cbAvailBuff) { return false; } else { pThis->m_cbAvailBuff -= size; return true; } } private: TADDR m_MiniMetaDataBuffAddress; // TADDR of the buffer DWORD m_MiniMetaDataBuffSizeMax; // max size of buffer BYTE * m_rawBuffer; // inproc copy of buffer DWORD m_cbAvailBuff; // available bytes in buffer eReadOrWrite m_rw; BOOL m_bStreamsRead; DacEENamesStreamable m_EENames; }; #endif // FEATURE_MINIMETADATA_IN_TRIAGEDUMPS //---------------------------------------------------------------------------- // // ClrDataAccess. // //---------------------------------------------------------------------------- LONG ClrDataAccess::s_procInit; ClrDataAccess::ClrDataAccess(ICorDebugDataTarget * pTarget, ICLRDataTarget * pLegacyTarget/*=0*/) { SUPPORTS_DAC_HOST_ONLY; // ctor does no marshalling - don't check with DacCop /* * Stash the various forms of the new ICorDebugDataTarget interface */ m_pTarget = pTarget; m_pTarget->AddRef(); HRESULT hr; hr = m_pTarget->QueryInterface(__uuidof(ICorDebugMutableDataTarget), (void**)&m_pMutableTarget); if (hr != S_OK) { // Create a target which always fails the write requests with CORDBG_E_TARGET_READONLY m_pMutableTarget = new ReadOnlyDataTargetFacade(); m_pMutableTarget->AddRef(); } /* * If we have a legacy target, it means we're providing compatibility for code that used * the old ICLRDataTarget interfaces. 
There are still a few things (like metadata location, * GetImageBase, and VirtualAlloc) that the implementation may use which we haven't superseded * in ICorDebugDataTarget, so we still need access to the old target interfaces. * Any functionality that does exist in ICorDebugDataTarget is accessed from that interface * using the DataTargetAdapter on top of the legacy interface (to unify the calling code). * Eventually we may expose all functionality we need using ICorDebug (possibly a private * interface for things like VirtualAlloc), at which point we can stop using the legacy interfaces * completely (except in the DataTargetAdapter). */ m_pLegacyTarget = NULL; m_pLegacyTarget2 = NULL; m_pLegacyTarget3 = NULL; m_legacyMetaDataLocator = NULL; m_target3 = NULL; if (pLegacyTarget != NULL) { m_pLegacyTarget = pLegacyTarget; m_pLegacyTarget->AddRef(); m_pLegacyTarget->QueryInterface(__uuidof(ICLRDataTarget2), (void**)&m_pLegacyTarget2); m_pLegacyTarget->QueryInterface(__uuidof(ICLRDataTarget3), (void**)&m_pLegacyTarget3); if (pLegacyTarget->QueryInterface(__uuidof(ICLRMetadataLocator), (void**)&m_legacyMetaDataLocator) != S_OK) { // The debugger doesn't implement IMetadataLocator. Use // IXCLRDataTarget3 if that exists. Otherwise we don't need it. pLegacyTarget->QueryInterface(__uuidof(IXCLRDataTarget3), (void**)&m_target3); } } m_globalBase = 0; m_refs = 1; m_instanceAge = 0; m_debugMode = GetEnvironmentVariableA("MSCORDACWKS_DEBUG", NULL, 0) != 0; m_enumMemCb = NULL; m_updateMemCb = NULL; m_enumMemFlags = (CLRDataEnumMemoryFlags)-1; // invalid m_jitNotificationTable = NULL; m_gcNotificationTable = NULL; #ifdef FEATURE_MINIMETADATA_IN_TRIAGEDUMPS m_streams = NULL; #endif // FEATURE_MINIMETADATA_IN_TRIAGEDUMPS // Target consistency checks are disabled by default. // See code:ClrDataAccess::SetTargetConsistencyChecks for details. 
m_fEnableTargetConsistencyAsserts = false; #ifdef _DEBUG if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_DbgDACEnableAssert)) { m_fEnableTargetConsistencyAsserts = true; } // Verification asserts are disabled by default because some debuggers (cdb/windbg) probe likely locations // for DAC and having this assert pop up all the time can be annoying. We let derived classes enable // this if they want. It can also be overridden at run-time with COMPlus_DbgDACAssertOnMismatch, // see ClrDataAccess::VerifyDlls for details. m_fEnableDllVerificationAsserts = false; #endif } ClrDataAccess::~ClrDataAccess(void) { SUPPORTS_DAC_HOST_ONLY; #ifdef FEATURE_MINIMETADATA_IN_TRIAGEDUMPS if (m_streams) { delete m_streams; } #endif // FEATURE_MINIMETADATA_IN_TRIAGEDUMPS delete [] m_jitNotificationTable; if (m_pLegacyTarget) { m_pLegacyTarget->Release(); } if (m_pLegacyTarget2) { m_pLegacyTarget2->Release(); } if (m_pLegacyTarget3) { m_pLegacyTarget3->Release(); } if (m_legacyMetaDataLocator) { m_legacyMetaDataLocator->Release(); } if (m_target3) { m_target3->Release(); } m_pTarget->Release(); m_pMutableTarget->Release(); } STDMETHODIMP ClrDataAccess::QueryInterface(THIS_ IN REFIID interfaceId, OUT PVOID* iface) { void* ifaceRet; if (IsEqualIID(interfaceId, IID_IUnknown) || IsEqualIID(interfaceId, __uuidof(IXCLRDataProcess)) || IsEqualIID(interfaceId, __uuidof(IXCLRDataProcess2))) { ifaceRet = static_cast<IXCLRDataProcess2*>(this); } else if (IsEqualIID(interfaceId, __uuidof(ICLRDataEnumMemoryRegions))) { ifaceRet = static_cast<ICLRDataEnumMemoryRegions*>(this); } else if (IsEqualIID(interfaceId, __uuidof(ISOSDacInterface))) { ifaceRet = static_cast<ISOSDacInterface*>(this); } else if (IsEqualIID(interfaceId, __uuidof(ISOSDacInterface2))) { ifaceRet = static_cast<ISOSDacInterface2*>(this); } else if (IsEqualIID(interfaceId, __uuidof(ISOSDacInterface3))) { ifaceRet = static_cast<ISOSDacInterface3*>(this); } else if (IsEqualIID(interfaceId, __uuidof(ISOSDacInterface4))) { ifaceRet = 
static_cast<ISOSDacInterface4*>(this);
    }
    else if (IsEqualIID(interfaceId, __uuidof(ISOSDacInterface5)))
    {
        ifaceRet = static_cast<ISOSDacInterface5*>(this);
    }
    else if (IsEqualIID(interfaceId, __uuidof(ISOSDacInterface6)))
    {
        ifaceRet = static_cast<ISOSDacInterface6*>(this);
    }
    else if (IsEqualIID(interfaceId, __uuidof(ISOSDacInterface7)))
    {
        ifaceRet = static_cast<ISOSDacInterface7*>(this);
    }
    else if (IsEqualIID(interfaceId, __uuidof(ISOSDacInterface8)))
    {
        ifaceRet = static_cast<ISOSDacInterface8*>(this);
    }
    else if (IsEqualIID(interfaceId, __uuidof(ISOSDacInterface9)))
    {
        ifaceRet = static_cast<ISOSDacInterface9*>(this);
    }
    else if (IsEqualIID(interfaceId, __uuidof(ISOSDacInterface10)))
    {
        ifaceRet = static_cast<ISOSDacInterface10*>(this);
    }
    else if (IsEqualIID(interfaceId, __uuidof(ISOSDacInterface11)))
    {
        ifaceRet = static_cast<ISOSDacInterface11*>(this);
    }
    else
    {
        *iface = NULL;
        return E_NOINTERFACE;
    }

    // Hand out a new reference along with the interface pointer.
    AddRef();
    *iface = ifaceRet;
    return S_OK;
}

// Thread-safe COM refcounting via interlocked operations.
STDMETHODIMP_(ULONG)
ClrDataAccess::AddRef(THIS)
{
    return InterlockedIncrement(&m_refs);
}

STDMETHODIMP_(ULONG)
ClrDataAccess::Release(THIS)
{
    SUPPORTS_DAC_HOST_ONLY;
    LONG newRefs = InterlockedDecrement(&m_refs);
    if (newRefs == 0)
    {
        // Last reference gone; self-destruct (standard COM pattern).
        delete this;
    }
    return newRefs;
}

//----------------------------------------------------------------------------
// Flush: discards all cached state read from the target (metadata imports and
// marshaled host instances) so it is re-read on demand after the target's
// state has changed.
//----------------------------------------------------------------------------
HRESULT STDMETHODCALLTYPE
ClrDataAccess::Flush(void)
{
    SUPPORTS_DAC_HOST_ONLY;

    //
    // Free MD import objects.
    //
    m_mdImports.Flush();

    // Free instance memory.
    m_instances.Flush();

    // When the host instance cache is flushed we
    // update the instance age count so that
    // all child objects automatically become
    // invalid.  This prevents them from using
    // any pointers they've kept to host instances
    // which are now gone.
    m_instanceAge++;

    return S_OK;
}

//----------------------------------------------------------------------------
// Task (managed thread) enumeration.  The CLRDATA_ENUM handle is simply the
// current Thread* encoded with TO_CDENUM; a NULL thread ends the enumeration.
//----------------------------------------------------------------------------
HRESULT STDMETHODCALLTYPE
ClrDataAccess::StartEnumTasks(
    /* [out] */ CLRDATA_ENUM* handle)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        if (ThreadStore::s_pThreadStore)
        {
            Thread* thread = ThreadStore::GetAllThreadList(NULL, 0, 0);
            *handle = TO_CDENUM(thread);
            status = *handle ? S_OK : S_FALSE;
        }
        else
        {
            // No thread store yet, so nothing to enumerate.
            status = S_FALSE;
        }
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

// Returns the next task and advances the handle to the next Thread*.
HRESULT STDMETHODCALLTYPE
ClrDataAccess::EnumTask(
    /* [in, out] */ CLRDATA_ENUM* handle,
    /* [out] */ IXCLRDataTask **task)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        if (*handle)
        {
            Thread* thread = FROM_CDENUM(Thread, *handle);
            *task = new (nothrow) ClrDataTask(this, thread);
            if (*task)
            {
                thread = ThreadStore::GetAllThreadList(thread, 0, 0);
                *handle = TO_CDENUM(thread);
                status = S_OK;
            }
            else
            {
                status = E_OUTOFMEMORY;
            }
        }
        else
        {
            status = S_FALSE;
        }
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

HRESULT STDMETHODCALLTYPE
ClrDataAccess::EndEnumTasks(
    /* [in] */ CLRDATA_ENUM handle)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        // Enumerator holds no resources.
        status = S_OK;
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

// Looks up the managed Thread for an OS thread id and wraps it as a task.
HRESULT STDMETHODCALLTYPE
ClrDataAccess::GetTaskByOSThreadID(
    /* [in] */ ULONG32 osThreadID,
    /* [out] */ IXCLRDataTask **task)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        status = E_INVALIDARG;
        Thread* thread = DacGetThread(osThreadID);
        if (thread != NULL)
        {
            *task = new (nothrow) ClrDataTask(this, thread);
            status = *task ? S_OK : E_OUTOFMEMORY;
        }
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

// Looks up a managed thread by its unique task id.
HRESULT STDMETHODCALLTYPE
ClrDataAccess::GetTaskByUniqueID(
    /* [in] */ ULONG64 uniqueID,
    /* [out] */ IXCLRDataTask **task)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        Thread* thread = FindClrThreadByTaskId(uniqueID);
        if (thread)
        {
            *task = new (nothrow) ClrDataTask(this, thread);
            status = *task ? S_OK : E_OUTOFMEMORY;
        }
        else
        {
            status = E_INVALIDARG;
        }
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

HRESULT STDMETHODCALLTYPE
ClrDataAccess::GetFlags(
    /* [out] */ ULONG32 *flags)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        // XXX Microsoft - GC check.
        *flags = CLRDATA_PROCESS_DEFAULT;
        status = S_OK;
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

// Two ClrDataAccess objects are "the same" iff they wrap the same data target.
HRESULT STDMETHODCALLTYPE
ClrDataAccess::IsSameObject(
    /* [in] */ IXCLRDataProcess* process)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        status = m_pTarget == ((ClrDataAccess*)process)->m_pTarget ?
            S_OK : S_FALSE;
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

// Not implemented.
HRESULT STDMETHODCALLTYPE
ClrDataAccess::GetManagedObject(
    /* [out] */ IXCLRDataValue **value)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        // XXX Microsoft.
        status = E_NOTIMPL;
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

// Not implemented.
HRESULT STDMETHODCALLTYPE
ClrDataAccess::GetDesiredExecutionState(
    /* [out] */ ULONG32 *state)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        // XXX Microsoft.
        status = E_NOTIMPL;
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

// Not implemented.
HRESULT STDMETHODCALLTYPE
ClrDataAccess::SetDesiredExecutionState(
    /* [in] */ ULONG32 state)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        // XXX Microsoft.
status = E_NOTIMPL;
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

//----------------------------------------------------------------------------
// Classifies an address as managed method code, a runtime unmanaged stub, or
// unrecognized.  Classification itself never fails; only a DAC read failure
// produces an error.
//----------------------------------------------------------------------------
HRESULT STDMETHODCALLTYPE
ClrDataAccess::GetAddressType(
    /* [in] */ CLRDATA_ADDRESS address,
    /* [out] */ CLRDataAddressType* type)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        // The only thing that constitutes a failure is some
        // dac failure while checking things.
        status = S_OK;
        TADDR taAddr = CLRDATA_ADDRESS_TO_TADDR(address);

        if (IsPossibleCodeAddress(taAddr) == S_OK)
        {
            if (ExecutionManager::IsManagedCode(taAddr))
            {
                *type = CLRDATA_ADDRESS_MANAGED_METHOD;
                goto Exit;
            }

            if (StubManager::IsStub(taAddr))
            {
                *type = CLRDATA_ADDRESS_RUNTIME_UNMANAGED_STUB;
                goto Exit;
            }
        }

        *type = CLRDATA_ADDRESS_UNRECOGNIZED;

    Exit: ;
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

// Resolves a code address to a runtime symbol name (delegates to
// RawGetMethodName).
HRESULT STDMETHODCALLTYPE
ClrDataAccess::GetRuntimeNameByAddress(
    /* [in] */ CLRDATA_ADDRESS address,
    /* [in] */ ULONG32 flags,
    /* [in] */ ULONG32 bufLen,
    /* [out] */ ULONG32 *symbolLen,
    /* [size_is][out] */ _Out_writes_bytes_opt_(bufLen) WCHAR symbolBuf[ ],
    /* [out] */ CLRDATA_ADDRESS* displacement)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
#ifdef TARGET_ARM
        address &= ~THUMB_CODE; //workaround for windbg passing in addresses with the THUMB mode bit set
#endif
        status = RawGetMethodName(address, flags, bufLen, symbolLen,
                                  symbolBuf, displacement);
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

//----------------------------------------------------------------------------
// AppDomain enumeration.  The handle is a heap-allocated AppDomainIterator,
// freed in EndEnumAppDomains.
//----------------------------------------------------------------------------
HRESULT STDMETHODCALLTYPE
ClrDataAccess::StartEnumAppDomains(
    /* [out] */ CLRDATA_ENUM* handle)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        AppDomainIterator* iter = new (nothrow) AppDomainIterator(FALSE);
        if (iter)
        {
            *handle = TO_CDENUM(iter);
            status = S_OK;
        }
        else
        {
            status = E_OUTOFMEMORY;
        }
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

HRESULT STDMETHODCALLTYPE
ClrDataAccess::EnumAppDomain(
    /* [in, out] */ CLRDATA_ENUM* handle,
    /* [out] */ IXCLRDataAppDomain **appDomain)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        AppDomainIterator* iter = FROM_CDENUM(AppDomainIterator, *handle);
        if (iter->Next())
        {
            *appDomain = new (nothrow)
                ClrDataAppDomain(this, iter->GetDomain());
            status = *appDomain ? S_OK : E_OUTOFMEMORY;
        }
        else
        {
            status = S_FALSE;
        }
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

HRESULT STDMETHODCALLTYPE
ClrDataAccess::EndEnumAppDomains(
    /* [in] */ CLRDATA_ENUM handle)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        AppDomainIterator* iter = FROM_CDENUM(AppDomainIterator, handle);
        delete iter;
        status = S_OK;
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

// Only the single default appdomain id is valid; anything else is rejected.
HRESULT STDMETHODCALLTYPE
ClrDataAccess::GetAppDomainByUniqueID(
    /* [in] */ ULONG64 uniqueID,
    /* [out] */ IXCLRDataAppDomain **appDomain)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        if (uniqueID != DefaultADID)
        {
            status = E_INVALIDARG;
        }
        else
        {
            *appDomain = new (nothrow)
                ClrDataAppDomain(this, AppDomain::GetCurrentDomain());
            status = *appDomain ? S_OK : E_OUTOFMEMORY;
        }
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

//----------------------------------------------------------------------------
// Assembly enumeration.  The handle is a heap-allocated ProcessModIter,
// freed in EndEnumAssemblies.
//----------------------------------------------------------------------------
HRESULT STDMETHODCALLTYPE
ClrDataAccess::StartEnumAssemblies(
    /* [out] */ CLRDATA_ENUM* handle)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        ProcessModIter* iter = new (nothrow) ProcessModIter;
        if (iter)
        {
            *handle = TO_CDENUM(iter);
            status = S_OK;
        }
        else
        {
            status = E_OUTOFMEMORY;
        }
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

HRESULT STDMETHODCALLTYPE
ClrDataAccess::EnumAssembly(
    /* [in, out] */ CLRDATA_ENUM* handle,
    /* [out] */ IXCLRDataAssembly **assembly)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        ProcessModIter* iter = FROM_CDENUM(ProcessModIter, *handle);
        Assembly* assem;

        if ((assem = iter->NextAssem()))
        {
            *assembly = new (nothrow) ClrDataAssembly(this, assem);
            status = *assembly ? S_OK : E_OUTOFMEMORY;
        }
        else
        {
            status = S_FALSE;
        }
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

HRESULT STDMETHODCALLTYPE
ClrDataAccess::EndEnumAssemblies(
    /* [in] */ CLRDATA_ENUM handle)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        ProcessModIter* iter = FROM_CDENUM(ProcessModIter, handle);
        delete iter;
        status = S_OK;
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

//----------------------------------------------------------------------------
// Module enumeration.  Same ProcessModIter handle scheme as assemblies.
//----------------------------------------------------------------------------
HRESULT STDMETHODCALLTYPE
ClrDataAccess::StartEnumModules(
    /* [out] */ CLRDATA_ENUM* handle)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        ProcessModIter* iter = new (nothrow) ProcessModIter;
        if (iter)
        {
            *handle = TO_CDENUM(iter);
            status = S_OK;
        }
        else
        {
            status = E_OUTOFMEMORY;
        }
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

HRESULT
STDMETHODCALLTYPE
ClrDataAccess::EnumModule(
    /* [in, out] */ CLRDATA_ENUM* handle,
    /* [out] */ IXCLRDataModule **mod)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        ProcessModIter* iter = FROM_CDENUM(ProcessModIter, *handle);
        Module* curMod;

        if ((curMod = iter->NextModule()))
        {
            *mod = new (nothrow) ClrDataModule(this, curMod);
            status = *mod ? S_OK : E_OUTOFMEMORY;
        }
        else
        {
            status = S_FALSE;
        }
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

HRESULT STDMETHODCALLTYPE
ClrDataAccess::EndEnumModules(
    /* [in] */ CLRDATA_ENUM handle)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        ProcessModIter* iter = FROM_CDENUM(ProcessModIter, handle);
        delete iter;
        status = S_OK;
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

//----------------------------------------------------------------------------
// Finds the module whose loaded image range contains the given address by
// walking every module and comparing against its image base and length.
// Returns S_FALSE if no module matches.
//----------------------------------------------------------------------------
HRESULT STDMETHODCALLTYPE
ClrDataAccess::GetModuleByAddress(
    /* [in] */ CLRDATA_ADDRESS address,
    /* [out] */ IXCLRDataModule** mod)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        ProcessModIter modIter;
        Module* modDef;

        while ((modDef = modIter.NextModule()))
        {
            TADDR base;
            ULONG32 length;
            PEAssembly* pPEAssembly = modDef->GetPEAssembly();

            if ((base = PTR_TO_TADDR(pPEAssembly->GetLoadedImageContents(&length))))
            {
                if (TO_CDADDR(base) <= address &&
                    TO_CDADDR(base + length) > address)
                {
                    break;
                }
            }
        }

        if (modDef)
        {
            *mod = new (nothrow) ClrDataModule(this, modDef);
            status = *mod ? S_OK : E_OUTOFMEMORY;
        }
        else
        {
            status = S_FALSE;
        }
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

//----------------------------------------------------------------------------
// Method-definition enumeration keyed by code address: locates the containing
// module (same scan as GetModuleByAddress) and starts the definition
// enumeration there.
//----------------------------------------------------------------------------
HRESULT STDMETHODCALLTYPE
ClrDataAccess::StartEnumMethodDefinitionsByAddress(
    /* [in] */ CLRDATA_ADDRESS address,
    /* [out] */ CLRDATA_ENUM *handle)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        ProcessModIter modIter;
        Module* modDef;

        while ((modDef = modIter.NextModule()))
        {
            TADDR base;
            ULONG32 length;
            PEAssembly* assembly = modDef->GetPEAssembly();

            if ((base = PTR_TO_TADDR(assembly->GetLoadedImageContents(&length))))
            {
                if (TO_CDADDR(base) <= address &&
                    TO_CDADDR(base + length) > address)
                {
                    break;
                }
            }
        }

        status = EnumMethodDefinitions::
            CdStart(modDef, true, address, handle);
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

HRESULT STDMETHODCALLTYPE
ClrDataAccess::EnumMethodDefinitionByAddress(
    /* [out][in] */ CLRDATA_ENUM* handle,
    /* [out] */ IXCLRDataMethodDefinition **method)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        status = EnumMethodDefinitions::CdNext(this, handle, method);
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

HRESULT STDMETHODCALLTYPE
ClrDataAccess::EndEnumMethodDefinitionsByAddress(
    /* [in] */ CLRDATA_ENUM handle)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        status = EnumMethodDefinitions::CdEnd(handle);
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

//----------------------------------------------------------------------------
// Method-instance enumeration keyed by code address: maps the address to its
// MethodDesc via the execution manager and starts the instance enumeration.
// Silently returns S_FALSE (empty enum) if the address is not managed code.
//----------------------------------------------------------------------------
HRESULT STDMETHODCALLTYPE
ClrDataAccess::StartEnumMethodInstancesByAddress(
    /* [in] */ CLRDATA_ADDRESS address,
    /* [in] */ IXCLRDataAppDomain* appDomain,
    /* [out] */ CLRDATA_ENUM *handle)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        MethodDesc* methodDesc;

        *handle = 0;
        status = S_FALSE;
        TADDR taddr;
        if( (status = TRY_CLRDATA_ADDRESS_TO_TADDR(address, &taddr)) != S_OK )
        {
            goto Exit;
        }

        if (IsPossibleCodeAddress(taddr) != S_OK)
        {
            goto Exit;
        }

        methodDesc = ExecutionManager::GetCodeMethodDesc(taddr);
        if (!methodDesc)
        {
            goto Exit;
        }

        status = EnumMethodInstances::CdStart(methodDesc, appDomain,
                                              handle);

    Exit: ;
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

HRESULT STDMETHODCALLTYPE
ClrDataAccess::EnumMethodInstanceByAddress(
    /* [out][in] */ CLRDATA_ENUM* handle,
    /* [out] */ IXCLRDataMethodInstance **method)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        status = EnumMethodInstances::CdNext(this, handle, method);
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

HRESULT STDMETHODCALLTYPE
ClrDataAccess::EndEnumMethodInstancesByAddress(
    /* [in] */ CLRDATA_ENUM handle)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        status = EnumMethodInstances::CdEnd(handle);
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

// Not implemented (only validates flags before returning E_NOTIMPL).
HRESULT STDMETHODCALLTYPE
ClrDataAccess::GetDataByAddress(
    /* [in] */ CLRDATA_ADDRESS address,
    /* [in] */ ULONG32 flags,
    /* [in] */ IXCLRDataAppDomain* appDomain,
    /* [in] */ IXCLRDataTask* tlsTask,
    /* [in] */ ULONG32 bufLen,
    /* [out] */ ULONG32 *nameLen,
    /* [size_is][out] */ _Out_writes_to_opt_(bufLen, *nameLen) WCHAR nameBuf[ ],
    /* [out] */ IXCLRDataValue **value,
    /* [out] */ CLRDATA_ADDRESS *displacement)
{
    HRESULT status;

    if (flags != 0)
    {
        return E_INVALIDARG;
    }

    DAC_ENTER();

    EX_TRY
    {
        // XXX Microsoft.
status = E_NOTIMPL;
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

// Not implemented.
HRESULT STDMETHODCALLTYPE
ClrDataAccess::GetExceptionStateByExceptionRecord(
    /* [in] */ EXCEPTION_RECORD64 *record,
    /* [out] */ IXCLRDataExceptionState **exception)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        // XXX Microsoft.
        status = E_NOTIMPL;
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

//----------------------------------------------------------------------------
// Decodes a runtime-raised notification exception record (module load/unload,
// JIT completion, exception, GC event, catch enter) and invokes the matching
// callback on the supplied IXCLRDataExceptionNotification sink.  All DAC
// objects are created under the lock first; the callbacks themselves run
// after DAC_LEAVE so arbitrary external code never executes under the lock.
//----------------------------------------------------------------------------
HRESULT STDMETHODCALLTYPE
ClrDataAccess::TranslateExceptionRecordToNotification(
    /* [in] */ EXCEPTION_RECORD64 *record,
    /* [in] */ IXCLRDataExceptionNotification *notify)
{
    HRESULT status = E_FAIL;
    ClrDataModule* pubModule = NULL;
    ClrDataMethodInstance* pubMethodInst = NULL;
    ClrDataExceptionState* pubExState = NULL;
    GcEvtArgs pubGcEvtArgs;
    ULONG32 notifyType = 0;
    DWORD catcherNativeOffset = 0;
    TADDR nativeCodeLocation = NULL;

    DAC_ENTER();

    EX_TRY
    {
        //
        // We cannot hold the dac lock while calling
        // out as the external code can do arbitrary things.
        // Instead we make a pass over the exception
        // information and create all necessary objects.
        // We then leave the lock and make the callback.
        //

        TADDR exInfo[EXCEPTION_MAXIMUM_PARAMETERS];
        for (UINT i = 0; i < EXCEPTION_MAXIMUM_PARAMETERS; i++)
        {
            exInfo[i] = TO_TADDR(record->ExceptionInformation[i]);
        }

        notifyType = DACNotify::GetType(exInfo);
        switch(notifyType)
        {
        case DACNotify::MODULE_LOAD_NOTIFICATION:
        {
            TADDR modulePtr;

            if (DACNotify::ParseModuleLoadNotification(exInfo, modulePtr))
            {
                Module* clrModule = PTR_Module(modulePtr);
                pubModule = new (nothrow) ClrDataModule(this, clrModule);
                if (pubModule == NULL)
                {
                    status = E_OUTOFMEMORY;
                }
                else
                {
                    status = S_OK;
                }
            }
            break;
        }

        case DACNotify::MODULE_UNLOAD_NOTIFICATION:
        {
            TADDR modulePtr;

            if (DACNotify::ParseModuleUnloadNotification(exInfo, modulePtr))
            {
                Module* clrModule = PTR_Module(modulePtr);
                pubModule = new (nothrow) ClrDataModule(this, clrModule);
                if (pubModule == NULL)
                {
                    status = E_OUTOFMEMORY;
                }
                else
                {
                    status = S_OK;
                }
            }
            break;
        }

        case DACNotify::JIT_NOTIFICATION2:
        {
            TADDR methodDescPtr;

            if(DACNotify::ParseJITNotification(exInfo, methodDescPtr, nativeCodeLocation))
            {
                // Try and find the right appdomain
                MethodDesc* methodDesc = PTR_MethodDesc(methodDescPtr);
                BaseDomain* baseDomain = methodDesc->GetDomain();
                AppDomain* appDomain = NULL;

                if (baseDomain->IsAppDomain())
                {
                    appDomain = PTR_AppDomain(PTR_HOST_TO_TADDR(baseDomain));
                }
                else
                {
                    // Find a likely domain, because it's the shared domain.
                    AppDomainIterator adi(FALSE);
                    appDomain = adi.GetDomain();
                }

                pubMethodInst =
                    new (nothrow) ClrDataMethodInstance(this,
                                                        appDomain,
                                                        methodDesc);
                if (pubMethodInst == NULL)
                {
                    status = E_OUTOFMEMORY;
                }
                else
                {
                    status = S_OK;
                }
            }
            break;
        }

        case DACNotify::EXCEPTION_NOTIFICATION:
        {
            TADDR threadPtr;

            if (DACNotify::ParseExceptionNotification(exInfo, threadPtr))
            {
                // Translation can only occur at the time of
                // receipt of the notify exception, so we assume
                // that the Thread's current exception state
                // is the state we want.
                status = ClrDataExceptionState::
                    NewFromThread(this,
                                  PTR_Thread(threadPtr),
                                  &pubExState,
                                  NULL);
            }
            break;
        }

        case DACNotify::GC_NOTIFICATION:
        {
            if (DACNotify::ParseGCNotification(exInfo, pubGcEvtArgs))
            {
                status = S_OK;
            }
            break;
        }

        case DACNotify::CATCH_ENTER_NOTIFICATION:
        {
            TADDR methodDescPtr;

            if (DACNotify::ParseExceptionCatcherEnterNotification(exInfo, methodDescPtr, catcherNativeOffset))
            {
                // Try and find the right appdomain
                MethodDesc* methodDesc = PTR_MethodDesc(methodDescPtr);
                BaseDomain* baseDomain = methodDesc->GetDomain();
                AppDomain* appDomain = NULL;

                if (baseDomain->IsAppDomain())
                {
                    appDomain = PTR_AppDomain(PTR_HOST_TO_TADDR(baseDomain));
                }
                else
                {
                    // Find a likely domain, because it's the shared domain.
                    AppDomainIterator adi(FALSE);
                    appDomain = adi.GetDomain();
                }

                pubMethodInst =
                    new (nothrow) ClrDataMethodInstance(this,
                                                        appDomain,
                                                        methodDesc);
                if (pubMethodInst == NULL)
                {
                    status = E_OUTOFMEMORY;
                }
                else
                {
                    status = S_OK;
                }
            }
            break;
        }

        default:
            status = E_INVALIDARG;
            break;
        }
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();

    // Dispatch to the sink outside the DAC lock.  Newer notification
    // interfaces (2..5) are optional; each failed QI leaves its pointer NULL.
    if (status == S_OK)
    {
        IXCLRDataExceptionNotification2* notify2;
        if (notify->QueryInterface(__uuidof(IXCLRDataExceptionNotification2),
                                   (void**)&notify2) != S_OK)
        {
            notify2 = NULL;
        }
        IXCLRDataExceptionNotification3* notify3;
        if (notify->QueryInterface(__uuidof(IXCLRDataExceptionNotification3),
                                   (void**)&notify3) != S_OK)
        {
            notify3 = NULL;
        }
        IXCLRDataExceptionNotification4* notify4;
        if (notify->QueryInterface(__uuidof(IXCLRDataExceptionNotification4),
                                   (void**)&notify4) != S_OK)
        {
            notify4 = NULL;
        }
        IXCLRDataExceptionNotification5* notify5;
        if (notify->QueryInterface(__uuidof(IXCLRDataExceptionNotification5),
                                   (void**)&notify5) != S_OK)
        {
            notify5 = NULL;
        }

        switch(notifyType)
        {
        case DACNotify::MODULE_LOAD_NOTIFICATION:
            notify->OnModuleLoaded(pubModule);
            break;

        case DACNotify::MODULE_UNLOAD_NOTIFICATION:
            notify->OnModuleUnloaded(pubModule);
            break;

        case DACNotify::JIT_NOTIFICATION2:
            notify->OnCodeGenerated(pubMethodInst);

            if (notify5)
            {
                notify5->OnCodeGenerated2(pubMethodInst, TO_CDADDR(nativeCodeLocation));
            }
            break;

        case DACNotify::EXCEPTION_NOTIFICATION:
            if (notify2)
            {
                notify2->OnException(pubExState);
            }
            else
            {
                status = E_INVALIDARG;
            }
            break;

        case DACNotify::GC_NOTIFICATION:
            if (notify3)
            {
                notify3->OnGcEvent(pubGcEvtArgs);
            }
            break;

        case DACNotify::CATCH_ENTER_NOTIFICATION:
            if (notify4)
            {
                notify4->ExceptionCatcherEnter(pubMethodInst, catcherNativeOffset);
            }
            break;

        default:
            // notifyType has already been validated.
            _ASSERTE(FALSE);
            break;
        }

        if (notify2)
        {
            notify2->Release();
        }
        if (notify3)
        {
            notify3->Release();
        }
        if (notify4)
        {
            notify4->Release();
        }
        if (notify5)
        {
            notify5->Release();
        }
    }

    // Drop the local references created above; the sink took its own.
    if (pubModule)
    {
        pubModule->Release();
    }
    if (pubMethodInst)
    {
        pubMethodInst->Release();
    }
    if (pubExState)
    {
        pubExState->Release();
    }

    return status;
}

//----------------------------------------------------------------------------
// Creates an IXCLRDataValue describing the value of the given type located
// at a fixed target address.
//----------------------------------------------------------------------------
HRESULT STDMETHODCALLTYPE
ClrDataAccess::CreateMemoryValue(
    /* [in] */ IXCLRDataAppDomain* appDomain,
    /* [in] */ IXCLRDataTask* tlsTask,
    /* [in] */ IXCLRDataTypeInstance* type,
    /* [in] */ CLRDATA_ADDRESS addr,
    /* [out] */ IXCLRDataValue** value)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        AppDomain* dacDomain;
        Thread* dacThread;
        TypeHandle dacType;
        ULONG32 flags;
        NativeVarLocation loc;

        dacDomain = ((ClrDataAppDomain*)appDomain)->GetAppDomain();
        if (tlsTask)
        {
            dacThread = ((ClrDataTask*)tlsTask)->GetThread();
        }
        else
        {
            dacThread = NULL;
        }
        dacType = ((ClrDataTypeInstance*)type)->GetTypeHandle();

        flags = GetTypeFieldValueFlags(dacType, NULL, 0, false);

        loc.addr = addr;
        loc.size = dacType.GetSize();
        loc.contextReg = false;

        *value = new (nothrow)
            ClrDataValue(this, dacDomain, dacThread, flags,
                         dacType, addr, 1, &loc);
        status = *value ?
S_OK : E_OUTOFMEMORY;
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

// Not implemented.
HRESULT STDMETHODCALLTYPE
ClrDataAccess::SetAllTypeNotifications(
    /* [in] */ IXCLRDataModule* mod,
    /* [in] */ ULONG32 flags)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        // XXX Microsoft.
        status = E_NOTIMPL;
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

//----------------------------------------------------------------------------
// Applies a JIT-notification flag to every method of a module (or all
// modules when mod is NULL) via the out-of-process JIT notification table.
//----------------------------------------------------------------------------
HRESULT STDMETHODCALLTYPE
ClrDataAccess::SetAllCodeNotifications(
    /* [in] */ IXCLRDataModule* mod,
    /* [in] */ ULONG32 flags)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        status = E_FAIL;

        if (!IsValidMethodCodeNotification(flags))
        {
            status = E_INVALIDARG;
        }
        else
        {
            JITNotifications jn(GetHostJitNotificationTable());
            if (!jn.IsActive())
            {
                status = E_OUTOFMEMORY;
            }
            else
            {
                BOOL changedTable;
                TADDR modulePtr = mod ?
                    PTR_HOST_TO_TADDR(((ClrDataModule*)mod)->GetModule()) :
                    NULL;

                if (jn.SetAllNotifications(modulePtr, flags, &changedTable))
                {
                    // Only write the table back if it actually changed.
                    if (!changedTable ||
                        (changedTable && jn.UpdateOutOfProcTable()))
                    {
                        status = S_OK;
                    }
                }
            }
        }
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

// Not implemented.
HRESULT STDMETHODCALLTYPE
ClrDataAccess::GetTypeNotifications(
    /* [in] */ ULONG32 numTokens,
    /* [in, size_is(numTokens)] */ IXCLRDataModule* mods[],
    /* [in] */ IXCLRDataModule* singleMod,
    /* [in, size_is(numTokens)] */ mdTypeDef tokens[],
    /* [out, size_is(numTokens)] */ ULONG32 flags[])
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        // XXX Microsoft.
        status = E_NOTIMPL;
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

// Not implemented.
HRESULT STDMETHODCALLTYPE
ClrDataAccess::SetTypeNotifications(
    /* [in] */ ULONG32 numTokens,
    /* [in, size_is(numTokens)] */ IXCLRDataModule* mods[],
    /* [in] */ IXCLRDataModule* singleMod,
    /* [in, size_is(numTokens)] */ mdTypeDef tokens[],
    /* [in, size_is(numTokens)] */ ULONG32 flags[],
    /* [in] */ ULONG32 singleFlags)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        // XXX Microsoft.
        status = E_NOTIMPL;
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

//----------------------------------------------------------------------------
// Reads the per-method JIT-notification flags for a set of method tokens.
// Modules are given either per-token (mods[]) or once for all (singleMod) --
// exactly one of the two must be supplied.
//----------------------------------------------------------------------------
HRESULT STDMETHODCALLTYPE
ClrDataAccess::GetCodeNotifications(
    /* [in] */ ULONG32 numTokens,
    /* [in, size_is(numTokens)] */ IXCLRDataModule* mods[],
    /* [in] */ IXCLRDataModule* singleMod,
    /* [in, size_is(numTokens)] */ mdMethodDef tokens[],
    /* [out, size_is(numTokens)] */ ULONG32 flags[])
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        if ((flags == NULL || tokens == NULL) ||
            (mods == NULL && singleMod == NULL) ||
            (mods != NULL && singleMod != NULL))
        {
            status = E_INVALIDARG;
        }
        else
        {
            JITNotifications jn(GetHostJitNotificationTable());
            if (!jn.IsActive())
            {
                status = E_OUTOFMEMORY;
            }
            else
            {
                TADDR modulePtr = NULL;
                if (singleMod)
                {
                    modulePtr = PTR_HOST_TO_TADDR(((ClrDataModule*)singleMod)->
                        GetModule());
                }

                for (ULONG32 i = 0; i < numTokens; i++)
                {
                    if (singleMod == NULL)
                    {
                        modulePtr = PTR_HOST_TO_TADDR(((ClrDataModule*)mods[i])->
                            GetModule());
                    }
                    USHORT jt = jn.Requested(modulePtr, tokens[i]);
                    flags[i] = jt;
                }

                status = S_OK;
            }
        }
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

//----------------------------------------------------------------------------
// Writes per-method JIT-notification flags, either per-token (flags[]) or a
// single value for all tokens (singleFlags).  Validates all flags up front,
// then updates the table and writes it back to the target only if something
// actually changed.
//----------------------------------------------------------------------------
HRESULT STDMETHODCALLTYPE
ClrDataAccess::SetCodeNotifications(
    /* [in] */ ULONG32 numTokens,
    /* [in, size_is(numTokens)] */ IXCLRDataModule* mods[],
    /* [in] */ IXCLRDataModule* singleMod,
    /* [in, size_is(numTokens)] */ mdMethodDef tokens[],
    /* [in, size_is(numTokens)] */ ULONG32 flags[],
    /* [in] */ ULONG32 singleFlags)
{
    HRESULT status = E_UNEXPECTED;

    DAC_ENTER();

    EX_TRY
    {
        if ((tokens == NULL) ||
            (mods == NULL && singleMod == NULL) ||
            (mods != NULL && singleMod != NULL))
        {
            status = E_INVALIDARG;
        }
        else
        {
            JITNotifications jn(GetHostJitNotificationTable());
            if (!jn.IsActive() || numTokens > jn.GetTableSize())
            {
                status = E_OUTOFMEMORY;
            }
            else
            {
                BOOL changedTable = FALSE;

                // Are flags valid?
                if (flags)
                {
                    for (ULONG32 check = 0; check < numTokens; check++)
                    {
                        if (!IsValidMethodCodeNotification(flags[check]))
                        {
                            status = E_INVALIDARG;
                            goto Exit;
                        }
                    }
                }
                else if (!IsValidMethodCodeNotification(singleFlags))
                {
                    status = E_INVALIDARG;
                    goto Exit;
                }

                TADDR modulePtr = NULL;
                if (singleMod)
                {
                    modulePtr = PTR_HOST_TO_TADDR(((ClrDataModule*)singleMod)->
                        GetModule());
                }

                for (ULONG32 i = 0; i < numTokens; i++)
                {
                    if (singleMod == NULL)
                    {
                        modulePtr = PTR_HOST_TO_TADDR(((ClrDataModule*)mods[i])->
                            GetModule());
                    }

                    USHORT curFlags = jn.Requested(modulePtr, tokens[i]);
                    USHORT setFlags = (USHORT)(flags ? flags[i] : singleFlags);

                    if (curFlags != setFlags)
                    {
                        if (!jn.SetNotification(modulePtr, tokens[i],
                                                setFlags))
                        {
                            status = E_FAIL;
                            goto Exit;
                        }

                        changedTable = TRUE;
                    }
                }

                if (!changedTable ||
                    (changedTable && jn.UpdateOutOfProcTable()))
                {
                    status = S_OK;
                }
            }
        }

    Exit: ;
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

// Reads the process-wide notification flag mask.
HRESULT
ClrDataAccess::GetOtherNotificationFlags(
    /* [out] */ ULONG32* flags)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        *flags = g_dacNotificationFlags;
        status = S_OK;
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

// Writes the process-wide notification flag mask; only the four known
// notification bits are accepted.
HRESULT
ClrDataAccess::SetOtherNotificationFlags(
    /* [in] */ ULONG32 flags)
{
    HRESULT status;

    if ((flags & ~(CLRDATA_NOTIFY_ON_MODULE_LOAD |
                   CLRDATA_NOTIFY_ON_MODULE_UNLOAD |
                   CLRDATA_NOTIFY_ON_EXCEPTION |
                   CLRDATA_NOTIFY_ON_EXCEPTION_CATCH_ENTER)) != 0)
    {
        return E_INVALIDARG;
    }

    DAC_ENTER();

    EX_TRY
    {
        g_dacNotificationFlags = flags;
        status = S_OK;
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

// Tags stored in STUB_BUF::u.flags describing what intermediate stub state
// the buffer carries between FollowStub calls.
enum
{
    STUB_BUF_FLAGS_START,

    STUB_BUF_METHOD_JITTED,
    STUB_BUF_FRAME_PUSHED,
    STUB_BUF_STUB_MANAGER_PUSHED,

    STUB_BUF_FLAGS_END,
};

// Internal view of the opaque CLRDATA_FOLLOW_STUB_BUFFER passed by callers.
union STUB_BUF
{
    CLRDATA_FOLLOW_STUB_BUFFER apiBuf;
    struct
    {
        ULONG64 flags;
        ULONG64 addr;
        ULONG64 arg1;
    } u;
};

//----------------------------------------------------------------------------
// One step of stub-following: given an address (and optionally the state left
// by a previous step in inBuffer), traces through the stub and reports either
// a final code address (CLRDATA_FOLLOW_STUB_EXIT) or an intermediate address
// plus resume state (CLRDATA_FOLLOW_STUB_INTERMEDIATE).
//----------------------------------------------------------------------------
HRESULT
ClrDataAccess::FollowStubStep(
    /* [in] */ Thread* thread,
    /* [in] */ ULONG32 inFlags,
    /* [in] */ TADDR inAddr,
    /* [in] */ union STUB_BUF* inBuffer,
    /* [out] */ TADDR* outAddr,
    /* [out] */ union STUB_BUF* outBuffer,
    /* [out] */ ULONG32* outFlags)
{
    TraceDestination trace;
    bool traceDone = false;
    BYTE* retAddr;
    T_CONTEXT localContext;
    REGDISPLAY regDisp;
    MethodDesc* methodDesc;

    ZeroMemory(outBuffer, sizeof(*outBuffer));

    if (inBuffer)
    {
        // Resume from the state recorded by the previous step.
        switch(inBuffer->u.flags)
        {
        case STUB_BUF_METHOD_JITTED:
            if (inAddr !=
GFN_TADDR(DACNotifyCompilationFinished))
            {
                return E_INVALIDARG;
            }

            // It's possible that this notification is
            // for a different method, so double-check
            // and recycle the notification if necessary.
            methodDesc = PTR_MethodDesc(CORDB_ADDRESS_TO_TADDR(inBuffer->u.addr));
            if (methodDesc->HasNativeCode())
            {
                *outAddr = methodDesc->GetNativeCode();
                *outFlags = CLRDATA_FOLLOW_STUB_EXIT;
                return S_OK;
            }

            // We didn't end up with native code so try again.
            trace.InitForUnjittedMethod(methodDesc);
            traceDone = true;
            break;

        case STUB_BUF_FRAME_PUSHED:
            if (!thread || inAddr != inBuffer->u.addr)
            {
                return E_INVALIDARG;
            }

            trace.InitForFramePush(CORDB_ADDRESS_TO_TADDR(inBuffer->u.addr));
            DacGetThreadContext(thread, &localContext);
            thread->FillRegDisplay(&regDisp, &localContext);
            if (!thread->GetFrame()->
                TraceFrame(thread,
                           TRUE,
                           &trace,
                           &regDisp))
            {
                return E_FAIL;
            }

            traceDone = true;
            break;

        case STUB_BUF_STUB_MANAGER_PUSHED:
            if (!thread || inAddr != inBuffer->u.addr ||
                !inBuffer->u.arg1)
            {
                return E_INVALIDARG;
            }

            trace.InitForManagerPush(CORDB_ADDRESS_TO_TADDR(inBuffer->u.addr),
                                     PTR_StubManager(CORDB_ADDRESS_TO_TADDR(inBuffer->u.arg1)));
            DacGetThreadContext(thread, &localContext);
            if (!trace.GetStubManager()->
                TraceManager(thread,
                             &trace,
                             &localContext,
                             &retAddr))
            {
                return E_FAIL;
            }

            traceDone = true;
            break;

        default:
            return E_INVALIDARG;
        }
    }

    if ((!traceDone &&
         !StubManager::TraceStub(inAddr, &trace)) ||
        !StubManager::FollowTrace(&trace))
    {
        return E_NOINTERFACE;
    }

    switch(trace.GetTraceType())
    {
    case TRACE_UNMANAGED:
    case TRACE_MANAGED:
        // We've hit non-stub code so we're done.
        *outAddr = trace.GetAddress();
        *outFlags = CLRDATA_FOLLOW_STUB_EXIT;
        break;

    case TRACE_UNJITTED_METHOD:
        // The stub causes jitting, so return
        // the address of the jit-complete routine
        // so that the real native address can
        // be picked up once the JIT is done.
        methodDesc = trace.GetMethodDesc();
        *outAddr = GFN_TADDR(DACNotifyCompilationFinished);
        outBuffer->u.flags = STUB_BUF_METHOD_JITTED;
        outBuffer->u.addr = PTR_HOST_TO_TADDR(methodDesc);
        *outFlags = CLRDATA_FOLLOW_STUB_INTERMEDIATE;
        break;

    case TRACE_FRAME_PUSH:
        if (!thread)
        {
            return E_INVALIDARG;
        }

        *outAddr = trace.GetAddress();
        outBuffer->u.flags = STUB_BUF_FRAME_PUSHED;
        outBuffer->u.addr = trace.GetAddress();
        *outFlags = CLRDATA_FOLLOW_STUB_INTERMEDIATE;
        break;

    case TRACE_MGR_PUSH:
        if (!thread)
        {
            return E_INVALIDARG;
        }

        *outAddr = trace.GetAddress();
        outBuffer->u.flags = STUB_BUF_STUB_MANAGER_PUSHED;
        outBuffer->u.addr = trace.GetAddress();
        outBuffer->u.arg1 = PTR_HOST_TO_TADDR(trace.GetStubManager());
        *outFlags = CLRDATA_FOLLOW_STUB_INTERMEDIATE;
        break;

    default:
        return E_INVALIDARG;
    }

    return S_OK;
}

// Task-less convenience wrapper over FollowStub2.
HRESULT STDMETHODCALLTYPE
ClrDataAccess::FollowStub(
    /* [in] */ ULONG32 inFlags,
    /* [in] */ CLRDATA_ADDRESS inAddr,
    /* [in] */ CLRDATA_FOLLOW_STUB_BUFFER* _inBuffer,
    /* [out] */ CLRDATA_ADDRESS* outAddr,
    /* [out] */ CLRDATA_FOLLOW_STUB_BUFFER* _outBuffer,
    /* [out] */ ULONG32* outFlags)
{
    return FollowStub2(NULL, inFlags, inAddr, _inBuffer,
                       outAddr, _outBuffer, outFlags);
}

//----------------------------------------------------------------------------
// Drives FollowStubStep until it either makes forward progress (address
// changes), fails, or exhausts a small retry budget (protection against
// infinite loops in corrupted targets).
//----------------------------------------------------------------------------
HRESULT STDMETHODCALLTYPE
ClrDataAccess::FollowStub2(
    /* [in] */ IXCLRDataTask* task,
    /* [in] */ ULONG32 inFlags,
    /* [in] */ CLRDATA_ADDRESS _inAddr,
    /* [in] */ CLRDATA_FOLLOW_STUB_BUFFER* _inBuffer,
    /* [out] */ CLRDATA_ADDRESS* _outAddr,
    /* [out] */ CLRDATA_FOLLOW_STUB_BUFFER* _outBuffer,
    /* [out] */ ULONG32* outFlags)
{
    HRESULT status;

    if ((inFlags & ~(CLRDATA_FOLLOW_STUB_DEFAULT)) != 0)
    {
        return E_INVALIDARG;
    }

    STUB_BUF* inBuffer = (STUB_BUF*)_inBuffer;
    STUB_BUF* outBuffer = (STUB_BUF*)_outBuffer;

    if (inBuffer &&
        (inBuffer->u.flags <= STUB_BUF_FLAGS_START ||
         inBuffer->u.flags >= STUB_BUF_FLAGS_END))
    {
        return E_INVALIDARG;
    }

    DAC_ENTER();

    EX_TRY
    {
        STUB_BUF cycleBuf;
        TADDR inAddr = TO_TADDR(_inAddr);
        TADDR outAddr;
        Thread* thread = task ? ((ClrDataTask*)task)->GetThread() : NULL;
        ULONG32 loops = 4;

        for (;;)
        {
            if ((status = FollowStubStep(thread,
                                         inFlags,
                                         inAddr,
                                         inBuffer,
                                         &outAddr,
                                         outBuffer,
                                         outFlags)) != S_OK)
            {
                break;
            }

            // Some stub tracing just requests further iterations
            // of processing, so detect that case and loop.
            if (outAddr != inAddr)
            {
                // We can make forward progress, we're done.
                *_outAddr = TO_CDADDR(outAddr);
                break;
            }

            // We need more processing.  As a protection
            // against infinite loops in corrupted or buggy
            // situations, we only allow this to happen a
            // small number of times.
            if (--loops == 0)
            {
                ZeroMemory(outBuffer, sizeof(*outBuffer));
                status = E_FAIL;
                break;
            }

            cycleBuf = *outBuffer;
            inBuffer = &cycleBuf;
        }
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable:4297)
#endif // _MSC_VER

// Reads a pending GC notification matching the requested event type from the
// host GC notification table.
STDMETHODIMP
ClrDataAccess::GetGcNotification(GcEvtArgs* gcEvtArgs)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        if (gcEvtArgs->typ >= GC_EVENT_TYPE_MAX)
        {
            status = E_INVALIDARG;
        }
        else
        {
            GcNotifications gn(GetHostGcNotificationTable());
            if (!gn.IsActive())
            {
                status = E_OUTOFMEMORY;
            }
            else
            {
                GcEvtArgs *res = gn.GetNotification(*gcEvtArgs);
                if (res != NULL)
                {
                    *gcEvtArgs = *res;
                    status = S_OK;
                }
                else
                {
                    status = E_FAIL;
                }
            }
        }
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

// Registers a GC notification request and writes the table back to the
// target process.
STDMETHODIMP
ClrDataAccess::SetGcNotification(IN GcEvtArgs gcEvtArgs)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        if (gcEvtArgs.typ >= GC_EVENT_TYPE_MAX)
        {
            status = E_INVALIDARG;
        }
        else
        {
            GcNotifications gn(GetHostGcNotificationTable());
            if (!gn.IsActive())
            {
                status = E_OUTOFMEMORY;
            }
            else
            {
                if (gn.SetNotification(gcEvtArgs) && gn.UpdateOutOfProcTable())
                {
                    status = S_OK;
                }
                else
                {
                    status = E_FAIL;
                }
            }
        }
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

#ifdef _MSC_VER
#pragma warning(pop)
#endif // _MSC_VER

//----------------------------------------------------------------------------
// Initialize: validates that the data target's platform matches the platform
// this DAC module was built for (cross-platform debugging is not supported),
// then locates the runtime module in the target.  (Continues past this view.)
//----------------------------------------------------------------------------
HRESULT
ClrDataAccess::Initialize(void)
{
    HRESULT hr;
    CLRDATA_ADDRESS base;

    //
    // We do not currently support cross-platform
    // debugging.  Verify that cross-platform is not
    // being attempted.
    //

    // Determine our platform based on the pre-processor macros set when we were built
#ifdef TARGET_UNIX
#if defined(TARGET_X86)
    CorDebugPlatform hostPlatform = CORDB_PLATFORM_POSIX_X86;
#elif defined(TARGET_AMD64)
    CorDebugPlatform hostPlatform = CORDB_PLATFORM_POSIX_AMD64;
#elif defined(TARGET_ARM)
    CorDebugPlatform hostPlatform = CORDB_PLATFORM_POSIX_ARM;
#elif defined(TARGET_ARM64)
    CorDebugPlatform hostPlatform = CORDB_PLATFORM_POSIX_ARM64;
#else
#error Unknown Processor.
#endif
#else
#if defined(TARGET_X86)
    CorDebugPlatform hostPlatform = CORDB_PLATFORM_WINDOWS_X86;
#elif defined(TARGET_AMD64)
    CorDebugPlatform hostPlatform = CORDB_PLATFORM_WINDOWS_AMD64;
#elif defined(TARGET_ARM)
    CorDebugPlatform hostPlatform = CORDB_PLATFORM_WINDOWS_ARM;
#elif defined(TARGET_ARM64)
    CorDebugPlatform hostPlatform = CORDB_PLATFORM_WINDOWS_ARM64;
#else
#error Unknown Processor.
#endif
#endif

    CorDebugPlatform targetPlatform;
    IfFailRet(m_pTarget->GetPlatform(&targetPlatform));

    if (targetPlatform != hostPlatform)
    {
        // DAC fatal error: Platform mismatch - the platform reported by the data target
        // is not what this version of mscordacwks.dll was built for.
        return CORDBG_E_UNCOMPATIBLE_PLATFORMS;
    }

    //
    // Get the current DLL base for mscorwks globals.
    // In case of multiple-CLRs, there may be multiple dlls named "mscorwks".
    // code:OpenVirtualProcess can take the base address (clrInstanceId) to select exactly
    // which CLR to is being target. If so, m_globalBase will already be set.
    //

    if (m_globalBase == 0)
    {
        // Caller didn't specify which CLR to debug, we should be using a legacy data target.
        // Caller didn't specify which CLR to debug; we must have a legacy
        // data target to locate the runtime image ourselves.
        if (m_pLegacyTarget == NULL)
        {
            DacError(E_INVALIDARG);
            UNREACHABLE();
        }

        // Prefer the ICLRRuntimeLocator interface if the legacy target
        // implements it; otherwise fall back to searching for the main CLR
        // module by name.
        ReleaseHolder<ICLRRuntimeLocator> pRuntimeLocator(NULL);
        if (m_pLegacyTarget->QueryInterface(__uuidof(ICLRRuntimeLocator), (void**)&pRuntimeLocator) != S_OK || pRuntimeLocator->GetRuntimeBase(&base) != S_OK)
        {
            IfFailRet(m_pLegacyTarget->GetImageBase(TARGET_MAIN_CLR_DLL_NAME_W, &base));
        }

        m_globalBase = TO_TADDR(base);
    }

    // We don't need to try too hard to prevent
    // multiple initializations as each one will
    // copy the same data into the globals and so
    // cannot interfere with each other.
    if (!s_procInit)
    {
        IfFailRet(GetDacGlobals());
        IfFailRet(DacGetHostVtPtrs());
        s_procInit = true;
    }

    //
    // DAC is now setup and ready to use
    //

    // Do some validation
    IfFailRet(VerifyDlls());

    return S_OK;
}

// Walk the target's thread store looking for the Thread whose id matches the
// given task id. Returns NULL when there is no thread store (e.g. the runtime
// hasn't started threads yet) or no thread matches.
Thread*
ClrDataAccess::FindClrThreadByTaskId(ULONG64 taskId)
{
    Thread* thread = NULL;

    if (!ThreadStore::s_pThreadStore)
    {
        return NULL;
    }

    // NOTE(review): taskId is truncated to 32 bits for this comparison —
    // presumably callers only ever pass DWORD-sized ids; confirm at call sites.
    while ((thread = ThreadStore::GetAllThreadList(thread, 0, 0)))
    {
        if (thread->GetThreadId() == (DWORD)taskId)
        {
            return thread;
        }
    }

    return NULL;
}

// Cheap plausibility filter for candidate code addresses: succeeds only if at
// least one byte at the address is readable in the target process.
HRESULT
ClrDataAccess::IsPossibleCodeAddress(IN TADDR address)
{
    SUPPORTS_DAC;
    BYTE testRead;
    ULONG32 testDone;

    // First do a trivial check on the readability of the
    // address.  This makes for quick rejection of bogus
    // addresses that the debugger sends in when searching
    // stacks for return addresses.
    // XXX Microsoft - Will this cause problems in minidumps
    // where it's possible the stub is identifiable but
    // the stub code isn't present?  Yes, but the lack
    // of that code could confuse the walker on its own
    // if it does code analysis.
if ((m_pTarget->ReadVirtual(address, &testRead, sizeof(testRead), &testDone) != S_OK) || !testDone) { return E_INVALIDARG; } return S_OK; } HRESULT ClrDataAccess::GetFullMethodName( IN MethodDesc* methodDesc, IN ULONG32 symbolChars, OUT ULONG32* symbolLen, _Out_writes_to_opt_(symbolChars, *symbolLen) LPWSTR symbol ) { StackSString s; #ifdef FEATURE_MINIMETADATA_IN_TRIAGEDUMPS PAL_CPP_TRY { #endif // FEATURE_MINIMETADATA_IN_TRIAGEDUMPS TypeString::AppendMethodInternal(s, methodDesc, TypeString::FormatSignature|TypeString::FormatNamespace|TypeString::FormatFullInst); #ifdef FEATURE_MINIMETADATA_IN_TRIAGEDUMPS } PAL_CPP_CATCH_ALL { if (!MdCacheGetEEName(dac_cast<TADDR>(methodDesc), s)) { PAL_CPP_RETHROW; } } PAL_CPP_ENDTRY #endif // FEATURE_MINIMETADATA_IN_TRIAGEDUMPS if (symbol) { // Copy as much as we can and truncate the rest. wcsncpy_s(symbol, symbolChars, s.GetUnicode(), _TRUNCATE); } if (symbolLen) *symbolLen = s.GetCount() + 1; if (symbol != NULL && symbolChars < (s.GetCount() + 1)) return S_FALSE; else return S_OK; } PCSTR ClrDataAccess::GetJitHelperName( IN TADDR address, IN bool dynamicHelpersOnly /*=false*/ ) { const static PCSTR s_rgHelperNames[] = { #define JITHELPER(code,fn,sig) #code, #include <jithelpers.h> }; static_assert_no_msg(ARRAY_SIZE(s_rgHelperNames) == CORINFO_HELP_COUNT); #ifdef TARGET_UNIX if (!dynamicHelpersOnly) #else if (!dynamicHelpersOnly && g_runtimeLoadedBaseAddress <= address && address < g_runtimeLoadedBaseAddress + g_runtimeVirtualSize) #endif // TARGET_UNIX { // Read the whole table from the target in one shot for better performance VMHELPDEF * pTable = static_cast<VMHELPDEF *>( PTR_READ(dac_cast<TADDR>(&hlpFuncTable), CORINFO_HELP_COUNT * sizeof(VMHELPDEF))); for (int i = 0; i < CORINFO_HELP_COUNT; i++) { if (address == (TADDR)(pTable[i].pfnHelper)) return s_rgHelperNames[i]; } } // Check if its a dynamically generated JIT helper const static CorInfoHelpFunc s_rgDynamicHCallIds[] = { #define DYNAMICJITHELPER(code, fn, sig) code, 
#define JITHELPER(code, fn,sig) #include <jithelpers.h> }; // Read the whole table from the target in one shot for better performance VMHELPDEF * pDynamicTable = static_cast<VMHELPDEF *>( PTR_READ(dac_cast<TADDR>(&hlpDynamicFuncTable), DYNAMIC_CORINFO_HELP_COUNT * sizeof(VMHELPDEF))); for (unsigned d = 0; d < DYNAMIC_CORINFO_HELP_COUNT; d++) { if (address == (TADDR)(pDynamicTable[d].pfnHelper)) { return s_rgHelperNames[s_rgDynamicHCallIds[d]]; } } return NULL; } HRESULT ClrDataAccess::RawGetMethodName( /* [in] */ CLRDATA_ADDRESS address, /* [in] */ ULONG32 flags, /* [in] */ ULONG32 bufLen, /* [out] */ ULONG32 *symbolLen, /* [size_is][out] */ _Out_writes_bytes_opt_(bufLen) WCHAR symbolBuf[ ], /* [out] */ CLRDATA_ADDRESS* displacement) { #ifdef TARGET_ARM _ASSERTE((address & THUMB_CODE) == 0); address &= ~THUMB_CODE; #endif const UINT k_cch64BitHexFormat = ARRAY_SIZE("1234567812345678"); HRESULT status; if (flags != 0) { return E_INVALIDARG; } TADDR taddr; if( (status = TRY_CLRDATA_ADDRESS_TO_TADDR(address, &taddr)) != S_OK ) { return status; } if ((status = IsPossibleCodeAddress(taddr)) != S_OK) { return status; } PTR_StubManager pStubManager; MethodDesc* methodDesc = NULL; { EECodeInfo codeInfo(TO_TADDR(address)); if (codeInfo.IsValid()) { if (displacement) { *displacement = codeInfo.GetRelOffset(); } methodDesc = codeInfo.GetMethodDesc(); goto NameFromMethodDesc; } } pStubManager = StubManager::FindStubManager(TO_TADDR(address)); if (pStubManager != NULL) { if (displacement) { *displacement = 0; } // // Special-cased stub managers // if (pStubManager == PrecodeStubManager::g_pManager) { PCODE alignedAddress = AlignDown(TO_TADDR(address), PRECODE_ALIGNMENT); #ifdef TARGET_ARM alignedAddress += THUMB_CODE; #endif SIZE_T maxPrecodeSize = sizeof(StubPrecode); #ifdef HAS_THISPTR_RETBUF_PRECODE maxPrecodeSize = max(maxPrecodeSize, sizeof(ThisPtrRetBufPrecode)); #endif for (SIZE_T i = 0; i < maxPrecodeSize / PRECODE_ALIGNMENT; i++) { EX_TRY { // Try to find matching 
precode entrypoint Precode* pPrecode = Precode::GetPrecodeFromEntryPoint(alignedAddress, TRUE); if (pPrecode != NULL) { methodDesc = pPrecode->GetMethodDesc(); if (methodDesc != NULL) { if (DacValidateMD(methodDesc)) { if (displacement) { *displacement = TO_TADDR(address) - PCODEToPINSTR(alignedAddress); } goto NameFromMethodDesc; } } } alignedAddress -= PRECODE_ALIGNMENT; } EX_CATCH { } EX_END_CATCH(SwallowAllExceptions) } } else if (pStubManager == JumpStubStubManager::g_pManager) { PCODE pTarget = decodeBackToBackJump(TO_TADDR(address)); HRESULT hr = GetRuntimeNameByAddress(pTarget, flags, bufLen, symbolLen, symbolBuf, NULL); if (SUCCEEDED(hr)) { return hr; } PCSTR pHelperName = GetJitHelperName(pTarget); if (pHelperName != NULL) { hr = ConvertUtf8(pHelperName, bufLen, symbolLen, symbolBuf); if (FAILED(hr)) return S_FALSE; return hr; } } static WCHAR s_wszFormatNameWithStubManager[] = W("CLRStub[%s]@%I64x"); LPCWSTR wszStubManagerName = pStubManager->GetStubManagerName(TO_TADDR(address)); _ASSERTE(wszStubManagerName != NULL); int result = _snwprintf_s( symbolBuf, bufLen, _TRUNCATE, s_wszFormatNameWithStubManager, wszStubManagerName, // Arg 1 = stub name TO_TADDR(address)); // Arg 2 = stub hex address if (result != -1) { // Printf succeeded, so we have an exact char count to return if (symbolLen) { size_t cchSymbol = wcslen(symbolBuf) + 1; if (!FitsIn<ULONG32>(cchSymbol)) return COR_E_OVERFLOW; *symbolLen = (ULONG32) cchSymbol; } return S_OK; } // Printf failed. Estimate a size that will be at least big enough to hold the name if (symbolLen) { size_t cchSymbol = ARRAY_SIZE(s_wszFormatNameWithStubManager) + wcslen(wszStubManagerName) + k_cch64BitHexFormat + 1; if (!FitsIn<ULONG32>(cchSymbol)) return COR_E_OVERFLOW; *symbolLen = (ULONG32) cchSymbol; } return S_FALSE; } // Do not waste time looking up name for static helper. Debugger can get the actual name from .pdb. 
PCSTR pHelperName; pHelperName = GetJitHelperName(TO_TADDR(address), true /* dynamicHelpersOnly */); if (pHelperName != NULL) { if (displacement) { *displacement = 0; } HRESULT hr = ConvertUtf8(pHelperName, bufLen, symbolLen, symbolBuf); if (FAILED(hr)) return S_FALSE; return S_OK; } return E_NOINTERFACE; NameFromMethodDesc: if (methodDesc->GetClassification() == mcDynamic && !methodDesc->GetSig()) { // XXX Microsoft - Should this case have a more specific name? static WCHAR s_wszFormatNameAddressOnly[] = W("CLRStub@%I64x"); int result = _snwprintf_s( symbolBuf, bufLen, _TRUNCATE, s_wszFormatNameAddressOnly, TO_TADDR(address)); if (result != -1) { // Printf succeeded, so we have an exact char count to return if (symbolLen) { size_t cchSymbol = wcslen(symbolBuf) + 1; if (!FitsIn<ULONG32>(cchSymbol)) return COR_E_OVERFLOW; *symbolLen = (ULONG32) cchSymbol; } return S_OK; } // Printf failed. Estimate a size that will be at least big enough to hold the name if (symbolLen) { size_t cchSymbol = ARRAY_SIZE(s_wszFormatNameAddressOnly) + k_cch64BitHexFormat + 1; if (!FitsIn<ULONG32>(cchSymbol)) return COR_E_OVERFLOW; *symbolLen = (ULONG32) cchSymbol; } return S_FALSE; } return GetFullMethodName(methodDesc, bufLen, symbolLen, symbolBuf); } HRESULT ClrDataAccess::GetMethodExtents(MethodDesc* methodDesc, METH_EXTENTS** extents) { CLRDATA_ADDRESS_RANGE* curExtent; { // // Get the information from the methoddesc. // We'll go through the CodeManager + JitManagers, so this should work // for all types of managed code. 
// PCODE methodStart = methodDesc->GetNativeCode(); if (!methodStart) { return E_NOINTERFACE; } EECodeInfo codeInfo(methodStart); _ASSERTE(codeInfo.IsValid()); TADDR codeSize = codeInfo.GetCodeManager()->GetFunctionSize(codeInfo.GetGCInfoToken()); *extents = new (nothrow) METH_EXTENTS; if (!*extents) { return E_OUTOFMEMORY; } (*extents)->numExtents = 1; curExtent = (*extents)->extents; curExtent->startAddress = TO_CDADDR(methodStart); curExtent->endAddress = curExtent->startAddress + codeSize; curExtent++; } (*extents)->curExtent = 0; return S_OK; } // Allocator to pass to the debug-info-stores... BYTE* DebugInfoStoreNew(void * pData, size_t cBytes) { return new (nothrow) BYTE[cBytes]; } HRESULT ClrDataAccess::GetMethodVarInfo(MethodDesc* methodDesc, TADDR address, ULONG32* numVarInfo, ICorDebugInfo::NativeVarInfo** varInfo, ULONG32* codeOffset) { SUPPORTS_DAC; COUNT_T countNativeVarInfo; NewHolder<ICorDebugInfo::NativeVarInfo> nativeVars(NULL); TADDR nativeCodeStartAddr; if (address != NULL) { NativeCodeVersion requestedNativeCodeVersion = ExecutionManager::GetNativeCodeVersion(address); if (requestedNativeCodeVersion.IsNull() || requestedNativeCodeVersion.GetNativeCode() == NULL) { return E_INVALIDARG; } nativeCodeStartAddr = PCODEToPINSTR(requestedNativeCodeVersion.GetNativeCode()); } else { nativeCodeStartAddr = PCODEToPINSTR(methodDesc->GetNativeCode()); } DebugInfoRequest request; request.InitFromStartingAddr(methodDesc, nativeCodeStartAddr); BOOL success = DebugInfoManager::GetBoundariesAndVars( request, DebugInfoStoreNew, NULL, // allocator NULL, NULL, &countNativeVarInfo, &nativeVars); if (!success) { return E_FAIL; } if (!nativeVars || !countNativeVarInfo) { return E_NOINTERFACE; } *numVarInfo = countNativeVarInfo; *varInfo = nativeVars; nativeVars.SuppressRelease(); // To prevent NewHolder from releasing the memory if (codeOffset) { *codeOffset = (ULONG32)(address - nativeCodeStartAddr); } return S_OK; } HRESULT 
ClrDataAccess::GetMethodNativeMap(MethodDesc* methodDesc, TADDR address, ULONG32* numMap, DebuggerILToNativeMap** map, bool* mapAllocated, CLRDATA_ADDRESS* codeStart, ULONG32* codeOffset) { _ASSERTE((codeOffset == NULL) || (address != NULL)); // Use the DebugInfoStore to get IL->Native maps. // It doesn't matter whether we're jitted, ngenned etc. TADDR nativeCodeStartAddr; if (address != NULL) { NativeCodeVersion requestedNativeCodeVersion = ExecutionManager::GetNativeCodeVersion(address); if (requestedNativeCodeVersion.IsNull() || requestedNativeCodeVersion.GetNativeCode() == NULL) { return E_INVALIDARG; } nativeCodeStartAddr = PCODEToPINSTR(requestedNativeCodeVersion.GetNativeCode()); } else { nativeCodeStartAddr = PCODEToPINSTR(methodDesc->GetNativeCode()); } DebugInfoRequest request; request.InitFromStartingAddr(methodDesc, nativeCodeStartAddr); // Bounds info. ULONG32 countMapCopy; NewHolder<ICorDebugInfo::OffsetMapping> mapCopy(NULL); BOOL success = DebugInfoManager::GetBoundariesAndVars( request, DebugInfoStoreNew, NULL, // allocator &countMapCopy, &mapCopy, NULL, NULL); if (!success) { return E_FAIL; } // Need to convert map formats. *numMap = countMapCopy; *map = new (nothrow) DebuggerILToNativeMap[countMapCopy]; if (!*map) { return E_OUTOFMEMORY; } ULONG32 i; for (i = 0; i < *numMap; i++) { (*map)[i].ilOffset = mapCopy[i].ilOffset; (*map)[i].nativeStartOffset = mapCopy[i].nativeOffset; if (i > 0) { (*map)[i - 1].nativeEndOffset = (*map)[i].nativeStartOffset; } (*map)[i].source = mapCopy[i].source; } if (*numMap >= 1) { (*map)[i - 1].nativeEndOffset = 0; } // Update varion out params. 
if (codeStart) { *codeStart = TO_CDADDR(nativeCodeStartAddr); } if (codeOffset) { *codeOffset = (ULONG32)(address - nativeCodeStartAddr); } *mapAllocated = true; return S_OK; } // Get the MethodDesc for a function // Arguments: // Input: // pModule - pointer to the module for the function // memberRef - metadata token for the function // Return Value: // MethodDesc for the function MethodDesc * ClrDataAccess::FindLoadedMethodRefOrDef(Module* pModule, mdToken memberRef) { CONTRACT(MethodDesc *) { GC_NOTRIGGER; PRECONDITION(CheckPointer(pModule)); POSTCONDITION(CheckPointer(RETVAL, NULL_OK)); } CONTRACT_END; // Must have a MemberRef or a MethodDef mdToken tkType = TypeFromToken(memberRef); _ASSERTE((tkType == mdtMemberRef) || (tkType == mdtMethodDef)); if (tkType == mdtMemberRef) { RETURN pModule->LookupMemberRefAsMethod(memberRef); } RETURN pModule->LookupMethodDef(memberRef); } // FindLoadedMethodRefOrDef // // ReportMem - report a region of memory for dump gathering // // If you specify that you expect success, any failure will cause ReportMem to // return false. If you do not expect success, true is always returned. // This function only throws when all dump collection should be cancelled. // // Arguments: // addr - the starting target address for the memory to report // size - the length (in bytes) to report // fExpectSuccess - if true (the default), then we expect that this region of memory // should be fully readable. Any read errors indicate a corrupt target. // bool ClrDataAccess::ReportMem(TADDR addr, TSIZE_T size, bool fExpectSuccess /*= true*/) { SUPPORTS_DAC_HOST_ONLY; // This block of code is to help debugging blocks that we report // to minidump/heapdump. You can set break point here to view the static // variable to figure out the size of blocks that we are reporting. // Most useful is set conditional break point to catch large chuck of // memory. We will leave it here for all builds. 
// static TADDR debugAddr; static TSIZE_T debugSize; debugAddr = addr; debugSize = size; HRESULT status; if (!addr || addr == (TADDR)-1 || !size) { if (fExpectSuccess) return false; else return true; } // // Try and sanity-check the reported region of memory // #ifdef _DEBUG // in debug builds, sanity-check all reports const TSIZE_T k_minSizeToCheck = 1; #else // in retail builds, only sanity-check larger chunks which have the potential to waste a // lot of time and/or space. This avoids the overhead of checking for the majority of // memory regions (which are small). const TSIZE_T k_minSizeToCheck = 1024; #endif if (size >= k_minSizeToCheck) { if (!IsFullyReadable(addr, size)) { if (!fExpectSuccess) { // We know the read might fail (eg. we're trying to find mapped pages in // a module image), so just skip this block silently. // Note that the EnumMemoryRegion callback won't necessarily do anything if any part of // the region is unreadable, and so there is no point in calling it. For cases where we expect // the read might fail, but we want to report any partial blocks, we have to break up the region // into pages and try reporting each page anyway return true; } // We're reporting bogus memory, so the target must be corrupt (or there is a issue). We should abort // reporting and continue with the next data structure (where the exception is caught), // just like we would for a DAC read error (otherwise we might do something stupid // like get into an infinite loop, or otherwise waste time with corrupt data). TARGET_CONSISTENCY_CHECK(false, "Found unreadable memory while reporting memory regions for dump gathering"); return false; } } // Minidumps should never contain data structures that are anywhere near 4MB. If we see this, it's // probably due to memory corruption. To keep the dump small, we'll truncate the block. Note that // the size to which the block is truncated is pretty unique, so should be good evidence in a dump // that this has happened. 
// Note that it's hard to say what a good value would be here, or whether we should dump any of the // data structure at all. Hopefully experience will help guide this going forward. // @dbgtodo : Extend dump-gathering API to allow a dump-log to be included. const TSIZE_T kMaxMiniDumpRegion = 4*1024*1024 - 3; // 4MB-3 if( size > kMaxMiniDumpRegion && (m_enumMemFlags == CLRDATA_ENUM_MEM_MINI || m_enumMemFlags == CLRDATA_ENUM_MEM_TRIAGE)) { TARGET_CONSISTENCY_CHECK( false, "Dump target consistency failure - truncating minidump data structure"); size = kMaxMiniDumpRegion; } // track the total memory reported. m_cbMemoryReported += size; // ICLRData APIs take only 32-bit sizes. In practice this will almost always be sufficient, but // in theory we might have some >4GB ranges on large 64-bit processes doing a heap dump // (for example, the code:LoaderHeap). If necessary, break up the reporting into maximum 4GB // chunks so we can use the existing API. // @dbgtodo : ICorDebugDataTarget should probably use 64-bit sizes while (size) { ULONG32 enumSize; if (size > UINT32_MAX) { enumSize = UINT32_MAX; } else { enumSize = (ULONG32)size; } // Actually perform the memory reporting callback status = m_enumMemCb->EnumMemoryRegion(TO_CDADDR(addr), enumSize); if (status != S_OK) { // If dump generation was cancelled, allow us to throw upstack so we'll actually quit. if ((fExpectSuccess) && (status != COR_E_OPERATIONCANCELED)) return false; } // If the return value of EnumMemoryRegion is COR_E_OPERATIONCANCELED, // it means that user has requested that the minidump gathering be canceled. // To do this we throw an exception which is caught in EnumMemoryRegionsWrapper. 
        // If the return value of EnumMemoryRegion is COR_E_OPERATIONCANCELED,
        // it means that user has requested that the minidump gathering be canceled.
        // To do this we throw an exception which is caught in EnumMemoryRegionsWrapper.
        if (status == COR_E_OPERATIONCANCELED)
        {
            ThrowHR(status);
        }

        // Move onto the next chunk (if any)
        size -= enumSize;
        addr += enumSize;
    }

    return true;
}

//
// DacUpdateMemoryRegion - updates/poisons a region of memory of generated dump
//
// Parameters:
//   addr           - target address of the beginning of the memory region
//   bufferSize     - number of bytes to update/poison
//   buffer         - data to be written at given target address
//
// Returns false (without throwing) on any failure: bad arguments, no update
// callback registered, or a failing callback.
//
bool ClrDataAccess::DacUpdateMemoryRegion(TADDR addr, TSIZE_T bufferSize, BYTE* buffer)
{
    SUPPORTS_DAC_HOST_ONLY;

    HRESULT status;

    if (!addr || addr == (TADDR)-1 || !bufferSize)
    {
        return false;
    }

    // track the total memory reported.
    // NOTE(review): this counter is incremented even when m_updateMemCb is
    // NULL below and nothing is actually written — confirm that's intended.
    m_cbMemoryReported += bufferSize;

    if (m_updateMemCb == NULL)
    {
        return false;
    }

    // Actually perform the memory updating callback
    status = m_updateMemCb->UpdateMemoryRegion(TO_CDADDR(addr), (ULONG32)bufferSize, buffer);
    if (status != S_OK)
    {
        return false;
    }

    return true;
}

//
// Check whether a region of target memory is fully readable.
//
// Arguments:
//     addr    The base target address of the region
//     size    The size of the region to analyze
//
// Return value:
//     True if the entire regions appears to be readable, false otherwise.
//
// Notes:
//     The motivation here is that reporting large regions of unmapped address space to dbgeng can result in
//     it taking a long time trying to identify a valid subrange.  This can happen when the target
//     memory is corrupt, and we enumerate a data structure with a dynamic size.  Ideally we would just spec
//     the ICLRDataEnumMemoryRegionsCallback API to require the client to fail if it detects an unmapped
//     memory address in the region.  However, we can't change the existing dbgeng code, so for now we'll
//     rely on this heuristic here.
//     @dbgtodo : Try and get the dbg team to change their EnumMemoryRegion behavior.
See DevDiv Bugs 6265 // bool ClrDataAccess::IsFullyReadable(TADDR taBase, TSIZE_T dwSize) { // The only way we have to verify that a memory region is readable is to try reading it in it's // entirety. This is potentially expensive, so we'll rely on a heuristic that spot-checks various // points in the region. // Ensure we've got something to check if( dwSize == 0 ) return true; // Check for overflow TADDR taEnd = DacTAddrOffset(taBase, dwSize, 1); // Loop through using expontential growth, being sure to check both the first and last byte TADDR taCurr = taBase; TSIZE_T dwInc = 4096; bool bDone = false; while (!bDone) { // Try and read a byte from the target. Note that we don't use PTR_BYTE here because we don't want // the overhead of inserting entries into the DAC instance cache. BYTE b; ULONG32 dwBytesRead; HRESULT hr = m_pTarget->ReadVirtual(taCurr, &b, 1, &dwBytesRead); if( hr != S_OK || dwBytesRead < 1 ) { return false; } if (taEnd - taCurr <= 1) { // We just read the last byte so we're done _ASSERTE( taCurr = taEnd - 1 ); bDone = true; } else if (dwInc == 0 || dwInc >= taEnd - taCurr) { // we've reached the end of the exponential series, check the last byte taCurr = taEnd - 1; } else { // advance current pointer (subtraction above ensures this won't overflow) taCurr += dwInc; // double the increment for next time (or set to 0 if it's already the max) dwInc <<= 1; } } return true; } JITNotification* ClrDataAccess::GetHostJitNotificationTable() { if (m_jitNotificationTable == NULL) { m_jitNotificationTable = JITNotifications::InitializeNotificationTable(1000); } return m_jitNotificationTable; } GcNotification* ClrDataAccess::GetHostGcNotificationTable() { if (m_gcNotificationTable == NULL) { m_gcNotificationTable = GcNotifications::InitializeNotificationTable(128); } return m_gcNotificationTable; } /* static */ bool ClrDataAccess::GetMetaDataFileInfoFromPEFile(PEAssembly *pPEAssembly, DWORD &dwTimeStamp, DWORD &dwSize, DWORD &dwDataSize, DWORD &dwRvaHint, bool 
&isNGEN, _Out_writes_(cchFilePath) LPWSTR wszFilePath, const DWORD cchFilePath) { SUPPORTS_DAC_HOST_ONLY; PEImage *mdImage = NULL; PEImageLayout *layout; IMAGE_DATA_DIRECTORY *pDir = NULL; COUNT_T uniPathChars = 0; isNGEN = false; if (pDir == NULL || pDir->Size == 0) { mdImage = pPEAssembly->GetPEImage(); if (mdImage != NULL) { layout = mdImage->GetLoadedLayout(); pDir = &layout->GetCorHeader()->MetaData; // In IL image case, we do not have any hint to IL metadata since it is stored // in the corheader. // dwRvaHint = 0; dwDataSize = pDir->Size; } else { return false; } } // Do not fail if path can not be read. Triage dumps don't have paths and we want to fallback // on searching metadata from IL image. mdImage->GetPath().DacGetUnicode(cchFilePath, wszFilePath, &uniPathChars); if (!mdImage->HasNTHeaders() || !mdImage->HasCorHeader() || !mdImage->HasLoadedLayout() || (uniPathChars > cchFilePath)) { return false; } // It is possible that the module is in-memory. That is the wszFilePath here is empty. // We will try to use the module name instead in this case for hosting debugger // to find match. if (wcslen(wszFilePath) == 0) { mdImage->GetModuleFileNameHintForDAC().DacGetUnicode(cchFilePath, wszFilePath, &uniPathChars); if (uniPathChars > cchFilePath) { return false; } } dwTimeStamp = layout->GetTimeDateStamp(); dwSize = (ULONG32)layout->GetVirtualSize(); return true; } /* static */ bool ClrDataAccess::GetILImageInfoFromNgenPEFile(PEAssembly *pPEAssembly, DWORD &dwTimeStamp, DWORD &dwSize, _Out_writes_(cchFilePath) LPWSTR wszFilePath, const DWORD cchFilePath) { SUPPORTS_DAC_HOST_ONLY; DWORD dwWritten = 0; // use the IL File name if (!pPEAssembly->GetPath().DacGetUnicode(cchFilePath, wszFilePath, (COUNT_T *)(&dwWritten))) { // Use DAC hint to retrieve the IL name. 
pPEAssembly->GetModuleFileNameHint().DacGetUnicode(cchFilePath, wszFilePath, (COUNT_T *)(&dwWritten)); } dwTimeStamp = 0; dwSize = 0; return true; } void * ClrDataAccess::GetMetaDataFromHost(PEAssembly* pPEAssembly, bool* isAlternate) { DWORD imageTimestamp, imageSize, dataSize; void* buffer = NULL; WCHAR uniPath[MAX_LONGPATH] = {0}; bool isAlt = false; bool isNGEN = false; DAC_INSTANCE* inst = NULL; HRESULT hr = S_OK; DWORD ulRvaHint; // // We always ask for the IL image metadata, // as we expect that to be more // available than others. The drawback is that // there may be differences between the IL image // metadata and native image metadata, so we // have to mark such alternate metadata so that // we can fail unsupported usage of it. // // Microsoft - above comment seems to be an unimplemented thing. // The DAC_MD_IMPORT.isAlternate field gets ultimately set, but // on the searching I did, I cannot find any usage of it // other than in the ctor. Should we be doing something, or should // we remove this comment and the isAlternate field? // It's possible that test will want us to track whether we have // an IL image's metadata loaded against an NGEN'ed image // so the field remains for now. if (!ClrDataAccess::GetMetaDataFileInfoFromPEFile( pPEAssembly, imageTimestamp, imageSize, dataSize, ulRvaHint, isNGEN, uniPath, ARRAY_SIZE(uniPath))) { return NULL; } // try direct match for the image that is loaded into the managed process pPEAssembly->GetLoadedMetadata((COUNT_T *)(&dataSize)); DWORD allocSize = 0; if (!ClrSafeInt<DWORD>::addition(dataSize, sizeof(DAC_INSTANCE), allocSize)) { DacError(HRESULT_FROM_WIN32(ERROR_ARITHMETIC_OVERFLOW)); } inst = m_instances.Alloc(0, allocSize, DAC_DPTR); if (!inst) { DacError(E_OUTOFMEMORY); return NULL; } buffer = (void*)(inst + 1); // APIs implemented by hosting debugger. It can use the path/filename, timestamp, and // pPEAssembly size to find an exact match for the image. 
If that fails for an ngen'ed image, // we can request the IL image which it came from. if (m_legacyMetaDataLocator) { // Legacy API implemented by hosting debugger. hr = m_legacyMetaDataLocator->GetMetadata( uniPath, imageTimestamp, imageSize, NULL, // MVID - not used yet ulRvaHint, 0, // flags - reserved for future. dataSize, (BYTE*)buffer, NULL); } else { hr = m_target3->GetMetaData( uniPath, imageTimestamp, imageSize, NULL, // MVID - not used yet ulRvaHint, 0, // flags - reserved for future. dataSize, (BYTE*)buffer, NULL); } if (FAILED(hr) && isNGEN) { // We failed to locate the ngen'ed image. We should try to // find the matching IL image // isAlt = true; if (!ClrDataAccess::GetILImageInfoFromNgenPEFile( pPEAssembly, imageTimestamp, imageSize, uniPath, ARRAY_SIZE(uniPath))) { goto ErrExit; } const WCHAR* ilExtension = W("dll"); WCHAR ngenImageName[MAX_LONGPATH] = {0}; if (wcscpy_s(ngenImageName, ARRAY_SIZE(ngenImageName), uniPath) != 0) { goto ErrExit; } if (wcscpy_s(uniPath, ARRAY_SIZE(uniPath), ngenImageName) != 0) { goto ErrExit; } // RVA size in ngen image and IL image is the same. Because the only // different is in RVA. That is 4 bytes column fixed. // // try again if (m_legacyMetaDataLocator) { hr = m_legacyMetaDataLocator->GetMetadata( uniPath, imageTimestamp, imageSize, NULL, // MVID - not used yet 0, // pass zero hint here... important 0, // flags - reserved for future. dataSize, (BYTE*)buffer, NULL); } else { hr = m_target3->GetMetaData( uniPath, imageTimestamp, imageSize, NULL, // MVID - not used yet 0, // pass zero hint here... important 0, // flags - reserved for future. 
dataSize, (BYTE*)buffer, NULL); } } if (FAILED(hr)) { goto ErrExit; } *isAlternate = isAlt; m_instances.AddSuperseded(inst); return buffer; ErrExit: if (inst != NULL) { m_instances.ReturnAlloc(inst); } return NULL; } //++++++++++++++++++++++++++++++++++++++++++++++++++++++++ // // Given a PEAssembly or a ReflectionModule try to find the corresponding metadata // We will first ask debugger to locate it. If fail, we will try // to get it from the target process // //++++++++++++++++++++++++++++++++++++++++++++++++++++++++ IMDInternalImport* ClrDataAccess::GetMDImport(const PEAssembly* pPEAssembly, const ReflectionModule* reflectionModule, bool throwEx) { HRESULT status; PTR_CVOID mdBaseTarget = NULL; COUNT_T mdSize; IMDInternalImport* mdImport = NULL; PVOID mdBaseHost = NULL; bool isAlternate = false; _ASSERTE((pPEAssembly == NULL && reflectionModule != NULL) || (pPEAssembly != NULL && reflectionModule == NULL)); TADDR peAssemblyAddr = (pPEAssembly != NULL) ? dac_cast<TADDR>(pPEAssembly) : dac_cast<TADDR>(reflectionModule); // // Look for one we've already created. // mdImport = m_mdImports.Get(peAssemblyAddr); if (mdImport != NULL) { return mdImport; } if (pPEAssembly != NULL) { // Get the metadata size mdBaseTarget = const_cast<PEAssembly*>(pPEAssembly)->GetLoadedMetadata(&mdSize); } else if (reflectionModule != NULL) { // Get the metadata PTR_SBuffer metadataBuffer = reflectionModule->GetDynamicMetadataBuffer(); if (metadataBuffer != PTR_NULL) { mdBaseTarget = dac_cast<PTR_CVOID>((metadataBuffer->DacGetRawBuffer()).StartAddress()); mdSize = metadataBuffer->GetSize(); } else { if (throwEx) { DacError(E_FAIL); } return NULL; } } else { if (throwEx) { DacError(E_FAIL); } return NULL; } if (mdBaseTarget == PTR_NULL) { mdBaseHost = NULL; } else { // // Maybe the target process has the metadata // Find out where the metadata for the image is // in the target's memory. // // // Read the metadata into the host process. Make sure pass in false in the last // parameter. 
        // This only matters when producing a skinny mini-dump.  This will
        // prevent the metadata from being reported into the mini-dump.
        //
        mdBaseHost = DacInstantiateTypeByAddressNoReport(dac_cast<TADDR>(mdBaseTarget),
                                                 mdSize,
                                                 false);
    }

    // Try to see if debugger can locate it
    if (pPEAssembly != NULL && mdBaseHost == NULL && (m_target3 || m_legacyMetaDataLocator))
    {
        // We couldn't read the metadata from memory.  Ask
        // the target for metadata as it may be able to
        // provide it from some alternate means.
        mdBaseHost = GetMetaDataFromHost(const_cast<PEAssembly *>(pPEAssembly), &isAlternate);
    }

    if (mdBaseHost == NULL)
    {
        // cannot locate metadata anywhere
        if (throwEx)
        {
            DacError(E_INVALIDARG);
        }
        return NULL;
    }

    //
    // Open the MD interface on the host copy of the metadata.
    //
    status = GetMDInternalInterface(mdBaseHost, mdSize, ofRead,
                                    IID_IMDInternalImport,
                                    (void**)&mdImport);
    if (status != S_OK)
    {
        if (throwEx)
        {
            DacError(status);
        }
        return NULL;
    }

    //
    // Remember the object for this module for
    // possible later use.
    // The m_mdImports list does get cleaned up by calls to ClrDataAccess::Flush,
    // i.e. every time the process changes state.
    // On allocation failure we drop our reference to the freshly opened
    // import before raising the DAC error.
    if (m_mdImports.Add(peAssemblyAddr, mdImport, isAlternate) == NULL)
    {
        mdImport->Release();
        DacError(E_OUTOFMEMORY);
    }

    return mdImport;
}

//
// Set whether inconsistencies in the target should raise asserts.
// This overrides the default initial setting.
//
// Arguments:
//     fEnableAsserts - whether ASSERTs in dacized code should be enabled
//
void ClrDataAccess::SetTargetConsistencyChecks(bool fEnableAsserts)
{
    LIMITED_METHOD_DAC_CONTRACT;
    m_fEnableTargetConsistencyAsserts = fEnableAsserts;
}

//
// Get whether inconsistencies in the target should raise asserts.
//
// Return value:
//     whether ASSERTs in dacized code should be enabled
//
// Notes:
//     The implementation of ASSERT accesses this via code:DacTargetConsistencyAssertsEnabled
//
//     By default, this is disabled, unless COMPlus_DbgDACEnableAssert is set (see code:ClrDataAccess::ClrDataAccess).
// This is necessary for compatibility. For example, SOS expects to be able to scan for // valid MethodTables etc. (which may cause ASSERTs), and also doesn't want ASSERTs when working // with targets with corrupted memory. // // Calling code:ClrDataAccess::SetTargetConsistencyChecks overrides the default setting. // bool ClrDataAccess::TargetConsistencyAssertsEnabled() { LIMITED_METHOD_DAC_CONTRACT; return m_fEnableTargetConsistencyAsserts; } // // VerifyDlls - Validate that the mscorwks in the target matches this version of mscordacwks // Only done on Windows and Mac builds at the moment. // See code:CordbProcess::CordbProcess#DBIVersionChecking for more information regarding version checking. // HRESULT ClrDataAccess::VerifyDlls() { #ifndef TARGET_UNIX // Provide a knob for disabling this check if we really want to try and proceed anyway with a // DAC mismatch. DAC behavior may be arbitrarily bad - globals probably won't be at the same // address, data structures may be laid out differently, etc. if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_DbgDACSkipVerifyDlls)) { return S_OK; } // Read the debug directory timestamp from the target mscorwks image using DAC // Note that we don't use the PE timestamp because the PE pPEAssembly might be changed in ways // that don't effect the PDB (and therefore don't effect DAC). Specifically, we rebase // our DLLs at the end of a build, that changes the PE pPEAssembly, but not the PDB. // Note that if we wanted to be extra careful, we could read the CV contents (which includes // the GUID signature) and verify it matches. Using the timestamp is useful for helpful error // messages, and should be sufficient in any real scenario. DWORD timestamp = 0; HRESULT hr = S_OK; DAC_ENTER(); EX_TRY { // Note that we don't need to worry about ensuring the image memory read by this code // is saved in a minidump. Managed minidump debugging already requires that you have // the full mscorwks.dll available at debug time (eg. 
windbg won't even load DAC without it). PEDecoder pedecoder(dac_cast<PTR_VOID>(m_globalBase)); // We use the first codeview debug directory entry since this should always refer to the single // PDB for mscorwks.dll. const UINT k_maxDebugEntries = 32; // a reasonable upper limit in case of corruption for( UINT i = 0; i < k_maxDebugEntries; i++) { PTR_IMAGE_DEBUG_DIRECTORY pDebugEntry = pedecoder.GetDebugDirectoryEntry(i); // If there are no more entries, then stop if (pDebugEntry == NULL) break; // Ignore non-codeview entries. Some scenarios (eg. optimized builds), there may be extra // debug directory entries at the end of some other type. if (pDebugEntry->Type == IMAGE_DEBUG_TYPE_CODEVIEW) { // Found a codeview entry - use it's timestamp for comparison timestamp = pDebugEntry->TimeDateStamp; break; } } char szMsgBuf[1024]; _snprintf_s(szMsgBuf, sizeof(szMsgBuf), _TRUNCATE, "Failed to find any valid codeview debug directory entry in %s image", MAIN_CLR_MODULE_NAME_A); _ASSERTE_MSG(timestamp != 0, szMsgBuf); } EX_CATCH { if (!DacExceptionFilter(GET_EXCEPTION(), this, &hr)) { EX_RETHROW; } } EX_END_CATCH(SwallowAllExceptions) DAC_LEAVE(); if (FAILED(hr)) { return hr; } // Validate that we got a timestamp and it matches what the DAC table told us to expect if (timestamp == 0 || timestamp != g_dacTableInfo.dwID0) { // Timestamp mismatch. This means mscordacwks is being used with a version of // mscorwks other than the one it was built for. This will not work reliably. #ifdef _DEBUG // Check if verbose asserts are enabled. The default is up to the specific instantiation of // ClrDataAccess, but can be overridden (in either direction) by a COMPlus_ knob. // Note that we check this knob every time because it may be handy to turn it on in // the environment mid-flight. DWORD dwAssertDefault = m_fEnableDllVerificationAsserts ? 
1 : 0; if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_DbgDACAssertOnMismatch, dwAssertDefault)) { // Output a nice error message that contains the timestamps in string format. time_t actualTime = timestamp; char szActualTime[30]; ctime_s(szActualTime, sizeof(szActualTime), &actualTime); time_t expectedTime = g_dacTableInfo.dwID0; char szExpectedTime[30]; ctime_s(szExpectedTime, sizeof(szExpectedTime), &expectedTime); // Create a nice detailed message for the assert dialog. // Note that the strings returned by ctime_s have terminating newline characters. // This is technically a TARGET_CONSISTENCY_CHECK because a corrupt target could, // in-theory, have a corrupt mscrowks PE header and cause this check to fail // unnecessarily. However, this check occurs during startup, before we know // whether target consistency checks should be enabled, so it's always enabled // at the moment. char szMsgBuf[1024]; _snprintf_s(szMsgBuf, sizeof(szMsgBuf), _TRUNCATE, "DAC fatal error: %s/mscordacwks.dll version mismatch\n\n"\ "The debug directory timestamp of the loaded %s does not match the\n"\ "version mscordacwks.dll was built for.\n"\ "Expected %s timestamp: %s"\ "Actual %s timestamp: %s\n"\ "DAC will now fail to initialize with a CORDBG_E_MISMATCHED_CORWKS_AND_DACWKS_DLLS\n"\ "error. If you really want to try and use the mimatched DLLs, you can disable this\n"\ "check by setting COMPlus_DbgDACSkipVerifyDlls=1. 
However, using a mismatched DAC\n"\ "DLL will usually result in arbitrary debugger failures.\n", TARGET_MAIN_CLR_DLL_NAME_A, TARGET_MAIN_CLR_DLL_NAME_A, TARGET_MAIN_CLR_DLL_NAME_A, szExpectedTime, TARGET_MAIN_CLR_DLL_NAME_A, szActualTime); _ASSERTE_MSG(false, szMsgBuf); } #endif // Return a specific hresult indicating this problem return CORDBG_E_MISMATCHED_CORWKS_AND_DACWKS_DLLS; } #endif // TARGET_UNIX return S_OK; } #ifdef FEATURE_MINIMETADATA_IN_TRIAGEDUMPS void ClrDataAccess::InitStreamsForWriting(IN CLRDataEnumMemoryFlags flags) { // enforce this should only be called when generating triage and mini-dumps if (flags != CLRDATA_ENUM_MEM_MINI && flags != CLRDATA_ENUM_MEM_TRIAGE) return; EX_TRY { if (m_streams == NULL) m_streams = new DacStreamManager(g_MiniMetaDataBuffAddress, g_MiniMetaDataBuffMaxSize); if (!m_streams->PrepareStreamsForWriting()) { delete m_streams; m_streams = NULL; } } EX_CATCH { if (m_streams != NULL) { delete m_streams; m_streams = NULL; } } EX_END_CATCH(SwallowAllExceptions) } bool ClrDataAccess::MdCacheAddEEName(TADDR taEEStruct, const SString& name) { bool result = false; EX_TRY { if (m_streams != NULL) result = m_streams->MdCacheAddEEName(taEEStruct, name); } EX_CATCH { result = false; } EX_END_CATCH(SwallowAllExceptions) return result; } void ClrDataAccess::EnumStreams(IN CLRDataEnumMemoryFlags flags) { // enforce this should only be called when generating triage and mini-dumps if (flags != CLRDATA_ENUM_MEM_MINI && flags != CLRDATA_ENUM_MEM_TRIAGE) return; EX_TRY { if (m_streams != NULL) m_streams->EnumStreams(flags); } EX_CATCH { } EX_END_CATCH(SwallowAllExceptions) } bool ClrDataAccess::MdCacheGetEEName(TADDR taEEStruct, SString & eeName) { bool result = false; EX_TRY { if (m_streams == NULL) m_streams = new DacStreamManager(g_MiniMetaDataBuffAddress, g_MiniMetaDataBuffMaxSize); result = m_streams->MdCacheGetEEName(taEEStruct, eeName); } EX_CATCH { result = false; } EX_END_CATCH(SwallowAllExceptions) return result; } #endif // 
FEATURE_MINIMETADATA_IN_TRIAGEDUMPS

// Needed for RT_RCDATA.
#define MAKEINTRESOURCE(v) MAKEINTRESOURCEW(v)

// this funny looking double macro forces x to be macro expanded before L is prepended
#define _WIDE(x) _WIDE2(x)
#define _WIDE2(x) W(x)

// Resolve the address of the DAC table in the target runtime image (Unix-only
// paths: either a compiled-in RVA or the exported "g_dacTable" symbol).
HRESULT GetDacTableAddress(ICorDebugDataTarget* dataTarget, ULONG64 baseAddress, PULONG64 dacTableAddress)
{
#ifdef TARGET_UNIX
#ifdef USE_DAC_TABLE_RVA
#ifdef DAC_TABLE_SIZE
    if (DAC_TABLE_SIZE != sizeof(g_dacGlobals))
    {
        return E_INVALIDARG;
    }
#endif
    // On MacOS, FreeBSD or NetBSD use the RVA include file
    *dacTableAddress = baseAddress + DAC_TABLE_RVA;
#else
    // On Linux/MacOS try to get the dac table address via the export symbol
    if (!TryGetSymbol(dataTarget, baseAddress, "g_dacTable", dacTableAddress))
    {
        return CORDBG_E_MISSING_DEBUGGER_EXPORTS;
    }
#endif
#endif
    return S_OK;
}

// Populate g_dacGlobals from the target runtime image: on TARGET_UNIX by
// reading it directly from the resolved table address, otherwise by loading
// and validating the DACCESS_TABLE_RESOURCE RCDATA resource from the image.
HRESULT ClrDataAccess::GetDacGlobals()
{
#ifdef TARGET_UNIX
    ULONG64 dacTableAddress;
    HRESULT hr = GetDacTableAddress(m_pTarget, m_globalBase, &dacTableAddress);
    if (FAILED(hr))
    {
        return hr;
    }
    if (FAILED(ReadFromDataTarget(m_pTarget, dacTableAddress, (BYTE*)&g_dacGlobals, sizeof(g_dacGlobals))))
    {
        return CORDBG_E_MISSING_DEBUGGER_EXPORTS;
    }
    if (g_dacGlobals.ThreadStore__s_pThreadStore == NULL)
    {
        return CORDBG_E_UNSUPPORTED;
    }
    return S_OK;
#else
    HRESULT status = E_FAIL;
    DWORD rsrcRVA = 0;
    LPVOID rsrcData = NULL;
    DWORD rsrcSize = 0;

    DWORD resourceSectionRVA = 0;

    if (FAILED(status = GetMachineAndResourceSectionRVA(m_pTarget, m_globalBase, NULL, &resourceSectionRVA)))
    {
        _ASSERTE_MSG(false, "DAC fatal error: can't locate resource section in " TARGET_MAIN_CLR_DLL_NAME_A);
        return CORDBG_E_MISSING_DEBUGGER_EXPORTS;
    }

    if (FAILED(status = GetResourceRvaFromResourceSectionRvaByName(m_pTarget, m_globalBase,
        resourceSectionRVA, (DWORD)(size_t)RT_RCDATA, _WIDE(DACCESS_TABLE_RESOURCE), 0,
        &rsrcRVA, &rsrcSize)))
    {
        _ASSERTE_MSG(false, "DAC fatal error: can't locate DAC table resource in " TARGET_MAIN_CLR_DLL_NAME_A);
        return CORDBG_E_MISSING_DEBUGGER_EXPORTS;
    }

    // NOTE(review): rsrcData is never freed on any path out of this function,
    // which looks like a leak — confirm whether it is intentionally left alive.
    rsrcData = new (nothrow) BYTE[rsrcSize];
    if (rsrcData == NULL)
        return E_OUTOFMEMORY;

    if (FAILED(status = ReadFromDataTarget(m_pTarget, m_globalBase + rsrcRVA, (BYTE*)rsrcData, rsrcSize)))
    {
        _ASSERTE_MSG(false, "DAC fatal error: can't load DAC table resource from " TARGET_MAIN_CLR_DLL_NAME_A);
        return CORDBG_E_MISSING_DEBUGGER_EXPORTS;
    }

    PBYTE rawData = (PBYTE)rsrcData;
    DWORD bytesLeft = rsrcSize;

    // Read the header
    struct DacTableHeader header;

    // We currently expect the header to be 2 32-bit values and 1 16-byte value,
    // make sure there is no packing going on or anything.
    static_assert_no_msg(sizeof(DacTableHeader) == 2 * 4 + 16);

    if (bytesLeft < sizeof(DacTableHeader))
    {
        _ASSERTE_MSG(false, "DAC fatal error: DAC table too small for header.");
        goto Exit;
    }

    memcpy(&header, rawData, sizeof(DacTableHeader));
    rawData += sizeof(DacTableHeader);
    bytesLeft -= sizeof(DacTableHeader);

    // Save the table info for later use
    g_dacTableInfo = header.info;

    // Sanity check that the DAC table is the size we expect.
    // This could fail if a different version of dacvars.h or vptr_list.h was used when building
    // mscordacwks.dll than when running DacTableGen.

    if (offsetof(DacGlobals, EEJitManager__vtAddr) != header.numGlobals * sizeof(ULONG))
    {
#ifdef _DEBUG
        char szMsgBuf[1024];
        _snprintf_s(szMsgBuf, sizeof(szMsgBuf), _TRUNCATE,
            "DAC fatal error: mismatch in number of globals in DAC table. Read from file: %d, expected: %zd.",
            header.numGlobals,
            (size_t)offsetof(DacGlobals, EEJitManager__vtAddr) / sizeof(ULONG));
        _ASSERTE_MSG(false, szMsgBuf);
#endif // _DEBUG

        status = E_INVALIDARG;
        goto Exit;
    }

    if (sizeof(DacGlobals) != (header.numGlobals + header.numVptrs) * sizeof(ULONG))
    {
#ifdef _DEBUG
        char szMsgBuf[1024];
        _snprintf_s(szMsgBuf, sizeof(szMsgBuf), _TRUNCATE,
            "DAC fatal error: mismatch in number of vptrs in DAC table. Read from file: %d, expected: %zd.",
            header.numVptrs,
            (size_t)(sizeof(DacGlobals) - offsetof(DacGlobals, EEJitManager__vtAddr)) / sizeof(ULONG));
        _ASSERTE_MSG(false, szMsgBuf);
#endif // _DEBUG

        status = E_INVALIDARG;
        goto Exit;
    }

    // Copy the DAC table into g_dacGlobals
    if (bytesLeft < sizeof(DacGlobals))
    {
        _ASSERTE_MSG(false, "DAC fatal error: DAC table resource too small for DacGlobals.");
        status = E_UNEXPECTED;
        goto Exit;
    }
    memcpy(&g_dacGlobals, rawData, sizeof(DacGlobals));
    rawData += sizeof(DacGlobals);
    bytesLeft -= sizeof(DacGlobals);

    status = S_OK;

Exit:
    return status;
#endif
}

#undef MAKEINTRESOURCE

//----------------------------------------------------------------------------
//
// IsExceptionFromManagedCode - report if pExceptionRecord points to an exception belonging to the current runtime
//
// Arguments:
//    pExceptionRecord - the exception record
//
// Return Value:
//    TRUE if it is
//    Otherwise, FALSE
//
//----------------------------------------------------------------------------
BOOL ClrDataAccess::IsExceptionFromManagedCode(EXCEPTION_RECORD* pExceptionRecord)
{
    DAC_ENTER();

    BOOL flag = FALSE;

    if (::IsExceptionFromManagedCode(pExceptionRecord))
    {
        flag = TRUE;
    }

    DAC_LEAVE();

    return flag;
}

#ifndef TARGET_UNIX

//----------------------------------------------------------------------------
//
// GetWatsonBuckets - retrieve Watson buckets from the specified thread
//
// Arguments:
//    dwThreadId - the thread ID
//    pGM - pointer to the space to store retrieved Watson buckets
//
// Return Value:
//    S_OK if the operation is successful.
//    or S_FALSE if Watson buckets cannot be found
//    else detailed error code.
// //---------------------------------------------------------------------------- HRESULT ClrDataAccess::GetWatsonBuckets(DWORD dwThreadId, GenericModeBlock * pGM) { _ASSERTE((dwThreadId != 0) && (pGM != NULL)); if ((dwThreadId == 0) || (pGM == NULL)) { return E_INVALIDARG; } DAC_ENTER(); Thread * pThread = DacGetThread(dwThreadId); _ASSERTE(pThread != NULL); HRESULT hr = E_UNEXPECTED; if (pThread != NULL) { hr = GetClrWatsonBucketsWorker(pThread, pGM); } DAC_LEAVE(); return hr; } #endif // TARGET_UNIX //---------------------------------------------------------------------------- // // CLRDataAccessCreateInstance - create and initialize a ClrDataAccess object // // Arguments: // pLegacyTarget - data target object // pClrDataAccess - ClrDataAccess object // // Return Value: // S_OK on success, else detailed error code. // //---------------------------------------------------------------------------- STDAPI CLRDataAccessCreateInstance(ICLRDataTarget * pLegacyTarget, ClrDataAccess ** pClrDataAccess) { if ((pLegacyTarget == NULL) || (pClrDataAccess == NULL)) { return E_INVALIDARG; } *pClrDataAccess = NULL; // Create an adapter which implements the new ICorDebugDataTarget interfaces using // a legacy implementation of ICLRDataTarget // ClrDataAccess will take a take a ref on this and delete it when it's released. DataTargetAdapter * pDtAdapter = new (nothrow) DataTargetAdapter(pLegacyTarget); if (!pDtAdapter) { return E_OUTOFMEMORY; } ClrDataAccess* dacClass = new (nothrow) ClrDataAccess(pDtAdapter, pLegacyTarget); if (!dacClass) { delete pDtAdapter; return E_OUTOFMEMORY; } HRESULT hr = dacClass->Initialize(); if (FAILED(hr)) { dacClass->Release(); return hr; } *pClrDataAccess = dacClass; return S_OK; } //---------------------------------------------------------------------------- // // CLRDataCreateInstance. // Creates the IXClrData object // This is the legacy entrypoint to DAC, used by dbgeng/dbghelp (windbg, SOS, watson, etc). 
// //---------------------------------------------------------------------------- STDAPI DLLEXPORT CLRDataCreateInstance(REFIID iid, ICLRDataTarget * pLegacyTarget, void ** iface) { if ((pLegacyTarget == NULL) || (iface == NULL)) { return E_INVALIDARG; } *iface = NULL; ClrDataAccess * pClrDataAccess; HRESULT hr = CLRDataAccessCreateInstance(pLegacyTarget, &pClrDataAccess); if (hr != S_OK) { return hr; } hr = pClrDataAccess->QueryInterface(iid, iface); pClrDataAccess->Release(); return hr; } //---------------------------------------------------------------------------- // // OutOfProcessExceptionEventGetProcessIdAndThreadId - get ProcessID and ThreadID // // Arguments: // hProcess - process handle // hThread - thread handle // pPId - pointer to DWORD to store ProcessID // pThreadId - pointer to DWORD to store ThreadID // // Return Value: // TRUE if the operation is successful. // FALSE if it fails // //---------------------------------------------------------------------------- BOOL OutOfProcessExceptionEventGetProcessIdAndThreadId(HANDLE hProcess, HANDLE hThread, DWORD * pPId, DWORD * pThreadId) { _ASSERTE((pPId != NULL) && (pThreadId != NULL)); #ifdef TARGET_UNIX // UNIXTODO: mikem 1/13/15 Need appropriate PAL functions for getting ids *pPId = (DWORD)(SIZE_T)hProcess; *pThreadId = (DWORD)(SIZE_T)hThread; #else *pPId = GetProcessIdOfThread(hThread); *pThreadId = GetThreadId(hThread); #endif // TARGET_UNIX return TRUE; } // WER_RUNTIME_EXCEPTION_INFORMATION will be available from Win7 SDK once Win7 SDK is released. 
#if !defined(WER_RUNTIME_EXCEPTION_INFORMATION)
typedef struct _WER_RUNTIME_EXCEPTION_INFORMATION
{
    DWORD dwSize;
    HANDLE hProcess;
    HANDLE hThread;
    EXCEPTION_RECORD exceptionRecord;
    CONTEXT context;
} WER_RUNTIME_EXCEPTION_INFORMATION, * PWER_RUNTIME_EXCEPTION_INFORMATION;
#endif // !defined(WER_RUNTIME_EXCEPTION_INFORMATION)

#ifndef TARGET_UNIX

//----------------------------------------------------------------------------
//
// OutOfProcessExceptionEventGetWatsonBucket - retrieve Watson buckets if it is a managed exception
//
// Arguments:
//    pContext - the context passed at helper module registration
//    pExceptionInformation - structure that contains information about the crash
//    pGMB - pointer to the space to store retrieved Watson buckets
//
// Return Value:
//    S_OK if the operation is successful.
//    or S_FALSE if it is not a managed exception or Watson buckets cannot be found
//    else detailed error code.
//
//----------------------------------------------------------------------------
STDAPI OutOfProcessExceptionEventGetWatsonBucket(_In_ PDWORD pContext,
                                                 _In_ const PWER_RUNTIME_EXCEPTION_INFORMATION pExceptionInformation,
                                                 _Out_ GenericModeBlock * pGMB)
{
    HANDLE hProcess = pExceptionInformation->hProcess;
    HANDLE hThread  = pExceptionInformation->hThread;
    DWORD PId, ThreadId;

    if (!OutOfProcessExceptionEventGetProcessIdAndThreadId(hProcess, hThread, &PId, &ThreadId))
    {
        return E_FAIL;
    }

    // The registration context carries the base address of the runtime module
    // in the faulting process.
    CLRDATA_ADDRESS baseAddressOfRuntime = (CLRDATA_ADDRESS)pContext;
    NewHolder<LiveProcDataTarget> dataTarget(NULL);

    dataTarget = new (nothrow) LiveProcDataTarget(hProcess, PId, baseAddressOfRuntime);
    if (dataTarget == NULL)
    {
        return E_OUTOFMEMORY;
    }

    NewHolder<ClrDataAccess> pClrDataAccess(NULL);
    HRESULT hr = CLRDataAccessCreateInstance(dataTarget, &pClrDataAccess);
    if (hr != S_OK)
    {
        if (hr == S_FALSE)
        {
            return E_FAIL;
        }
        else
        {
            return hr;
        }
    }

    if (!pClrDataAccess->IsExceptionFromManagedCode(&pExceptionInformation->exceptionRecord))
    {
        return S_FALSE;
    }

    return pClrDataAccess->GetWatsonBuckets(ThreadId, pGMB);
}

//----------------------------------------------------------------------------
//
// OutOfProcessExceptionEventCallback - claim the ownership of this event if current
//                                      runtime threw the unhandled exception
//
// Arguments:
//    pContext - the context passed at helper module registration
//    pExceptionInformation - structure that contains information about the crash
//    pbOwnershipClaimed - output parameter for claiming the ownership of this event
//    pwszEventName - name of the event. If this is NULL, pchSize cannot be NULL.
//                    This parameter is valid only if * pbOwnershipClaimed is TRUE.
//    pchSize - the size of the buffer pointed by pwszEventName
//    pdwSignatureCount - the count of signature parameters. Valid values range from
//                        0 to 10. If the value returned is greater than 10, only the
//                        1st 10 parameters are used for bucketing parameters. This
//                        parameter is valid only if * pbOwnershipClaimed is TRUE.
//
// Return Value:
//    S_OK on success, else detailed error code.
//
// Note:
//    This is the 1st function that is called into by WER. This API through its out
//    parameters, tells WER as to whether or not it is claiming the crash. If it does
//    claim the crash, WER uses the event name specified in the string pointed to by
//    pwszEventName for error reporting. WER then proceeds to call the
//    OutOfProcessExceptionEventSignatureCallback to get the bucketing parameters from
//    the helper dll.
//
//    This function follows the multiple call paradigm. WER may call into this function
//    with *pwszEventName pointer set to NULL. This is to indicate to the function, that
//    WER wants to know the buffer size needed by the function to populate the string
//    into the buffer. The function should return E_INSUFFICIENTBUFFER with the needed
//    buffer size in *pchSize.
//    WER shall then allocate a buffer of size *pchSize for
//    pwszEventName and then call this function again at which point the function should
//    populate the string and return S_OK.
//
//    Note that *pbOwnershipClaimed should be set to TRUE every time this function is called
//    for the helper dll to claim ownership of bucketing.
//
//    The Win7 WER spec is at
//    http://windows/windows7/docs/COSD%20Documents/Fundamentals/Feedback%20Services%20and%20Platforms/WER-CLR%20Integration%20Dev%20Spec.docx
//
//    !!!READ THIS!!!
//    Since this is called by external modules it's important that we don't let any exceptions leak out (see Win8 95224).
//
//----------------------------------------------------------------------------
STDAPI OutOfProcessExceptionEventCallback(_In_ PDWORD pContext,
                                          _In_ const PWER_RUNTIME_EXCEPTION_INFORMATION pExceptionInformation,
                                          _Out_ BOOL * pbOwnershipClaimed,
                                          _Out_writes_(*pchSize) PWSTR pwszEventName,
                                          __inout PDWORD pchSize,
                                          _Out_ PDWORD pdwSignatureCount)
{
    SUPPORTS_DAC_HOST_ONLY;

    if ((pContext == NULL) ||
        (pExceptionInformation == NULL) ||
        (pExceptionInformation->dwSize < sizeof(WER_RUNTIME_EXCEPTION_INFORMATION)) ||
        (pbOwnershipClaimed == NULL) ||
        (pchSize == NULL) ||
        (pdwSignatureCount == NULL))
    {
        return E_INVALIDARG;
    }

    *pbOwnershipClaimed = FALSE;

    GenericModeBlock gmb;
    HRESULT hr = E_FAIL;

    EX_TRY
    {
        // get Watson buckets if it is a managed exception
        hr = OutOfProcessExceptionEventGetWatsonBucket(pContext, pExceptionInformation, &gmb);
    }
    EX_CATCH_HRESULT(hr);

    if (hr != S_OK)
    {
        // S_FALSE means either it is not a managed exception or we do not have Watson buckets.
        // Since we have set pbOwnershipClaimed to FALSE, we return S_OK to WER.
        if (hr == S_FALSE)
        {
            hr = S_OK;
        }

        return hr;
    }

    // Two-call pattern: report the required buffer size (in characters,
    // including the terminator) when the caller's buffer is absent or too small.
    if ((pwszEventName == NULL) || (*pchSize <= wcslen(gmb.wzEventTypeName)))
    {
        *pchSize = static_cast<DWORD>(wcslen(gmb.wzEventTypeName)) + 1;
        return HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER);
    }

    // copy custom event name
    wcscpy_s(pwszEventName, *pchSize, gmb.wzEventTypeName);

    *pdwSignatureCount = GetCountBucketParamsForEvent(gmb.wzEventTypeName);
    *pbOwnershipClaimed = TRUE;

    return S_OK;
}

//----------------------------------------------------------------------------
//
// OutOfProcessExceptionEventSignatureCallback - provide custom Watson buckets
//
// Arguments:
//    pContext - the context passed at helper module registration
//    pExceptionInformation - structure that contains information about the crash
//    dwIndex - the index of the bucketing parameter being requested. Valid values are
//              from 0 to 9
//    pwszName - pointer to the name of the bucketing parameter
//    pchName - pointer to character count of the pwszName buffer. If pwszName points to
//              null, *pchName represents the buffer size (represented in number of characters)
//              needed to populate the name in pwszName.
//    pwszValue - pointer to the value of the pwszName bucketing parameter
//    pchValue - pointer to the character count of the pwszValue buffer. If pwszValue points
//               to null, *pchValue represents the buffer size (represented in number of
//               characters) needed to populate the value in pwszValue.
//
// Return Value:
//    S_OK on success, else detailed error code.
//
// Note:
//    This function is called by WER only if the call to OutOfProcessExceptionEventCallback()
//    was successful and the value of *pbOwnershipClaimed was TRUE. This function is called
//    pdwSignatureCount times to collect the bucketing parameters from the helper dll.
//
//    This function also follows the multiple call paradigm as described for the
//    OutOfProcessExceptionEventCallback() function. The buffer sizes needed for
//    this function are of the pwszName and pwszValue buffers.
//
//    !!!READ THIS!!!
//    Since this is called by external modules it's important that we don't let any exceptions leak out (see Win8 95224).
//
//----------------------------------------------------------------------------
STDAPI OutOfProcessExceptionEventSignatureCallback(_In_ PDWORD pContext,
                                                   _In_ const PWER_RUNTIME_EXCEPTION_INFORMATION pExceptionInformation,
                                                   _In_ DWORD dwIndex,
                                                   _Out_writes_(*pchName) PWSTR pwszName,
                                                   __inout PDWORD pchName,
                                                   _Out_writes_(*pchValue) PWSTR pwszValue,
                                                   __inout PDWORD pchValue)
{
    SUPPORTS_DAC_HOST_ONLY;

    if ((pContext == NULL) ||
        (pExceptionInformation == NULL) ||
        (pExceptionInformation->dwSize < sizeof(WER_RUNTIME_EXCEPTION_INFORMATION)) ||
        (pchName == NULL) ||
        (pchValue == NULL))
    {
        return E_INVALIDARG;
    }

    // Two-call pattern for the (always empty) parameter name: a single
    // terminating character is all that is ever needed.
    if ((pwszName == NULL) || (*pchName == 0))
    {
        *pchName = 1;
        return HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER);
    }

    GenericModeBlock gmb;
    // Index table over the ten bucket-value fields; gmb is filled in below.
    const PWSTR pwszBucketValues[] = {gmb.wzP1,
                                      gmb.wzP2,
                                      gmb.wzP3,
                                      gmb.wzP4,
                                      gmb.wzP5,
                                      gmb.wzP6,
                                      gmb.wzP7,
                                      gmb.wzP8,
                                      gmb.wzP9,
                                      gmb.wzP10};

    HRESULT hr = E_FAIL;

    EX_TRY
    {
        // get Watson buckets if it is a managed exception
        hr = OutOfProcessExceptionEventGetWatsonBucket(pContext, pExceptionInformation, &gmb);
    }
    EX_CATCH_HRESULT(hr);

    // it's possible for the OS to kill
    // the faulting process before WER crash reporting has completed.
    _ASSERTE(hr == S_OK || hr == CORDBG_E_READVIRTUAL_FAILURE);

    if (hr != S_OK)
    {
        // S_FALSE means either it is not a managed exception or we do not have Watson buckets.
        // Either case is a logic error because this function is called by WER only if the call
        // to OutOfProcessExceptionEventCallback() was successful and the value of
        // *pbOwnershipClaimed was TRUE.
        if (hr == S_FALSE)
        {
            hr = E_FAIL;
        }

        return hr;
    }

    DWORD paramCount = GetCountBucketParamsForEvent(gmb.wzEventTypeName);

    if (dwIndex >= paramCount)
    {
        _ASSERTE(!"dwIndex is out of range");
        return E_INVALIDARG;
    }

    // Return pwszName as an empty string to let WER use localized version of "Parameter n"
    *pwszName = W('\0');

    if ((pwszValue == NULL) || (*pchValue <= wcslen(pwszBucketValues[dwIndex])))
    {
        *pchValue = static_cast<DWORD>(wcslen(pwszBucketValues[dwIndex])) + 1;
        return HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER);
    }

    // copy custom Watson bucket value
    wcscpy_s(pwszValue, *pchValue, pwszBucketValues[dwIndex]);

    return S_OK;
}

#endif // TARGET_UNIX

//----------------------------------------------------------------------------
//
// OutOfProcessExceptionEventDebuggerLaunchCallback - provide custom debugger launch string
//
// Arguments:
//    pContext - the context passed at helper module registration
//    pExceptionInformation - structure that contains information about the crash
//    pbCustomDebuggerNeeded - pointer to a BOOL. If this BOOL is set to TRUE, then
//                             a custom debugger launch option is needed by the
//                             process. In that case, the subsequent parameters will
//                             be meaningfully used. If this is FALSE, the subsequent
//                             parameters will be ignored.
//    pwszDebuggerLaunch - pointer to a string that will be used to launch the debugger,
//                         if the debugger is launched. The value of this string overrides
//                         the default debugger launch string used by WER.
//    pchSize - pointer to the character count of the pwszDebuggerLaunch buffer. If
//              pwszDebuggerLaunch points to null, *pchSize represents the buffer size
//              (represented in number of characters) needed to populate the debugger
//              launch string in pwszDebuggerLaunch.
//    pbAutoLaunchDebugger - pointer to a BOOL. If this BOOL is set to TRUE, WER will
//                           directly launch the debugger. If set to FALSE, WER will show
//                           the debug option to the user in the WER UI.
//
// Return Value:
//    S_OK on success, else detailed error code.
// // Note: // This function is called into by WER only if the call to OutOfProcessExceptionEventCallback() // was successful and the value of *pbOwnershipClaimed was TRUE. This function allows the helper // dll to customize the debugger launch options including the launch string. // // This function also follows the multiple call paradigm as described for the // OutOfProcessExceptionEventCallback() function. The buffer sizes needed for // this function are of the pwszName and pwszValue buffers. // //---------------------------------------------------------------------------- STDAPI OutOfProcessExceptionEventDebuggerLaunchCallback(_In_ PDWORD pContext, _In_ const PWER_RUNTIME_EXCEPTION_INFORMATION pExceptionInformation, _Out_ BOOL * pbCustomDebuggerNeeded, _Out_writes_opt_(*pchSize) PWSTR pwszDebuggerLaunch, __inout PDWORD pchSize, _Out_ BOOL * pbAutoLaunchDebugger) { SUPPORTS_DAC_HOST_ONLY; if ((pContext == NULL) || (pExceptionInformation == NULL) || (pExceptionInformation->dwSize < sizeof(WER_RUNTIME_EXCEPTION_INFORMATION)) || (pbCustomDebuggerNeeded == NULL) || (pwszDebuggerLaunch == NULL) || (pchSize == NULL) || (pbAutoLaunchDebugger == NULL)) { return E_INVALIDARG; } // Starting from CLRv4 managed debugger string and setting are unified with native debuggers. // There is no need to provide custom debugger string for WER. 
*pbCustomDebuggerNeeded = FALSE; return S_OK; } // DacHandleEnum #include "comcallablewrapper.h" DacHandleWalker::DacHandleWalker() : mDac(0), m_instanceAge(0), mMap(0), mIndex(0), mTypeMask(0), mGenerationFilter(-1), mChunkIndex(0), mCurr(0), mIteratorIndex(0) { SUPPORTS_DAC; } DacHandleWalker::~DacHandleWalker() { SUPPORTS_DAC; HandleChunkHead *curr = mHead.Next; while (curr) { HandleChunkHead *tmp = curr; curr = curr->Next; delete tmp; } } HRESULT DacHandleWalker::Init(ClrDataAccess *dac, UINT types[], UINT typeCount) { SUPPORTS_DAC; if (dac == NULL || types == NULL) return E_POINTER; mDac = dac; m_instanceAge = dac->m_instanceAge; return Init(BuildTypemask(types, typeCount)); } HRESULT DacHandleWalker::Init(ClrDataAccess *dac, UINT types[], UINT typeCount, int gen) { SUPPORTS_DAC; if (gen < 0 || gen > (int)*g_gcDacGlobals->max_gen) return E_INVALIDARG; mGenerationFilter = gen; return Init(dac, types, typeCount); } HRESULT DacHandleWalker::Init(UINT32 typemask) { SUPPORTS_DAC; mMap = g_gcDacGlobals->handle_table_map; mTypeMask = typemask; return S_OK; } UINT32 DacHandleWalker::BuildTypemask(UINT types[], UINT typeCount) { SUPPORTS_DAC; UINT32 mask = 0; for (UINT i = 0; i < typeCount; ++i) { _ASSERTE(types[i] < 32); mask |= (1 << types[i]); } return mask; } HRESULT DacHandleWalker::Next(unsigned int celt, SOSHandleData handles[], unsigned int *pceltFetched) { SUPPORTS_DAC; if (handles == NULL || pceltFetched == NULL) return E_POINTER; SOSHelperEnter(); hr = DoHandleWalk<SOSHandleData, unsigned int, DacHandleWalker::EnumCallbackSOS>(celt, handles, pceltFetched); SOSHelperLeave(); return hr; } bool DacHandleWalker::FetchMoreHandles(HANDLESCANPROC callback) { SUPPORTS_DAC; // The table slots are based on the number of GC heaps in the process. int max_slots = 1; #ifdef FEATURE_SVR_GC if (GCHeapUtilities::IsServerHeap()) max_slots = GCHeapCount(); #endif // FEATURE_SVR_GC // Reset the Count on all cached chunks. 
We reuse chunks after allocating // them, and the count is the only thing which needs resetting. for (HandleChunkHead *curr = &mHead; curr; curr = curr->Next) curr->Count = 0; DacHandleWalkerParam param(&mHead); do { // Have we advanced past the end of the current bucket? if (mMap && mIndex >= INITIAL_HANDLE_TABLE_ARRAY_SIZE) { mIndex = 0; mMap = mMap->pNext; } // Have we walked the entire handle table map? if (mMap == NULL) { mCurr = NULL; return false; } if (mMap->pBuckets[mIndex] != NULL) { for (int i = 0; i < max_slots; ++i) { DPTR(dac_handle_table) hTable = mMap->pBuckets[mIndex]->pTable[i]; if (hTable) { // Yikes! The handle table callbacks don't produce the handle type or // the AppDomain that we need, and it's too difficult to propagate out // these things (especially the type) without worrying about performance // implications for the GC. Instead we'll have the callback walk each // type individually. There are only a few handle types, and the handle // table has a fast-path for only walking a single type anyway. UINT32 handleType = 0; for (UINT32 mask = mTypeMask; mask; mask >>= 1, handleType++) { if (mask & 1) { dac_handle_table *pTable = hTable; PTR_AppDomain pDomain = AppDomain::GetCurrentDomain(); param.AppDomain = TO_CDADDR(pDomain.GetAddr()); param.Type = handleType; // Either enumerate the handles regularly, or walk the handle // table as the GC does if a generation filter was requested. if (mGenerationFilter != -1) HndScanHandlesForGC(hTable, callback, (LPARAM)&param, 0, &handleType, 1, mGenerationFilter, *g_gcDacGlobals->max_gen, 0); else HndEnumHandles(hTable, &handleType, 1, callback, (LPARAM)&param, 0, FALSE); } } } } } // Stop looping as soon as we have found data. We also stop if we have a failed HRESULT during // the callback (this should indicate OOM). 
mIndex++; } while (mHead.Count == 0 && SUCCEEDED(param.Result)); mCurr = mHead.Next; return true; } HRESULT DacHandleWalker::Skip(unsigned int celt) { return E_NOTIMPL; } HRESULT DacHandleWalker::Reset() { return E_NOTIMPL; } HRESULT DacHandleWalker::GetCount(unsigned int *pcelt) { return E_NOTIMPL; } void DacHandleWalker::GetRefCountedHandleInfo( OBJECTREF oref, unsigned int uType, unsigned int *pRefCount, unsigned int *pJupiterRefCount, BOOL *pIsPegged, BOOL *pIsStrong) { SUPPORTS_DAC; if (pJupiterRefCount) *pJupiterRefCount = 0; if (pIsPegged) *pIsPegged = FALSE; #if defined(FEATURE_COMINTEROP) || defined(FEATURE_COMWRAPPERS) || defined(FEATURE_OBJCMARSHAL) if (uType == HNDTYPE_REFCOUNTED) { #if defined(FEATURE_COMINTEROP) // get refcount from the CCW PTR_ComCallWrapper pWrap = ComCallWrapper::GetWrapperForObject(oref); if (pWrap != NULL) { if (pRefCount) *pRefCount = (unsigned int)pWrap->GetRefCount(); if (pIsStrong) *pIsStrong = pWrap->IsWrapperActive(); return; } #endif #if defined(FEATURE_OBJCMARSHAL) // [TODO] FEATURE_OBJCMARSHAL #endif // FEATURE_OBJCMARSHAL } #endif // FEATURE_COMINTEROP || FEATURE_COMWRAPPERS || FEATURE_OBJCMARSHAL if (pRefCount) *pRefCount = 0; if (pIsStrong) *pIsStrong = FALSE; } void CALLBACK DacHandleWalker::EnumCallbackSOS(PTR_UNCHECKED_OBJECTREF handle, uintptr_t *pExtraInfo, uintptr_t param1, uintptr_t param2) { SUPPORTS_DAC; DacHandleWalkerParam *param = (DacHandleWalkerParam *)param1; HandleChunkHead *curr = param->Curr; // If we failed on a previous call (OOM) don't keep trying to allocate, it's not going to work. if (FAILED(param->Result)) return; // We've moved past the size of the current chunk. We'll allocate a new chunk // and stuff the handles there. 
These are cleaned up by the destructor if (curr->Count >= (curr->Size/sizeof(SOSHandleData))) { if (curr->Next == NULL) { HandleChunk *next = new (nothrow) HandleChunk; if (next != NULL) { curr->Next = next; } else { param->Result = E_OUTOFMEMORY; return; } } curr = param->Curr = param->Curr->Next; } // Fill the current handle. SOSHandleData *dataArray = (SOSHandleData*)curr->pData; SOSHandleData &data = dataArray[curr->Count++]; data.Handle = TO_CDADDR(handle.GetAddr()); data.Type = param->Type; if (param->Type == HNDTYPE_DEPENDENT) data.Secondary = GetDependentHandleSecondary(handle.GetAddr()).GetAddr(); #ifdef FEATURE_COMINTEROP else if (param->Type == HNDTYPE_WEAK_NATIVE_COM) data.Secondary = HndGetHandleExtraInfo(handle.GetAddr()); #endif // FEATURE_COMINTEROP else data.Secondary = 0; data.AppDomain = param->AppDomain; GetRefCountedHandleInfo((OBJECTREF)*handle, param->Type, &data.RefCount, &data.JupiterRefCount, &data.IsPegged, &data.StrongReference); data.StrongReference |= (BOOL)IsAlwaysStrongReference(param->Type); } DacStackReferenceWalker::DacStackReferenceWalker(ClrDataAccess *dac, DWORD osThreadID) : mDac(dac), m_instanceAge(dac ? 
dac->m_instanceAge : 0), mThread(0), mErrors(0), mEnumerated(false), mChunkIndex(0), mCurr(0), mIteratorIndex(0) { Thread *curr = NULL; for (curr = ThreadStore::GetThreadList(curr); curr; curr = ThreadStore::GetThreadList(curr)) { if (curr->GetOSThreadId() == osThreadID) { mThread = curr; break; } } } DacStackReferenceWalker::~DacStackReferenceWalker() { StackRefChunkHead *curr = mHead.next; while (curr) { StackRefChunkHead *tmp = curr; curr = curr->next; delete tmp; } } HRESULT DacStackReferenceWalker::Init() { if (!mThread) return E_INVALIDARG; return mHeap.Init(); } HRESULT STDMETHODCALLTYPE DacStackReferenceWalker::Skip(unsigned int count) { return E_NOTIMPL; } HRESULT STDMETHODCALLTYPE DacStackReferenceWalker::Reset() { return E_NOTIMPL; } HRESULT DacStackReferenceWalker::GetCount(unsigned int *pCount) { if (!pCount) return E_POINTER; SOSHelperEnter(); if (!mEnumerated) { // Fill out our data structures. WalkStack<unsigned int, SOSStackRefData>(0, NULL, DacStackReferenceWalker::GCReportCallbackSOS, DacStackReferenceWalker::GCEnumCallbackSOS); } unsigned int count = 0; for(StackRefChunkHead *curr = &mHead; curr; curr = curr->next) count += curr->count; *pCount = count; SOSHelperLeave(); return hr; } HRESULT DacStackReferenceWalker::Next(unsigned int count, SOSStackRefData stackRefs[], unsigned int *pFetched) { if (stackRefs == NULL || pFetched == NULL) return E_POINTER; SOSHelperEnter(); hr = DoStackWalk<unsigned int, SOSStackRefData, DacStackReferenceWalker::GCReportCallbackSOS, DacStackReferenceWalker::GCEnumCallbackSOS> (count, stackRefs, pFetched); SOSHelperLeave(); return hr; } HRESULT DacStackReferenceWalker::EnumerateErrors(ISOSStackRefErrorEnum **ppEnum) { if (!ppEnum) return E_POINTER; SOSHelperEnter(); if (mThread) { // Fill out our data structures. 
WalkStack<unsigned int, SOSStackRefData>(0, NULL, DacStackReferenceWalker::GCReportCallbackSOS, DacStackReferenceWalker::GCEnumCallbackSOS); } DacStackReferenceErrorEnum *pEnum = new DacStackReferenceErrorEnum(this, mErrors); hr = pEnum->QueryInterface(__uuidof(ISOSStackRefErrorEnum), (void**)ppEnum); SOSHelperLeave(); return hr; } CLRDATA_ADDRESS DacStackReferenceWalker::ReadPointer(TADDR addr) { ULONG32 bytesRead = 0; TADDR result = 0; HRESULT hr = mDac->m_pTarget->ReadVirtual(addr, (BYTE*)&result, sizeof(TADDR), &bytesRead); if (FAILED(hr) || (bytesRead != sizeof(TADDR))) return (CLRDATA_ADDRESS)~0; return TO_CDADDR(result); } void DacStackReferenceWalker::GCEnumCallbackSOS(LPVOID hCallback, OBJECTREF *pObject, uint32_t flags, DacSlotLocation loc) { GCCONTEXT *gcctx = (GCCONTEXT *)hCallback; DacScanContext *dsc = (DacScanContext*)gcctx->sc; // Yuck. The GcInfoDecoder reports a local pointer for registers (as it's reading out of the REGDISPLAY // in the stack walk), and it reports a TADDR for stack locations. This is architecturally difficulty // to fix, so we are leaving it for now. TADDR addr = 0; TADDR obj = 0; if (loc.targetPtr) { addr = (TADDR)pObject; obj = TO_TADDR(dsc->pWalker->ReadPointer((CORDB_ADDRESS)addr)); } else { obj = pObject->GetAddr(); } if (flags & GC_CALL_INTERIOR) { CORDB_ADDRESS fixed_obj = 0; HRESULT hr = dsc->pWalker->mHeap.ListNearObjects((CORDB_ADDRESS)obj, NULL, &fixed_obj, NULL); // If we failed...oh well, SOS won't mind. We'll just report the interior pointer as is. if (SUCCEEDED(hr)) obj = TO_TADDR(fixed_obj); } SOSStackRefData *data = dsc->pWalker->GetNextObject<SOSStackRefData>(dsc); if (data != NULL) { // Report where the object and where it was found. data->HasRegisterInformation = true; data->Register = loc.reg; data->Offset = loc.regOffset; data->Address = TO_CDADDR(addr); data->Object = TO_CDADDR(obj); data->Flags = flags; // Report the frame that the data came from. 
data->StackPointer = TO_CDADDR(dsc->sp); if (dsc->pFrame) { data->SourceType = SOS_StackSourceFrame; data->Source = dac_cast<PTR_Frame>(dsc->pFrame).GetAddr(); } else { data->SourceType = SOS_StackSourceIP; data->Source = TO_CDADDR(dsc->pc); } } } void DacStackReferenceWalker::GCReportCallbackSOS(PTR_PTR_Object ppObj, ScanContext *sc, uint32_t flags) { DacScanContext *dsc = (DacScanContext*)sc; CLRDATA_ADDRESS obj = dsc->pWalker->ReadPointer(ppObj.GetAddr()); if (flags & GC_CALL_INTERIOR) { CORDB_ADDRESS fixed_addr = 0; HRESULT hr = dsc->pWalker->mHeap.ListNearObjects((CORDB_ADDRESS)obj, NULL, &fixed_addr, NULL); // If we failed...oh well, SOS won't mind. We'll just report the interior pointer as is. if (SUCCEEDED(hr)) obj = TO_CDADDR(fixed_addr); } SOSStackRefData *data = dsc->pWalker->GetNextObject<SOSStackRefData>(dsc); if (data != NULL) { data->HasRegisterInformation = false; data->Register = 0; data->Offset = 0; data->Address = ppObj.GetAddr(); data->Object = obj; data->Flags = flags; data->StackPointer = TO_CDADDR(dsc->sp); if (dsc->pFrame) { data->SourceType = SOS_StackSourceFrame; data->Source = dac_cast<PTR_Frame>(dsc->pFrame).GetAddr(); } else { data->SourceType = SOS_StackSourceIP; data->Source = TO_CDADDR(dsc->pc); } } } StackWalkAction DacStackReferenceWalker::Callback(CrawlFrame *pCF, VOID *pData) { // // KEEP IN SYNC WITH GcStackCrawlCallBack in vm\gcscan.cpp // GCCONTEXT *gcctx = (GCCONTEXT*)pData; DacScanContext *dsc = (DacScanContext*)gcctx->sc; MethodDesc *pMD = pCF->GetFunction(); gcctx->sc->pMD = pMD; PREGDISPLAY pRD = pCF->GetRegisterSet(); dsc->sp = (TADDR)GetRegdisplaySP(pRD);; dsc->pc = PCODEToPINSTR(GetControlPC(pRD)); ResetPointerHolder<CrawlFrame*> rph(&gcctx->cf); gcctx->cf = pCF; bool fReportGCReferences = true; #if defined(FEATURE_EH_FUNCLETS) // On Win64 and ARM, we may have unwound this crawlFrame and thus, shouldn't report the invalid // references it may contain. // todo. 
fReportGCReferences = pCF->ShouldCrawlframeReportGCReferences(); #endif // defined(FEATURE_EH_FUNCLETS) Frame *pFrame = ((DacScanContext*)gcctx->sc)->pFrame = pCF->GetFrame(); EX_TRY { if (fReportGCReferences) { if (pCF->IsFrameless()) { ICodeManager * pCM = pCF->GetCodeManager(); _ASSERTE(pCM != NULL); unsigned flags = pCF->GetCodeManagerFlags(); pCM->EnumGcRefs(pCF->GetRegisterSet(), pCF->GetCodeInfo(), flags, dsc->pEnumFunc, pData); } else { pFrame->GcScanRoots(gcctx->f, gcctx->sc); } } } EX_CATCH { SOSStackErrorList *err = new SOSStackErrorList; err->pNext = NULL; if (pFrame) { err->error.SourceType = SOS_StackSourceFrame; err->error.Source = dac_cast<PTR_Frame>(pFrame).GetAddr(); } else { err->error.SourceType = SOS_StackSourceIP; err->error.Source = TO_CDADDR(dsc->pc); } if (dsc->pWalker->mErrors == NULL) { dsc->pWalker->mErrors = err; } else { // This exception case should be non-existent. It only happens when there is either // a clr!Frame on the callstack which is not properly dac-ized, or when a call down // EnumGcRefs causes a data read exception. Since this is so rare, we don't worry // about making this code very efficient. SOSStackErrorList *curr = dsc->pWalker->mErrors; while (curr->pNext) curr = curr->pNext; curr->pNext = err; } } EX_END_CATCH(SwallowAllExceptions) #if 0 // todo // If we're executing a LCG dynamic method then we must promote the associated resolver to ensure it // doesn't get collected and yank the method code out from under us). // Be careful to only promote the reference -- we can also be called to relocate the reference and // that can lead to all sorts of problems since we could be racing for the relocation with the long // weak handle we recover the reference from. Promoting the reference is enough, the handle in the // reference will be relocated properly as long as we keep it alive till the end of the collection // as long as the reference is actually maintained by the long weak handle. 
if (pMD) { BOOL fMaybeCollectibleMethod = TRUE; // If this is a frameless method then the jitmanager can answer the question of whether // or not this is LCG simply by looking at the heap where the code lives, however there // is also the prestub case where we need to explicitly look at the MD for stuff that isn't // ngen'd if (pCF->IsFrameless() && pMD->IsLCGMethod()) { fMaybeCollectibleMethod = ExecutionManager::IsCollectibleMethod(pCF->GetMethodToken()); } if (fMaybeCollectibleMethod && pMD->IsLCGMethod()) { PTR_Object obj = OBJECTREFToObject(pMD->AsDynamicMethodDesc()->GetLCGMethodResolver()->GetManagedResolver()); dsc->pWalker->ReportObject(obj); } else { if (fMaybeCollectibleMethod) { PTR_Object obj = pMD->GetLoaderAllocator()->GetExposedObject(); dsc->pWalker->ReportObject(obj); } if (fReportGCReferences) { GenericParamContextType paramContextType = GENERIC_PARAM_CONTEXT_NONE; if (pCF->IsFrameless()) { // We need to grab the Context Type here because there are cases where the MethodDesc // is shared, and thus indicates there should be an instantion argument, but the JIT // was still allowed to optimize it away and we won't grab it below because we're not // reporting any references from this frame. 
paramContextType = pCF->GetCodeManager()->GetParamContextType(pCF->GetRegisterSet(), pCF->GetCodeInfo()); } else { if (pMD->RequiresInstMethodDescArg()) paramContextType = GENERIC_PARAM_CONTEXT_METHODDESC; else if (pMD->RequiresInstMethodTableArg()) paramContextType = GENERIC_PARAM_CONTEXT_METHODTABLE; } // Handle the case where the method is a static shared generic method and we need to keep the type of the generic parameters alive if (paramContextType == GENERIC_PARAM_CONTEXT_METHODDESC) { MethodDesc *pMDReal = dac_cast<PTR_MethodDesc>(pCF->GetParamTypeArg()); _ASSERTE((pMDReal != NULL) || !pCF->IsFrameless()); if (pMDReal != NULL) { PTR_Object obj = pMDReal->GetLoaderAllocator()->GetExposedObject(); dsc->pWalker->ReportObject(obj); } } else if (paramContextType == GENERIC_PARAM_CONTEXT_METHODTABLE) { MethodTable *pMTReal = dac_cast<PTR_MethodTable>(pCF->GetParamTypeArg()); _ASSERTE((pMTReal != NULL) || !pCF->IsFrameless()); if (pMTReal != NULL) { PTR_Object obj = pMTReal->GetLoaderAllocator()->GetExposedObject(); dsc->pWalker->ReportObject(obj); } } } } } #endif return SWA_CONTINUE; } DacStackReferenceErrorEnum::DacStackReferenceErrorEnum(DacStackReferenceWalker *pEnum, SOSStackErrorList *pErrors) : mEnum(pEnum), mHead(pErrors), mCurr(pErrors) { _ASSERTE(mEnum); if (mHead != NULL) mEnum->AddRef(); } DacStackReferenceErrorEnum::~DacStackReferenceErrorEnum() { if (mHead) mEnum->Release(); } HRESULT DacStackReferenceErrorEnum::Skip(unsigned int count) { unsigned int i = 0; for (i = 0; i < count && mCurr; ++i) mCurr = mCurr->pNext; return i < count ? 
S_FALSE : S_OK; } HRESULT DacStackReferenceErrorEnum::Reset() { mCurr = mHead; return S_OK; } HRESULT DacStackReferenceErrorEnum::GetCount(unsigned int *pCount) { SOSStackErrorList *curr = mHead; unsigned int count = 0; while (curr) { curr = curr->pNext; count++; } *pCount = count; return S_OK; } HRESULT DacStackReferenceErrorEnum::Next(unsigned int count, SOSStackRefError ref[], unsigned int *pFetched) { if (pFetched == NULL || ref == NULL) return E_POINTER; unsigned int i; for (i = 0; i < count && mCurr; ++i, mCurr = mCurr->pNext) ref[i] = mCurr->error; *pFetched = i; return i < count ? S_FALSE : S_OK; }
1
dotnet/runtime
66,234
Remove compiler warning suppression
Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
AaronRobinsonMSFT
2022-03-05T03:45:49Z
2022-03-09T00:57:12Z
220e67755ae6323a01011b83070dff6f84b02519
853e494abd1c1e12d28a76829b4680af7f694afa
Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
./src/coreclr/jit/eeinterface.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX EEInterface XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ // ONLY FUNCTIONS common to all variants of the JIT (EXE, DLL) should go here) // otherwise they belong in the corresponding directory. #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif #if defined(DEBUG) || defined(FEATURE_JIT_METHOD_PERF) || defined(FEATURE_SIMD) #pragma warning(push) #pragma warning(disable : 4701) // difficult to get rid of C4701 with 'sig' below /*****************************************************************************/ /***************************************************************************** * * Filter wrapper to handle exception filtering. * On Unix compilers don't support SEH. */ struct FilterSuperPMIExceptionsParam_eeinterface { Compiler* pThis; Compiler::Info* pJitInfo; bool hasThis; size_t siglength; CORINFO_SIG_INFO sig; CORINFO_ARG_LIST_HANDLE argLst; CORINFO_METHOD_HANDLE hnd; const char* returnType; const char** pArgNames; EXCEPTION_POINTERS exceptionPointers; }; const char* Compiler::eeGetMethodFullName(CORINFO_METHOD_HANDLE hnd) { const char* className; const char* methodName = eeGetMethodName(hnd, &className); if ((eeGetHelperNum(hnd) != CORINFO_HELP_UNDEF) || eeIsNativeMethod(hnd)) { return methodName; } FilterSuperPMIExceptionsParam_eeinterface param; param.returnType = nullptr; param.pThis = this; param.hasThis = false; param.siglength = 0; param.hnd = hnd; param.pJitInfo = &info; size_t length = 0; unsigned i; /* Generating the full signature is a two-pass process. 
First we have to walk the components in order to assess the total size, then we allocate the buffer and copy the elements into it. */ /* Right now there is a race-condition in the EE, className can be nullptr */ /* initialize length with length of className and '.' */ if (className) { length = strlen(className) + 1; } else { assert(strlen("<NULL>.") == 7); length = 7; } /* add length of methodName and opening bracket */ length += strlen(methodName) + 1; bool success = eeRunWithSPMIErrorTrap<FilterSuperPMIExceptionsParam_eeinterface>( [](FilterSuperPMIExceptionsParam_eeinterface* pParam) { /* figure out the signature */ pParam->pThis->eeGetMethodSig(pParam->hnd, &pParam->sig); // allocate space to hold the class names for each of the parameters if (pParam->sig.numArgs > 0) { pParam->pArgNames = pParam->pThis->getAllocator(CMK_DebugOnly).allocate<const char*>(pParam->sig.numArgs); } else { pParam->pArgNames = nullptr; } unsigned i; pParam->argLst = pParam->sig.args; for (i = 0; i < pParam->sig.numArgs; i++) { var_types type = pParam->pThis->eeGetArgType(pParam->argLst, &pParam->sig); switch (type) { case TYP_REF: case TYP_STRUCT: { CORINFO_CLASS_HANDLE clsHnd = pParam->pThis->eeGetArgClass(&pParam->sig, pParam->argLst); // For some SIMD struct types we can get a nullptr back from eeGetArgClass on Linux/X64 if (clsHnd != NO_CLASS_HANDLE) { const char* clsName = pParam->pThis->eeGetClassName(clsHnd); if (clsName != nullptr) { pParam->pArgNames[i] = clsName; break; } } } FALLTHROUGH; default: pParam->pArgNames[i] = varTypeName(type); break; } pParam->siglength += strlen(pParam->pArgNames[i]); pParam->argLst = pParam->pJitInfo->compCompHnd->getArgNext(pParam->argLst); } /* add ',' if there is more than one argument */ if (pParam->sig.numArgs > 1) { pParam->siglength += (pParam->sig.numArgs - 1); } var_types retType = JITtype2varType(pParam->sig.retType); if (retType != TYP_VOID) { switch (retType) { case TYP_REF: case TYP_STRUCT: { CORINFO_CLASS_HANDLE clsHnd = 
pParam->sig.retTypeClass; if (clsHnd != NO_CLASS_HANDLE) { const char* clsName = pParam->pThis->eeGetClassName(clsHnd); if (clsName != nullptr) { pParam->returnType = clsName; break; } } } FALLTHROUGH; default: pParam->returnType = varTypeName(retType); break; } pParam->siglength += strlen(pParam->returnType) + 1; // don't forget the delimiter ':' } // Does it have a 'this' pointer? Don't count explicit this, which has the this pointer type as the first // element of the arg type list if (pParam->sig.hasThis() && !pParam->sig.hasExplicitThis()) { assert(strlen(":this") == 5); pParam->siglength += 5; pParam->hasThis = true; } }, &param); if (!success) { param.siglength = 0; } /* add closing bracket and null terminator */ length += param.siglength + 2; char* retName = getAllocator(CMK_DebugOnly).allocate<char>(length); /* Now generate the full signature string in the allocated buffer */ if (className) { strcpy_s(retName, length, className); strcat_s(retName, length, ":"); } else { strcpy_s(retName, length, "<NULL>."); } strcat_s(retName, length, methodName); // append the signature strcat_s(retName, length, "("); if (param.siglength > 0) { param.argLst = param.sig.args; for (i = 0; i < param.sig.numArgs; i++) { var_types type = eeGetArgType(param.argLst, &param.sig); strcat_s(retName, length, param.pArgNames[i]); param.argLst = info.compCompHnd->getArgNext(param.argLst); if (i + 1 < param.sig.numArgs) { strcat_s(retName, length, ","); } } } strcat_s(retName, length, ")"); if (param.returnType != nullptr) { strcat_s(retName, length, ":"); strcat_s(retName, length, param.returnType); } if (param.hasThis) { strcat_s(retName, length, ":this"); } assert(strlen(retName) == (length - 1)); return (retName); } #pragma warning(pop) #endif // defined(DEBUG) || defined(FEATURE_JIT_METHOD_PERF) || defined(FEATURE_SIMD) /*****************************************************************************/
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX EEInterface XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ // ONLY FUNCTIONS common to all variants of the JIT (EXE, DLL) should go here) // otherwise they belong in the corresponding directory. #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif #if defined(DEBUG) || defined(FEATURE_JIT_METHOD_PERF) || defined(FEATURE_SIMD) /*****************************************************************************/ /***************************************************************************** * * Filter wrapper to handle exception filtering. * On Unix compilers don't support SEH. */ struct FilterSuperPMIExceptionsParam_eeinterface { Compiler* pThis; Compiler::Info* pJitInfo; bool hasThis; size_t siglength; CORINFO_SIG_INFO sig; CORINFO_ARG_LIST_HANDLE argLst; CORINFO_METHOD_HANDLE hnd; const char* returnType; const char** pArgNames; EXCEPTION_POINTERS exceptionPointers; }; const char* Compiler::eeGetMethodFullName(CORINFO_METHOD_HANDLE hnd) { const char* className; const char* methodName = eeGetMethodName(hnd, &className); if ((eeGetHelperNum(hnd) != CORINFO_HELP_UNDEF) || eeIsNativeMethod(hnd)) { return methodName; } FilterSuperPMIExceptionsParam_eeinterface param; param.returnType = nullptr; param.pThis = this; param.hasThis = false; param.siglength = 0; param.hnd = hnd; param.pJitInfo = &info; size_t length = 0; unsigned i; /* Generating the full signature is a two-pass process. First we have to walk the components in order to assess the total size, then we allocate the buffer and copy the elements into it. 
*/ /* Right now there is a race-condition in the EE, className can be nullptr */ /* initialize length with length of className and '.' */ if (className) { length = strlen(className) + 1; } else { assert(strlen("<NULL>.") == 7); length = 7; } /* add length of methodName and opening bracket */ length += strlen(methodName) + 1; bool success = eeRunWithSPMIErrorTrap<FilterSuperPMIExceptionsParam_eeinterface>( [](FilterSuperPMIExceptionsParam_eeinterface* pParam) { /* figure out the signature */ pParam->pThis->eeGetMethodSig(pParam->hnd, &pParam->sig); // allocate space to hold the class names for each of the parameters if (pParam->sig.numArgs > 0) { pParam->pArgNames = pParam->pThis->getAllocator(CMK_DebugOnly).allocate<const char*>(pParam->sig.numArgs); } else { pParam->pArgNames = nullptr; } unsigned i; pParam->argLst = pParam->sig.args; for (i = 0; i < pParam->sig.numArgs; i++) { var_types type = pParam->pThis->eeGetArgType(pParam->argLst, &pParam->sig); switch (type) { case TYP_REF: case TYP_STRUCT: { CORINFO_CLASS_HANDLE clsHnd = pParam->pThis->eeGetArgClass(&pParam->sig, pParam->argLst); // For some SIMD struct types we can get a nullptr back from eeGetArgClass on Linux/X64 if (clsHnd != NO_CLASS_HANDLE) { const char* clsName = pParam->pThis->eeGetClassName(clsHnd); if (clsName != nullptr) { pParam->pArgNames[i] = clsName; break; } } } FALLTHROUGH; default: pParam->pArgNames[i] = varTypeName(type); break; } pParam->siglength += strlen(pParam->pArgNames[i]); pParam->argLst = pParam->pJitInfo->compCompHnd->getArgNext(pParam->argLst); } /* add ',' if there is more than one argument */ if (pParam->sig.numArgs > 1) { pParam->siglength += (pParam->sig.numArgs - 1); } var_types retType = JITtype2varType(pParam->sig.retType); if (retType != TYP_VOID) { switch (retType) { case TYP_REF: case TYP_STRUCT: { CORINFO_CLASS_HANDLE clsHnd = pParam->sig.retTypeClass; if (clsHnd != NO_CLASS_HANDLE) { const char* clsName = pParam->pThis->eeGetClassName(clsHnd); if (clsName != 
nullptr) { pParam->returnType = clsName; break; } } } FALLTHROUGH; default: pParam->returnType = varTypeName(retType); break; } pParam->siglength += strlen(pParam->returnType) + 1; // don't forget the delimiter ':' } // Does it have a 'this' pointer? Don't count explicit this, which has the this pointer type as the first // element of the arg type list if (pParam->sig.hasThis() && !pParam->sig.hasExplicitThis()) { assert(strlen(":this") == 5); pParam->siglength += 5; pParam->hasThis = true; } }, &param); if (!success) { param.siglength = 0; } /* add closing bracket and null terminator */ length += param.siglength + 2; char* retName = getAllocator(CMK_DebugOnly).allocate<char>(length); /* Now generate the full signature string in the allocated buffer */ if (className) { strcpy_s(retName, length, className); strcat_s(retName, length, ":"); } else { strcpy_s(retName, length, "<NULL>."); } strcat_s(retName, length, methodName); // append the signature strcat_s(retName, length, "("); if (param.siglength > 0) { param.argLst = param.sig.args; for (i = 0; i < param.sig.numArgs; i++) { var_types type = eeGetArgType(param.argLst, &param.sig); strcat_s(retName, length, param.pArgNames[i]); param.argLst = info.compCompHnd->getArgNext(param.argLst); if (i + 1 < param.sig.numArgs) { strcat_s(retName, length, ","); } } } strcat_s(retName, length, ")"); if (param.returnType != nullptr) { strcat_s(retName, length, ":"); strcat_s(retName, length, param.returnType); } if (param.hasThis) { strcat_s(retName, length, ":this"); } assert(strlen(retName) == (length - 1)); return (retName); } #endif // defined(DEBUG) || defined(FEATURE_JIT_METHOD_PERF) || defined(FEATURE_SIMD) /*****************************************************************************/
1
dotnet/runtime
66,234
Remove compiler warning suppression
Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
AaronRobinsonMSFT
2022-03-05T03:45:49Z
2022-03-09T00:57:12Z
220e67755ae6323a01011b83070dff6f84b02519
853e494abd1c1e12d28a76829b4680af7f694afa
Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
./src/coreclr/jit/optimizer.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Optimizer XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #pragma warning(disable : 4701) #endif /*****************************************************************************/ void Compiler::optInit() { optLoopsMarked = false; fgHasLoops = false; loopAlignCandidates = 0; /* Initialize the # of tracked loops to 0 */ optLoopCount = 0; optLoopTable = nullptr; optCurLoopEpoch = 0; #ifdef DEBUG loopsAligned = 0; #endif /* Keep track of the number of calls and indirect calls made by this method */ optCallCount = 0; optIndirectCallCount = 0; optNativeCallCount = 0; optAssertionCount = 0; optAssertionDep = nullptr; optCSEstart = BAD_VAR_NUM; optCSEcount = 0; } DataFlow::DataFlow(Compiler* pCompiler) : m_pCompiler(pCompiler) { } //------------------------------------------------------------------------ // optSetBlockWeights: adjust block weights, as follows: // 1. A block that is not reachable from the entry block is marked "run rarely". // 2. If we're not using profile weights, then any block with a non-zero weight // that doesn't dominate all the return blocks has its weight dropped in half // (but only if the first block *does* dominate all the returns). // // Notes: // Depends on dominators, and fgReturnBlocks being set. 
// PhaseStatus Compiler::optSetBlockWeights() { noway_assert(opts.OptimizationEnabled()); assert(fgDomsComputed); assert(fgReturnBlocksComputed); #ifdef DEBUG bool changed = false; #endif bool firstBBDominatesAllReturns = true; const bool usingProfileWeights = fgIsUsingProfileWeights(); for (BasicBlock* const block : Blocks()) { /* Blocks that can't be reached via the first block are rarely executed */ if (!fgReachable(fgFirstBB, block)) { block->bbSetRunRarely(); } if (!usingProfileWeights && firstBBDominatesAllReturns) { // If the weight is already zero (and thus rarely run), there's no point scaling it. if (block->bbWeight != BB_ZERO_WEIGHT) { // If the block dominates all return blocks, leave the weight alone. Otherwise, // scale the weight by 0.5 as a heuristic that some other path gets some of the dynamic flow. // Note that `optScaleLoopBlocks` has a similar heuristic for loop blocks that don't dominate // their loop back edge. bool blockDominatesAllReturns = true; // Assume that we will dominate for (BasicBlockList* retBlocks = fgReturnBlocks; retBlocks != nullptr; retBlocks = retBlocks->next) { if (!fgDominate(block, retBlocks->block)) { blockDominatesAllReturns = false; break; } } if (block == fgFirstBB) { firstBBDominatesAllReturns = blockDominatesAllReturns; // Don't scale the weight of the first block, since it is guaranteed to execute. // If the first block does not dominate all the returns, we won't scale any of the function's // block weights. } else { // If we are not using profile weight then we lower the weight // of blocks that do not dominate a return block // if (!blockDominatesAllReturns) { INDEBUG(changed = true); // TODO-Cleanup: we should use: // block->scaleBBWeight(0.5); // since we are inheriting "from ourselves", but that leads to asm diffs due to minutely // different floating-point value in the calculation, and some code that compares weights // for equality. 
block->inheritWeightPercentage(block, 50); } } } } } #if DEBUG if (changed && verbose) { printf("\nAfter optSetBlockWeights:\n"); fgDispBasicBlocks(); printf("\n"); } /* Check that the flowgraph data (bbNum, bbRefs, bbPreds) is up-to-date */ fgDebugCheckBBlist(); #endif return PhaseStatus::MODIFIED_EVERYTHING; } //------------------------------------------------------------------------ // optScaleLoopBlocks: Scale the weight of loop blocks from 'begBlk' to 'endBlk'. // // Arguments: // begBlk - first block of range. Must be marked as a loop head (BBF_LOOP_HEAD). // endBlk - last block of range (inclusive). Must be reachable from `begBlk`. // // Operation: // Calculate the 'loop weight'. This is the amount to scale the weight of each block in the loop. // Our heuristic is that loops are weighted eight times more than straight-line code // (scale factor is BB_LOOP_WEIGHT_SCALE). If the loops are all properly formed this gives us these weights: // // 1 -- non-loop basic block // 8 -- single loop nesting // 64 -- double loop nesting // 512 -- triple loop nesting // void Compiler::optScaleLoopBlocks(BasicBlock* begBlk, BasicBlock* endBlk) { noway_assert(begBlk->bbNum <= endBlk->bbNum); noway_assert(begBlk->isLoopHead()); noway_assert(fgReachable(begBlk, endBlk)); noway_assert(!opts.MinOpts()); #ifdef DEBUG if (verbose) { printf("\nMarking a loop from " FMT_BB " to " FMT_BB, begBlk->bbNum, endBlk->bbNum); } #endif // Build list of back edges for block begBlk. flowList* backedgeList = nullptr; for (BasicBlock* const predBlock : begBlk->PredBlocks()) { // Is this a back edge? if (predBlock->bbNum >= begBlk->bbNum) { backedgeList = new (this, CMK_FlowList) flowList(predBlock, backedgeList); #if MEASURE_BLOCK_SIZE genFlowNodeCnt += 1; genFlowNodeSize += sizeof(flowList); #endif // MEASURE_BLOCK_SIZE } } // At least one backedge must have been found (the one from endBlk). 
noway_assert(backedgeList); auto reportBlockWeight = [&](BasicBlock* blk, const char* message) { #ifdef DEBUG if (verbose) { printf("\n " FMT_BB "(wt=" FMT_WT ")%s", blk->bbNum, blk->getBBWeight(this), message); } #endif // DEBUG }; for (BasicBlock* const curBlk : BasicBlockRangeList(begBlk, endBlk)) { // Don't change the block weight if it came from profile data. if (curBlk->hasProfileWeight()) { reportBlockWeight(curBlk, "; unchanged: has profile weight"); continue; } // Don't change the block weight if it's known to be rarely run. if (curBlk->isRunRarely()) { reportBlockWeight(curBlk, "; unchanged: run rarely"); continue; } // For curBlk to be part of a loop that starts at begBlk, curBlk must be reachable from begBlk and // (since this is a loop) begBlk must likewise be reachable from curBlk. if (fgReachable(curBlk, begBlk) && fgReachable(begBlk, curBlk)) { // If `curBlk` reaches any of the back edge blocks we set `reachable`. // If `curBlk` dominates any of the back edge blocks we set `dominates`. bool reachable = false; bool dominates = false; for (flowList* tmp = backedgeList; tmp != nullptr; tmp = tmp->flNext) { BasicBlock* backedge = tmp->getBlock(); reachable |= fgReachable(curBlk, backedge); dominates |= fgDominate(curBlk, backedge); if (dominates && reachable) { // No need to keep looking; we've already found all the info we need. break; } } if (reachable) { // If the block has BB_ZERO_WEIGHT, then it should be marked as rarely run, and skipped, above. noway_assert(curBlk->bbWeight > BB_ZERO_WEIGHT); weight_t scale = BB_LOOP_WEIGHT_SCALE; if (!dominates) { // If `curBlk` reaches but doesn't dominate any back edge to `endBlk` then there must be at least // some other path to `endBlk`, so don't give `curBlk` all the execution weight. 
scale = scale / 2; } curBlk->scaleBBWeight(scale); reportBlockWeight(curBlk, ""); } else { reportBlockWeight(curBlk, "; unchanged: back edge unreachable"); } } else { reportBlockWeight(curBlk, "; unchanged: block not in loop"); } } } //------------------------------------------------------------------------ // optUnmarkLoopBlocks: Unmark the blocks between 'begBlk' and 'endBlk' as part of a loop. // // Arguments: // begBlk - first block of range. Must be marked as a loop head (BBF_LOOP_HEAD). // endBlk - last block of range (inclusive). Must be reachable from `begBlk`. // // Operation: // A set of blocks that were previously marked as a loop are now to be unmarked, since we have decided that // for some reason this loop no longer exists. Basically we are just resetting the blocks bbWeight to their // previous values. // void Compiler::optUnmarkLoopBlocks(BasicBlock* begBlk, BasicBlock* endBlk) { noway_assert(begBlk->bbNum <= endBlk->bbNum); noway_assert(begBlk->isLoopHead()); noway_assert(!opts.MinOpts()); unsigned backEdgeCount = 0; for (BasicBlock* const predBlock : begBlk->PredBlocks()) { // Is this a backward edge? (from predBlock to begBlk) if (begBlk->bbNum > predBlock->bbNum) { continue; } // We only consider back-edges that are BBJ_COND or BBJ_ALWAYS for loops. if (!predBlock->KindIs(BBJ_COND, BBJ_ALWAYS)) { continue; } backEdgeCount++; } // Only unmark the loop blocks if we have exactly one loop back edge. 
if (backEdgeCount != 1) { #ifdef DEBUG if (verbose) { if (backEdgeCount > 0) { printf("\nNot removing loop at " FMT_BB ", due to an additional back edge", begBlk->bbNum); } else if (backEdgeCount == 0) { printf("\nNot removing loop at " FMT_BB ", due to no back edge", begBlk->bbNum); } } #endif return; } noway_assert(fgReachable(begBlk, endBlk)); #ifdef DEBUG if (verbose) { printf("\nUnmarking a loop from " FMT_BB " to " FMT_BB, begBlk->bbNum, endBlk->bbNum); } #endif for (BasicBlock* const curBlk : BasicBlockRangeList(begBlk, endBlk)) { // Stop if we go past the last block in the loop, as it may have been deleted. if (curBlk->bbNum > endBlk->bbNum) { break; } // Don't change the block weight if it's known to be rarely run. if (curBlk->isRunRarely()) { continue; } // Don't change the block weight if it came from profile data. if (curBlk->hasProfileWeight()) { continue; } // Don't unmark blocks that are maximum weight. if (curBlk->isMaxBBWeight()) { continue; } // For curBlk to be part of a loop that starts at begBlk, curBlk must be reachable from begBlk and // (since this is a loop) begBlk must likewise be reachable from curBlk. // if (fgReachable(curBlk, begBlk) && fgReachable(begBlk, curBlk)) { weight_t scale = 1.0 / BB_LOOP_WEIGHT_SCALE; if (!fgDominate(curBlk, endBlk)) { scale *= 2; } curBlk->scaleBBWeight(scale); JITDUMP("\n " FMT_BB "(wt=" FMT_WT ")", curBlk->bbNum, curBlk->getBBWeight(this)); } } JITDUMP("\n"); begBlk->unmarkLoopAlign(this DEBUG_ARG("Removed loop")); } /***************************************************************************************************** * * Function called to update the loop table and bbWeight before removing a block */ void Compiler::optUpdateLoopsBeforeRemoveBlock(BasicBlock* block, bool skipUnmarkLoop) { if (!optLoopsMarked) { return; } noway_assert(!opts.MinOpts()); bool removeLoop = false; // If an unreachable block is a loop entry or bottom then the loop is unreachable. 
// Special case: the block was the head of a loop - or pointing to a loop entry. for (unsigned loopNum = 0; loopNum < optLoopCount; loopNum++) { LoopDsc& loop = optLoopTable[loopNum]; // Some loops may have been already removed by loop unrolling or conditional folding. if (loop.lpFlags & LPFLG_REMOVED) { continue; } // Avoid printing to the JitDump unless we're actually going to change something. // If we call reportBefore, then we're going to change the loop table, and we should print the // `reportAfter` info as well. Only print the `reportBefore` info once, if multiple changes to // the table are made. INDEBUG(bool reportedBefore = false); auto reportBefore = [&]() { #ifdef DEBUG if (verbose && !reportedBefore) { printf("optUpdateLoopsBeforeRemoveBlock " FMT_BB " Before: ", block->bbNum); optPrintLoopInfo(loopNum); printf("\n"); reportedBefore = true; } #endif // DEBUG }; auto reportAfter = [&]() { #ifdef DEBUG if (verbose && reportedBefore) { printf("optUpdateLoopsBeforeRemoveBlock " FMT_BB " After: ", block->bbNum); optPrintLoopInfo(loopNum); printf("\n"); } #endif // DEBUG }; if (block == loop.lpEntry || block == loop.lpBottom) { reportBefore(); optMarkLoopRemoved(loopNum); reportAfter(); continue; } // If the loop is still in the table any block in the loop must be reachable. noway_assert((loop.lpEntry != block) && (loop.lpBottom != block)); if (loop.lpExit == block) { reportBefore(); assert(loop.lpExitCnt == 1); --loop.lpExitCnt; loop.lpExit = nullptr; } // If `block` flows to the loop entry then the whole loop will become unreachable if it is the // only non-loop predecessor. 
switch (block->bbJumpKind) { case BBJ_NONE: if (block->bbNext == loop.lpEntry) { removeLoop = true; } break; case BBJ_COND: if ((block->bbNext == loop.lpEntry) || (block->bbJumpDest == loop.lpEntry)) { removeLoop = true; } break; case BBJ_ALWAYS: if (block->bbJumpDest == loop.lpEntry) { removeLoop = true; } break; case BBJ_SWITCH: for (BasicBlock* const bTarget : block->SwitchTargets()) { if (bTarget == loop.lpEntry) { removeLoop = true; break; } } break; default: break; } if (removeLoop) { // Check if the entry has other predecessors outside the loop. // TODO: Replace this when predecessors are available. for (BasicBlock* const auxBlock : Blocks()) { // Ignore blocks in the loop. if (loop.lpContains(auxBlock)) { continue; } switch (auxBlock->bbJumpKind) { case BBJ_NONE: if (auxBlock->bbNext == loop.lpEntry) { removeLoop = false; } break; case BBJ_COND: if ((auxBlock->bbNext == loop.lpEntry) || (auxBlock->bbJumpDest == loop.lpEntry)) { removeLoop = false; } break; case BBJ_ALWAYS: if (auxBlock->bbJumpDest == loop.lpEntry) { removeLoop = false; } break; case BBJ_SWITCH: for (BasicBlock* const bTarget : auxBlock->SwitchTargets()) { if (bTarget == loop.lpEntry) { removeLoop = false; break; } } break; default: break; } } if (removeLoop) { reportBefore(); optMarkLoopRemoved(loopNum); } } else if (loop.lpHead == block) { reportBefore(); /* The loop has a new head - Just update the loop table */ loop.lpHead = block->bbPrev; } reportAfter(); } if ((skipUnmarkLoop == false) && // block->KindIs(BBJ_ALWAYS, BBJ_COND) && // block->bbJumpDest->isLoopHead() && // (block->bbJumpDest->bbNum <= block->bbNum) && // fgDomsComputed && // (fgCurBBEpochSize == fgDomBBcount + 1) && // fgReachable(block->bbJumpDest, block)) { optUnmarkLoopBlocks(block->bbJumpDest, block); } } //------------------------------------------------------------------------ // optClearLoopIterInfo: Clear the info related to LPFLG_ITER loops in the loop table. 
// The various fields related to iterators is known to be valid for loop cloning and unrolling, // but becomes invalid afterwards. Clear the info that might be used incorrectly afterwards // in JitDump or by subsequent phases. // void Compiler::optClearLoopIterInfo() { for (unsigned lnum = 0; lnum < optLoopCount; lnum++) { LoopDsc& loop = optLoopTable[lnum]; loop.lpFlags &= ~(LPFLG_ITER | LPFLG_VAR_INIT | LPFLG_CONST_INIT | LPFLG_SIMD_LIMIT | LPFLG_VAR_LIMIT | LPFLG_CONST_LIMIT | LPFLG_ARRLEN_LIMIT); loop.lpIterTree = nullptr; loop.lpInitBlock = nullptr; loop.lpConstInit = -1; // union with loop.lpVarInit loop.lpTestTree = nullptr; } } #ifdef DEBUG /***************************************************************************** * * Print loop info in an uniform way. */ void Compiler::optPrintLoopInfo(const LoopDsc* loop, bool printVerbose /* = false */) { assert(optLoopTable != nullptr); assert((&optLoopTable[0] <= loop) && (loop < &optLoopTable[optLoopCount])); unsigned lnum = (unsigned)(loop - optLoopTable); assert(lnum < optLoopCount); assert(&optLoopTable[lnum] == loop); if (loop->lpFlags & LPFLG_REMOVED) { // If a loop has been removed, it might be dangerous to print its fields (e.g., loop unrolling // nulls out the lpHead field). printf(FMT_LP " REMOVED", lnum); return; } printf(FMT_LP ", from " FMT_BB " to " FMT_BB " (Head=" FMT_BB ", Entry=" FMT_BB, lnum, loop->lpTop->bbNum, loop->lpBottom->bbNum, loop->lpHead->bbNum, loop->lpEntry->bbNum); if (loop->lpExitCnt == 1) { printf(", Exit=" FMT_BB, loop->lpExit->bbNum); } else { printf(", ExitCnt=%d", loop->lpExitCnt); } if (loop->lpParent != BasicBlock::NOT_IN_LOOP) { printf(", parent=" FMT_LP, loop->lpParent); } printf(")"); if (printVerbose) { if (loop->lpChild != BasicBlock::NOT_IN_LOOP) { printf(", child loop = " FMT_LP, loop->lpChild); } if (loop->lpSibling != BasicBlock::NOT_IN_LOOP) { printf(", sibling loop = " FMT_LP, loop->lpSibling); } // If an iterator loop print the iterator and the initialization. 
if (loop->lpFlags & LPFLG_ITER) { printf(" [over V%02u", loop->lpIterVar()); printf(" ("); printf(GenTree::OpName(loop->lpIterOper())); printf(" %d)", loop->lpIterConst()); if (loop->lpFlags & LPFLG_CONST_INIT) { printf(" from %d", loop->lpConstInit); } if (loop->lpFlags & LPFLG_VAR_INIT) { printf(" from V%02u", loop->lpVarInit); } if (loop->lpFlags & (LPFLG_CONST_INIT | LPFLG_VAR_INIT)) { if (loop->lpInitBlock != loop->lpHead) { printf(" (in " FMT_BB ")", loop->lpInitBlock->bbNum); } } // If a simple test condition print operator and the limits */ printf(" %s", GenTree::OpName(loop->lpTestOper())); if (loop->lpFlags & LPFLG_CONST_LIMIT) { printf(" %d", loop->lpConstLimit()); if (loop->lpFlags & LPFLG_SIMD_LIMIT) { printf(" (simd)"); } } if (loop->lpFlags & LPFLG_VAR_LIMIT) { printf(" V%02u", loop->lpVarLimit()); } if (loop->lpFlags & LPFLG_ARRLEN_LIMIT) { ArrIndex* index = new (getAllocator(CMK_DebugOnly)) ArrIndex(getAllocator(CMK_DebugOnly)); if (loop->lpArrLenLimit(this, index)) { printf(" "); index->Print(); printf(".Length"); } else { printf(" ???.Length"); } } printf("]"); } // Print the flags if (loop->lpFlags & LPFLG_CONTAINS_CALL) { printf(" call"); } if (loop->lpFlags & LPFLG_HAS_PREHEAD) { printf(" prehead"); } if (loop->lpFlags & LPFLG_DONT_UNROLL) { printf(" !unroll"); } if (loop->lpFlags & LPFLG_ASGVARS_YES) { printf(" avyes"); } if (loop->lpFlags & LPFLG_ASGVARS_INC) { printf(" avinc"); } } } void Compiler::optPrintLoopInfo(unsigned lnum, bool printVerbose /* = false */) { assert(lnum < optLoopCount); const LoopDsc& loop = optLoopTable[lnum]; optPrintLoopInfo(&loop, printVerbose); } //------------------------------------------------------------------------ // optPrintLoopTable: Print the loop table // void Compiler::optPrintLoopTable() { printf("\n*************** Natural loop table\n"); if (optLoopCount == 0) { printf("No loops\n"); } else { for (unsigned loopInd = 0; loopInd < optLoopCount; loopInd++) { optPrintLoopInfo(loopInd, /* verbose */ 
true); printf("\n"); } } printf("\n"); } #endif // DEBUG //------------------------------------------------------------------------ // optPopulateInitInfo: Populate loop init info in the loop table. // // Arguments: // loopInd - loop index // initBlock - block in which the initialization lives. // init - the tree that is supposed to initialize the loop iterator. // iterVar - loop iteration variable. // // Return Value: // "false" if the loop table could not be populated with the loop iterVar init info. // // Operation: // The 'init' tree is checked if its lhs is a local and rhs is either // a const or a local. // bool Compiler::optPopulateInitInfo(unsigned loopInd, BasicBlock* initBlock, GenTree* init, unsigned iterVar) { // Operator should be = if (init->gtOper != GT_ASG) { return false; } GenTree* lhs = init->AsOp()->gtOp1; GenTree* rhs = init->AsOp()->gtOp2; // LHS has to be local and should equal iterVar. if (lhs->gtOper != GT_LCL_VAR || lhs->AsLclVarCommon()->GetLclNum() != iterVar) { return false; } // RHS can be constant or local var. // TODO-CQ: CLONE: Add arr length for descending loops. if (rhs->gtOper == GT_CNS_INT && rhs->TypeGet() == TYP_INT) { optLoopTable[loopInd].lpFlags |= LPFLG_CONST_INIT; optLoopTable[loopInd].lpConstInit = (int)rhs->AsIntCon()->gtIconVal; optLoopTable[loopInd].lpInitBlock = initBlock; } else if (rhs->gtOper == GT_LCL_VAR) { optLoopTable[loopInd].lpFlags |= LPFLG_VAR_INIT; optLoopTable[loopInd].lpVarInit = rhs->AsLclVarCommon()->GetLclNum(); optLoopTable[loopInd].lpInitBlock = initBlock; } else { return false; } return true; } //---------------------------------------------------------------------------------- // optCheckIterInLoopTest: Check if iter var is used in loop test. // // Arguments: // test "jtrue" tree or an asg of the loop iter termination condition // from/to blocks (beg, end) which are part of the loop. // iterVar loop iteration variable. // loopInd loop index. 
// // Operation: // The test tree is parsed to check if "iterVar" matches the lhs of the condition // and the rhs limit is extracted from the "test" tree. The limit information is // added to the loop table. // // Return Value: // "false" if the loop table could not be populated with the loop test info or // if the test condition doesn't involve iterVar. // bool Compiler::optCheckIterInLoopTest( unsigned loopInd, GenTree* test, BasicBlock* from, BasicBlock* to, unsigned iterVar) { // Obtain the relop from the "test" tree. GenTree* relop; if (test->gtOper == GT_JTRUE) { relop = test->gtGetOp1(); } else { assert(test->gtOper == GT_ASG); relop = test->gtGetOp2(); } noway_assert(relop->OperIsCompare()); GenTree* opr1 = relop->AsOp()->gtOp1; GenTree* opr2 = relop->AsOp()->gtOp2; GenTree* iterOp; GenTree* limitOp; // Make sure op1 or op2 is the iterVar. if (opr1->gtOper == GT_LCL_VAR && opr1->AsLclVarCommon()->GetLclNum() == iterVar) { iterOp = opr1; limitOp = opr2; } else if (opr2->gtOper == GT_LCL_VAR && opr2->AsLclVarCommon()->GetLclNum() == iterVar) { iterOp = opr2; limitOp = opr1; } else { return false; } if (iterOp->gtType != TYP_INT) { return false; } // Mark the iterator node. iterOp->gtFlags |= GTF_VAR_ITERATOR; // Check what type of limit we have - constant, variable or arr-len. if (limitOp->gtOper == GT_CNS_INT) { optLoopTable[loopInd].lpFlags |= LPFLG_CONST_LIMIT; if ((limitOp->gtFlags & GTF_ICON_SIMD_COUNT) != 0) { optLoopTable[loopInd].lpFlags |= LPFLG_SIMD_LIMIT; } } else if (limitOp->gtOper == GT_LCL_VAR && !optIsVarAssigned(from, to, nullptr, limitOp->AsLclVarCommon()->GetLclNum())) { optLoopTable[loopInd].lpFlags |= LPFLG_VAR_LIMIT; } else if (limitOp->gtOper == GT_ARR_LENGTH) { optLoopTable[loopInd].lpFlags |= LPFLG_ARRLEN_LIMIT; } else { return false; } // Save the type of the comparison between the iterator and the limit. 
optLoopTable[loopInd].lpTestTree = relop; return true; } //---------------------------------------------------------------------------------- // optIsLoopIncrTree: Check if loop is a tree of form v += 1 or v = v + 1 // // Arguments: // incr The incr tree to be checked. Whether incr tree is // oper-equal(+=, -=...) type nodes or v=v+1 type ASG nodes. // // Operation: // The test tree is parsed to check if "iterVar" matches the lhs of the condition // and the rhs limit is extracted from the "test" tree. The limit information is // added to the loop table. // // Return Value: // iterVar local num if the iterVar is found, otherwise BAD_VAR_NUM. // unsigned Compiler::optIsLoopIncrTree(GenTree* incr) { GenTree* incrVal; genTreeOps updateOper; unsigned iterVar = incr->IsLclVarUpdateTree(&incrVal, &updateOper); if (iterVar != BAD_VAR_NUM) { // We have v = v op y type asg node. switch (updateOper) { case GT_ADD: case GT_SUB: case GT_MUL: case GT_RSH: case GT_LSH: break; default: return BAD_VAR_NUM; } // Increment should be by a const int. // TODO-CQ: CLONE: allow variable increments. if ((incrVal->gtOper != GT_CNS_INT) || (incrVal->TypeGet() != TYP_INT)) { return BAD_VAR_NUM; } } return iterVar; } //---------------------------------------------------------------------------------- // optComputeIterInfo: Check tree is loop increment of a lcl that is loop-invariant. // // Arguments: // from, to - are blocks (beg, end) which are part of the loop. // incr - tree that increments the loop iterator. v+=1 or v=v+1. // pIterVar - see return value. // // Return Value: // Returns true if iterVar "v" can be returned in "pIterVar", otherwise returns // false. // // Operation: // Check if the "incr" tree is a "v=v+1 or v+=1" type tree and make sure it is not // assigned in the loop. 
// bool Compiler::optComputeIterInfo(GenTree* incr, BasicBlock* from, BasicBlock* to, unsigned* pIterVar) { unsigned iterVar = optIsLoopIncrTree(incr); if (iterVar == BAD_VAR_NUM) { return false; } if (optIsVarAssigned(from, to, incr, iterVar)) { JITDUMP("iterVar is assigned in loop\n"); return false; } *pIterVar = iterVar; return true; } //---------------------------------------------------------------------------------- // optIsLoopTestEvalIntoTemp: // Pattern match if the test tree is computed into a tmp // and the "tmp" is used as jump condition for loop termination. // // Arguments: // testStmt - is the JTRUE statement that is of the form: jmpTrue (Vtmp != 0) // where Vtmp contains the actual loop test result. // newTestStmt - contains the statement that is the actual test stmt involving // the loop iterator. // // Return Value: // Returns true if a new test tree can be obtained. // // Operation: // Scan if the current stmt is a jtrue with (Vtmp != 0) as condition // Then returns the rhs for def of Vtmp as the "test" node. // // Note: // This method just retrieves what it thinks is the "test" node, // the callers are expected to verify that "iterVar" is used in the test. // bool Compiler::optIsLoopTestEvalIntoTemp(Statement* testStmt, Statement** newTestStmt) { GenTree* test = testStmt->GetRootNode(); if (test->gtOper != GT_JTRUE) { return false; } GenTree* relop = test->gtGetOp1(); noway_assert(relop->OperIsCompare()); GenTree* opr1 = relop->AsOp()->gtOp1; GenTree* opr2 = relop->AsOp()->gtOp2; // Make sure we have jtrue (vtmp != 0) if ((relop->OperGet() == GT_NE) && (opr1->OperGet() == GT_LCL_VAR) && (opr2->OperGet() == GT_CNS_INT) && opr2->IsIntegralConst(0)) { // Get the previous statement to get the def (rhs) of Vtmp to see // if the "test" is evaluated into Vtmp. 
Statement* prevStmt = testStmt->GetPrevStmt(); if (prevStmt == nullptr) { return false; } GenTree* tree = prevStmt->GetRootNode(); if (tree->OperGet() == GT_ASG) { GenTree* lhs = tree->AsOp()->gtOp1; GenTree* rhs = tree->AsOp()->gtOp2; // Return as the new test node. if (lhs->gtOper == GT_LCL_VAR && lhs->AsLclVarCommon()->GetLclNum() == opr1->AsLclVarCommon()->GetLclNum()) { if (rhs->OperIsCompare()) { *newTestStmt = prevStmt; return true; } } } } return false; } //---------------------------------------------------------------------------------- // optExtractInitTestIncr: // Extract the "init", "test" and "incr" nodes of the loop. // // Arguments: // head - Loop head block // bottom - Loop bottom block // top - Loop top block // ppInit - The init stmt of the loop if found. // ppTest - The test stmt of the loop if found. // ppIncr - The incr stmt of the loop if found. // // Return Value: // The results are put in "ppInit", "ppTest" and "ppIncr" if the method // returns true. Returns false if the information can't be extracted. // // Operation: // Check if the "test" stmt is last stmt in the loop "bottom". If found good, // "test" stmt is found. Try to find the "incr" stmt. Check previous stmt of // "test" to get the "incr" stmt. If it is not found it could be a loop of the // below form. // // +-------<-----------------<-----------+ // | | // v | // BBinit(head) -> BBcond(top) -> BBLoopBody(bottom) ---^ // // Check if the "incr" tree is present in the loop "top" node as the last stmt. // Also check if the "test" tree is assigned to a tmp node and the tmp is used // in the jtrue condition. // // Note: // This method just retrieves what it thinks is the "test" node, // the callers are expected to verify that "iterVar" is used in the test. 
// bool Compiler::optExtractInitTestIncr( BasicBlock* head, BasicBlock* bottom, BasicBlock* top, GenTree** ppInit, GenTree** ppTest, GenTree** ppIncr) { assert(ppInit != nullptr); assert(ppTest != nullptr); assert(ppIncr != nullptr); // Check if last two statements in the loop body are the increment of the iterator // and the loop termination test. noway_assert(bottom->bbStmtList != nullptr); Statement* testStmt = bottom->lastStmt(); noway_assert(testStmt != nullptr && testStmt->GetNextStmt() == nullptr); Statement* newTestStmt; if (optIsLoopTestEvalIntoTemp(testStmt, &newTestStmt)) { testStmt = newTestStmt; } // Check if we have the incr stmt before the test stmt, if we don't, // check if incr is part of the loop "top". Statement* incrStmt = testStmt->GetPrevStmt(); if (incrStmt == nullptr || optIsLoopIncrTree(incrStmt->GetRootNode()) == BAD_VAR_NUM) { if (top == nullptr || top->bbStmtList == nullptr || top->bbStmtList->GetPrevStmt() == nullptr) { return false; } // If the prev stmt to loop test is not incr, then check if we have loop test evaluated into a tmp. Statement* toplastStmt = top->lastStmt(); if (optIsLoopIncrTree(toplastStmt->GetRootNode()) != BAD_VAR_NUM) { incrStmt = toplastStmt; } else { return false; } } assert(testStmt != incrStmt); // Find the last statement in the loop pre-header which we expect to be the initialization of // the loop iterator. Statement* phdrStmt = head->firstStmt(); if (phdrStmt == nullptr) { return false; } Statement* initStmt = phdrStmt->GetPrevStmt(); noway_assert(initStmt != nullptr && (initStmt->GetNextStmt() == nullptr)); // If it is a duplicated loop condition, skip it. if (initStmt->GetRootNode()->OperIs(GT_JTRUE)) { bool doGetPrev = true; #ifdef DEBUG if (opts.optRepeat) { // Previous optimization passes may have inserted compiler-generated // statements other than duplicated loop conditions. doGetPrev = (initStmt->GetPrevStmt() != nullptr); } else { // Must be a duplicated loop condition. 
noway_assert(initStmt->GetRootNode()->gtOper == GT_JTRUE); } #endif // DEBUG if (doGetPrev) { initStmt = initStmt->GetPrevStmt(); } noway_assert(initStmt != nullptr); } *ppInit = initStmt->GetRootNode(); *ppTest = testStmt->GetRootNode(); *ppIncr = incrStmt->GetRootNode(); return true; } /***************************************************************************** * * Record the loop in the loop table. Return true if successful, false if * out of entries in loop table. */ bool Compiler::optRecordLoop( BasicBlock* head, BasicBlock* top, BasicBlock* entry, BasicBlock* bottom, BasicBlock* exit, unsigned char exitCnt) { if (exitCnt == 1) { noway_assert(exit != nullptr); } // Record this loop in the table, if there's room. assert(optLoopCount <= BasicBlock::MAX_LOOP_NUM); if (optLoopCount == BasicBlock::MAX_LOOP_NUM) { #if COUNT_LOOPS loopOverflowThisMethod = true; #endif return false; } // Assumed preconditions on the loop we're adding. assert(top->bbNum <= entry->bbNum); assert(entry->bbNum <= bottom->bbNum); assert(head->bbNum < top->bbNum || head->bbNum > bottom->bbNum); unsigned char loopInd = optLoopCount; if (optLoopTable == nullptr) { assert(loopInd == 0); optLoopTable = getAllocator(CMK_LoopOpt).allocate<LoopDsc>(BasicBlock::MAX_LOOP_NUM); NewLoopEpoch(); } else { // If the new loop contains any existing ones, add it in the right place. for (unsigned char prevPlus1 = optLoopCount; prevPlus1 > 0; prevPlus1--) { unsigned char prev = prevPlus1 - 1; if (optLoopTable[prev].lpContainedBy(top, bottom)) { loopInd = prev; } } // Move up any loops if necessary. for (unsigned j = optLoopCount; j > loopInd; j--) { optLoopTable[j] = optLoopTable[j - 1]; } } #ifdef DEBUG for (unsigned i = loopInd + 1; i < optLoopCount; i++) { // The loop is well-formed. assert(optLoopTable[i].lpWellFormed()); // Check for disjoint. if (optLoopTable[i].lpDisjoint(top, bottom)) { continue; } // Otherwise, assert complete containment (of optLoopTable[i] in new loop). 
assert(optLoopTable[i].lpContainedBy(top, bottom)); } #endif // DEBUG optLoopTable[loopInd].lpHead = head; optLoopTable[loopInd].lpTop = top; optLoopTable[loopInd].lpBottom = bottom; optLoopTable[loopInd].lpEntry = entry; optLoopTable[loopInd].lpExit = exit; optLoopTable[loopInd].lpExitCnt = exitCnt; optLoopTable[loopInd].lpParent = BasicBlock::NOT_IN_LOOP; optLoopTable[loopInd].lpChild = BasicBlock::NOT_IN_LOOP; optLoopTable[loopInd].lpSibling = BasicBlock::NOT_IN_LOOP; optLoopTable[loopInd].lpAsgVars = AllVarSetOps::UninitVal(); optLoopTable[loopInd].lpFlags = LPFLG_EMPTY; // We haven't yet recorded any side effects. for (MemoryKind memoryKind : allMemoryKinds()) { optLoopTable[loopInd].lpLoopHasMemoryHavoc[memoryKind] = false; } optLoopTable[loopInd].lpFieldsModified = nullptr; optLoopTable[loopInd].lpArrayElemTypesModified = nullptr; // // Try to find loops that have an iterator (i.e. for-like loops) "for (init; test; incr){ ... }" // We have the following restrictions: // 1. The loop condition must be a simple one i.e. only one JTRUE node // 2. There must be a loop iterator (a local var) that is // incremented (decremented or lsh, rsh, mul) with a constant value // 3. The iterator is incremented exactly once // 4. The loop condition must use the iterator. // if (bottom->bbJumpKind == BBJ_COND) { GenTree* init; GenTree* test; GenTree* incr; if (!optExtractInitTestIncr(head, bottom, top, &init, &test, &incr)) { goto DONE_LOOP; } unsigned iterVar = BAD_VAR_NUM; if (!optComputeIterInfo(incr, head->bbNext, bottom, &iterVar)) { goto DONE_LOOP; } // Make sure the "iterVar" initialization is never skipped, // i.e. every pred of ENTRY other than HEAD is in the loop. for (BasicBlock* const predBlock : entry->PredBlocks()) { if ((predBlock != head) && !optLoopTable[loopInd].lpContains(predBlock)) { goto DONE_LOOP; } } if (!optPopulateInitInfo(loopInd, head, init, iterVar)) { goto DONE_LOOP; } // Check that the iterator is used in the loop condition. 
if (!optCheckIterInLoopTest(loopInd, test, head->bbNext, bottom, iterVar)) { goto DONE_LOOP; } // We know the loop has an iterator at this point ->flag it as LPFLG_ITER // Record the iterator, the pointer to the test node // and the initial value of the iterator (constant or local var) optLoopTable[loopInd].lpFlags |= LPFLG_ITER; // Record iterator. optLoopTable[loopInd].lpIterTree = incr; #if COUNT_LOOPS // Save the initial value of the iterator - can be lclVar or constant // Flag the loop accordingly. iterLoopCount++; #endif #if COUNT_LOOPS simpleTestLoopCount++; #endif #if COUNT_LOOPS // Check if a constant iteration loop. if ((optLoopTable[loopInd].lpFlags & LPFLG_CONST_INIT) && (optLoopTable[loopInd].lpFlags & LPFLG_CONST_LIMIT)) { // This is a constant loop. constIterLoopCount++; } #endif #ifdef DEBUG if (verbose && 0) { printf("\nConstant loop initializer:\n"); gtDispTree(init); printf("\nConstant loop body:\n"); BasicBlock* block = head; do { block = block->bbNext; for (Statement* const stmt : block->Statements()) { if (stmt->GetRootNode() == incr) { break; } printf("\n"); gtDispTree(stmt->GetRootNode()); } } while (block != bottom); } #endif // DEBUG } DONE_LOOP: bool loopInsertedAtEnd = (loopInd == optLoopCount); optLoopCount++; #ifdef DEBUG if (verbose) { printf("Recorded loop %s", loopInsertedAtEnd ? 
"" : "(extended) "); optPrintLoopInfo(loopInd, /* verbose */ true); printf("\n"); } #endif // DEBUG return true; } #ifdef DEBUG void Compiler::optCheckPreds() { for (BasicBlock* const block : Blocks()) { for (BasicBlock* const predBlock : block->PredBlocks()) { // make sure this pred is part of the BB list BasicBlock* bb; for (bb = fgFirstBB; bb; bb = bb->bbNext) { if (bb == predBlock) { break; } } noway_assert(bb); switch (bb->bbJumpKind) { case BBJ_COND: if (bb->bbJumpDest == block) { break; } FALLTHROUGH; case BBJ_NONE: noway_assert(bb->bbNext == block); break; case BBJ_EHFILTERRET: case BBJ_ALWAYS: case BBJ_EHCATCHRET: noway_assert(bb->bbJumpDest == block); break; default: break; } } } } #endif // DEBUG namespace { //------------------------------------------------------------------------ // LoopSearch: Class that handles scanning a range of blocks to detect a loop, // moving blocks to make the loop body contiguous, and recording the loop. // // We will use the following terminology: // HEAD - the basic block that flows into the loop ENTRY block (Currently MUST be lexically before entry). // Not part of the looping of the loop. // TOP - the target of the backward edge from BOTTOM, and the lexically first basic block (in bbNext order) // within this loop. // BOTTOM - the lexically last block in the loop (i.e. the block from which we jump to the top) // EXIT - the predecessor of loop's unique exit edge, if it has a unique exit edge; else nullptr // ENTRY - the entry in the loop (not necessarly the TOP), but there must be only one entry // // We (currently) require the body of a loop to be a contiguous (in bbNext order) sequence of basic blocks. // When the loop is identified, blocks will be moved out to make it a compact contiguous region if possible, // and in cases where compaction is not possible, we'll subsequently treat all blocks in the lexical range // between TOP and BOTTOM as part of the loop even if they aren't part of the SCC. 
// Regarding nesting: Since a given block can only have one back-edge (we only detect loops with back-edges // from BBJ_COND or BBJ_ALWAYS blocks), no two loops will share the same BOTTOM. Two loops may share the // same TOP/ENTRY as reported by LoopSearch, and optCanonicalizeLoopNest will subsequently re-write // the CFG so that no two loops share the same TOP/ENTRY anymore. // // | // v // head // | // | top <--+ // | | | // | ... | // | | | // | v | // +---> entry | // | | // ... | // | | // v | // +-- exit/tail | // | | | // | ... | // | | | // | v | // | bottom ---+ // | // +------+ // | // v // class LoopSearch { // Keeping track of which blocks are in the loop requires two block sets since we may add blocks // as we go but the BlockSet type's max ID doesn't increase to accommodate them. Define a helper // struct to make the ensuing code more readable. struct LoopBlockSet { private: // Keep track of blocks with bbNum <= oldBlockMaxNum in a regular BlockSet, since // it can hold all of them. BlockSet oldBlocksInLoop; // Blocks with bbNum <= oldBlockMaxNum // Keep track of blocks with bbNum > oldBlockMaxNum in a separate BlockSet, but // indexing them by (blockNum - oldBlockMaxNum); since we won't generate more than // one new block per old block, this must be sufficient to track any new blocks. BlockSet newBlocksInLoop; // Blocks with bbNum > oldBlockMaxNum Compiler* comp; unsigned int oldBlockMaxNum; public: LoopBlockSet(Compiler* comp) : oldBlocksInLoop(BlockSetOps::UninitVal()) , newBlocksInLoop(BlockSetOps::UninitVal()) , comp(comp) , oldBlockMaxNum(comp->fgBBNumMax) { } void Reset(unsigned int seedBlockNum) { if (BlockSetOps::MayBeUninit(oldBlocksInLoop)) { // Either the block sets are uninitialized (and long), so we need to initialize // them (and allocate their backing storage), or they are short and empty, so // assigning MakeEmpty to them is as cheap as ClearD. 
oldBlocksInLoop = BlockSetOps::MakeEmpty(comp); newBlocksInLoop = BlockSetOps::MakeEmpty(comp); } else { // We know the backing storage is already allocated, so just clear it. BlockSetOps::ClearD(comp, oldBlocksInLoop); BlockSetOps::ClearD(comp, newBlocksInLoop); } assert(seedBlockNum <= oldBlockMaxNum); BlockSetOps::AddElemD(comp, oldBlocksInLoop, seedBlockNum); } bool CanRepresent(unsigned int blockNum) { // We can represent old blocks up to oldBlockMaxNum, and // new blocks up to 2 * oldBlockMaxNum. return (blockNum <= 2 * oldBlockMaxNum); } bool IsMember(unsigned int blockNum) { if (blockNum > oldBlockMaxNum) { return BlockSetOps::IsMember(comp, newBlocksInLoop, blockNum - oldBlockMaxNum); } else { return BlockSetOps::IsMember(comp, oldBlocksInLoop, blockNum); } } void Insert(unsigned int blockNum) { if (blockNum > oldBlockMaxNum) { BlockSetOps::AddElemD(comp, newBlocksInLoop, blockNum - oldBlockMaxNum); } else { BlockSetOps::AddElemD(comp, oldBlocksInLoop, blockNum); } } bool TestAndInsert(unsigned int blockNum) { if (blockNum > oldBlockMaxNum) { unsigned int shiftedNum = blockNum - oldBlockMaxNum; if (!BlockSetOps::IsMember(comp, newBlocksInLoop, shiftedNum)) { BlockSetOps::AddElemD(comp, newBlocksInLoop, shiftedNum); return false; } } else { if (!BlockSetOps::IsMember(comp, oldBlocksInLoop, blockNum)) { BlockSetOps::AddElemD(comp, oldBlocksInLoop, blockNum); return false; } } return true; } }; LoopBlockSet loopBlocks; // Set of blocks identified as part of the loop Compiler* comp; // See LoopSearch class comment header for a diagram relating these fields: BasicBlock* head; // Predecessor of unique entry edge BasicBlock* top; // Successor of back-edge from BOTTOM BasicBlock* bottom; // Predecessor of back-edge to TOP, also lexically last in-loop block BasicBlock* entry; // Successor of unique entry edge BasicBlock* lastExit; // Most recently discovered exit block unsigned char exitCount; // Number of discovered exit edges unsigned int oldBlockMaxNum; // Used 
to identify new blocks created during compaction BlockSet bottomBlocks; // BOTTOM blocks of already-recorded loops #ifdef DEBUG bool forgotExit = false; // Flags a rare case where lastExit gets nulled out, for assertions #endif bool changedFlowGraph = false; // Signals that loop compaction has modified the flow graph public: LoopSearch(Compiler* comp) : loopBlocks(comp), comp(comp), oldBlockMaxNum(comp->fgBBNumMax), bottomBlocks(BlockSetOps::MakeEmpty(comp)) { // Make sure we've renumbered such that the bitsets can hold all the bits assert(comp->fgBBNumMax <= comp->fgCurBBEpochSize); } //------------------------------------------------------------------------ // RecordLoop: Notify the Compiler that a loop has been found. // // Return Value: // true - Loop successfully recorded. // false - Compiler has run out of loop descriptors; loop not recorded. // bool RecordLoop() { // At this point we have a compact loop - record it in the loop table. // If we found only one exit, record it in the table too // (otherwise an exit = nullptr in the loop table means multiple exits). BasicBlock* onlyExit = (exitCount == 1 ? lastExit : nullptr); if (comp->optRecordLoop(head, top, entry, bottom, onlyExit, exitCount)) { // Record the BOTTOM block for future reference before returning. assert(bottom->bbNum <= oldBlockMaxNum); BlockSetOps::AddElemD(comp, bottomBlocks, bottom->bbNum); return true; } // Unable to record this loop because the loop descriptor table overflowed. return false; } //------------------------------------------------------------------------ // ChangedFlowGraph: Determine whether loop compaction has modified the flow graph. // // Return Value: // true - The flow graph has been modified; fgUpdateChangedFlowGraph should // be called (which is the caller's responsibility). // false - The flow graph has not been modified by this LoopSearch. 
// bool ChangedFlowGraph() { return changedFlowGraph; } //------------------------------------------------------------------------ // FindLoop: Search for a loop with the given HEAD block and back-edge. // // Arguments: // head - Block to be the HEAD of any loop identified // top - Block to be the TOP of any loop identified // bottom - Block to be the BOTTOM of any loop identified // // Return Value: // true - Found a valid loop. // false - Did not find a valid loop. // // Notes: // May modify flow graph to make loop compact before returning. // Will set instance fields to track loop's extent and exits if a valid // loop is found, and potentially trash them otherwise. // bool FindLoop(BasicBlock* head, BasicBlock* top, BasicBlock* bottom) { // Is this a loop candidate? - We look for "back edges", i.e. an edge from BOTTOM // to TOP (note that this is an abuse of notation since this is not necessarily a back edge // as the definition says, but merely an indication that we have a loop there). // Thus, we have to be very careful and after entry discovery check that it is indeed // the only place we enter the loop (especially for non-reducible flow graphs). if (top->bbNum > bottom->bbNum) // is this a backward edge? (from BOTTOM to TOP) { // Edge from BOTTOM to TOP is not a backward edge return false; } if (bottom->bbNum > oldBlockMaxNum) { // Not a true back-edge; bottom is a block added to reconnect fall-through during // loop processing, so its block number does not reflect its position. return false; } if (bottom->KindIs(BBJ_EHFINALLYRET, BBJ_EHFILTERRET, BBJ_EHCATCHRET, BBJ_CALLFINALLY, BBJ_SWITCH)) { // BBJ_EHFINALLYRET, BBJ_EHFILTERRET, BBJ_EHCATCHRET, and BBJ_CALLFINALLY can never form a loop. // BBJ_SWITCH that has a backward jump appears only for labeled break. return false; } // The presence of a "back edge" is an indication that a loop might be present here. // // Definition: A loop is: // 1. A collection of STRONGLY CONNECTED nodes i.e. 
there is a path from any // node in the loop to any other node in the loop (wholly within the loop) // 2. The loop has a unique ENTRY, i.e. there is only one way to reach a node // in the loop from outside the loop, and that is through the ENTRY // Let's find the loop ENTRY BasicBlock* entry = FindEntry(head, top, bottom); if (entry == nullptr) { // For now, we only recognize loops where HEAD has some successor ENTRY in the loop. return false; } // Passed the basic checks; initialize instance state for this back-edge. this->head = head; this->top = top; this->entry = entry; this->bottom = bottom; this->lastExit = nullptr; this->exitCount = 0; if (!HasSingleEntryCycle()) { // There isn't actually a loop between TOP and BOTTOM return false; } if (!loopBlocks.IsMember(top->bbNum)) { // The "back-edge" we identified isn't actually part of the flow cycle containing ENTRY return false; } // Disqualify loops where the first block of the loop is less nested in EH than // the bottom block. That is, we don't want to handle loops where the back edge // goes from within an EH region to a first block that is outside that same EH // region. Note that we *do* handle loops where the first block is the *first* // block of a more nested EH region (since it is legal to branch to the first // block of an immediately more nested EH region). So, for example, disqualify // this: // // BB02 // ... // try { // ... // BB10 BBJ_COND => BB02 // ... // } // // Here, BB10 is more nested than BB02. if (bottom->hasTryIndex() && !comp->bbInTryRegions(bottom->getTryIndex(), top)) { JITDUMP("Loop 'top' " FMT_BB " is in an outer EH region compared to loop 'bottom' " FMT_BB ". Rejecting " "loop.\n", top->bbNum, bottom->bbNum); return false; } #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // Disqualify loops where the first block of the loop is a finally target. 
// The main problem is when multiple loops share a 'top' block that is a finally // target and we canonicalize the loops by adding a new loop head. In that case, we // need to update the blocks so the finally target bit is moved to the newly created // block, and removed from the old 'top' block. This is 'hard', so it's easier to disallow // the loop than to update the flow graph to support this case. if ((top->bbFlags & BBF_FINALLY_TARGET) != 0) { JITDUMP("Loop 'top' " FMT_BB " is a finally target. Rejecting loop.\n", top->bbNum); return false; } #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // Compact the loop (sweep through it and move out any blocks that aren't part of the // flow cycle), and find the exits. if (!MakeCompactAndFindExits()) { // Unable to preserve well-formed loop during compaction. return false; } // We have a valid loop. return true; } //------------------------------------------------------------------------ // GetExitCount: Return the exit count computed for the loop // unsigned char GetExitCount() const { return exitCount; } private: //------------------------------------------------------------------------ // FindEntry: See if given HEAD flows to valid ENTRY between given TOP and BOTTOM // // Arguments: // head - Block to be the HEAD of any loop identified // top - Block to be the TOP of any loop identified // bottom - Block to be the BOTTOM of any loop identified // // Return Value: // Block to be the ENTRY of any loop identified, or nullptr if no // such entry meeting our criteria can be found. // // Notes: // Returns main entry if one is found, does not check for side-entries. // BasicBlock* FindEntry(BasicBlock* head, BasicBlock* top, BasicBlock* bottom) { if (head->bbJumpKind == BBJ_ALWAYS) { if (head->bbJumpDest->bbNum <= bottom->bbNum && head->bbJumpDest->bbNum >= top->bbNum) { // OK - we enter somewhere within the loop. 
// Cannot enter at the top - should have being caught by redundant jumps assert((head->bbJumpDest != top) || (head->bbFlags & BBF_KEEP_BBJ_ALWAYS)); return head->bbJumpDest; } else { // special case - don't consider now // assert (!"Loop entered in weird way!"); return nullptr; } } // Can we fall through into the loop? else if (head->KindIs(BBJ_NONE, BBJ_COND)) { // The ENTRY is at the TOP (a do-while loop) return top; } else { return nullptr; // HEAD does not flow into the loop; bail for now } } //------------------------------------------------------------------------ // HasSingleEntryCycle: Perform a reverse flow walk from ENTRY, visiting // only blocks between TOP and BOTTOM, to determine if such a cycle // exists and if it has a single entry. // // Return Value: // true - Found a single-entry cycle. // false - Did not find a single-entry cycle. // // Notes: // Will mark (in `loopBlocks`) all blocks found to participate in the cycle. // bool HasSingleEntryCycle() { // Now do a backwards flow walk from entry to see if we have a single-entry loop bool foundCycle = false; // Seed the loop block set and worklist with the entry block. loopBlocks.Reset(entry->bbNum); jitstd::list<BasicBlock*> worklist(comp->getAllocator(CMK_LoopOpt)); worklist.push_back(entry); while (!worklist.empty()) { BasicBlock* block = worklist.back(); worklist.pop_back(); // Make sure ENTRY dominates all blocks in the loop. if (block->bbNum > oldBlockMaxNum) { // This is a new block we added to connect fall-through, so the // recorded dominator information doesn't cover it. Just continue, // and when we process its unique predecessor we'll abort if ENTRY // doesn't dominate that. } else if (!comp->fgDominate(entry, block)) { return false; } // Add preds to the worklist, checking for side-entries. 
for (BasicBlock* const predBlock : block->PredBlocks()) { unsigned int testNum = PositionNum(predBlock); if ((testNum < top->bbNum) || (testNum > bottom->bbNum)) { // Pred is out of loop range if (block == entry) { if (predBlock == head) { // This is the single entry we expect. continue; } // ENTRY has some pred other than head outside the loop. If ENTRY does not // dominate this pred, we'll consider this a side-entry and skip this loop; // otherwise the loop is still valid and this may be a (flow-wise) back-edge // of an outer loop. For the dominance test, if `predBlock` is a new block, use // its unique predecessor since the dominator tree has info for that. BasicBlock* effectivePred = (predBlock->bbNum > oldBlockMaxNum ? predBlock->bbPrev : predBlock); if (comp->fgDominate(entry, effectivePred)) { // Outer loop back-edge continue; } } // There are multiple entries to this loop, don't consider it. return false; } bool isFirstVisit; if (predBlock == entry) { // We have indeed found a cycle in the flow graph. isFirstVisit = !foundCycle; foundCycle = true; assert(loopBlocks.IsMember(predBlock->bbNum)); } else if (loopBlocks.TestAndInsert(predBlock->bbNum)) { // Already visited this pred isFirstVisit = false; } else { // Add this predBlock to the worklist worklist.push_back(predBlock); isFirstVisit = true; } if (isFirstVisit && (predBlock->bbNext != nullptr) && (PositionNum(predBlock->bbNext) == predBlock->bbNum)) { // We've created a new block immediately after `predBlock` to // reconnect what was fall-through. Mark it as in-loop also; // it needs to stay with `prev` and if it exits the loop we'd // just need to re-create it if we tried to move it out. loopBlocks.Insert(predBlock->bbNext->bbNum); } } } return foundCycle; } //------------------------------------------------------------------------ // PositionNum: Get the number identifying a block's position per the // lexical ordering that existed before searching for (and compacting) // loops. 
// // Arguments: // block - Block whose position is desired. // // Return Value: // A number indicating that block's position relative to others. // // Notes: // When the given block is a new one created during loop compaction, // the number of its unique predecessor is returned. // unsigned int PositionNum(BasicBlock* block) { if (block->bbNum > oldBlockMaxNum) { // This must be a block we inserted to connect fall-through after moving blocks. // To determine if it's in the loop or not, use the number of its unique predecessor // block. assert(block->bbPreds->getBlock() == block->bbPrev); assert(block->bbPreds->flNext == nullptr); return block->bbPrev->bbNum; } return block->bbNum; } //------------------------------------------------------------------------ // MakeCompactAndFindExits: Compact the loop (sweep through it and move out // any blocks that aren't part of the flow cycle), and find the exits (set // lastExit and exitCount). // // Return Value: // true - Loop successfully compacted (or `loopBlocks` expanded to // include all blocks in the lexical range), exits enumerated. // false - Loop cannot be made compact and remain well-formed. // bool MakeCompactAndFindExits() { // Compaction (if it needs to happen) will require an insertion point. BasicBlock* moveAfter = nullptr; for (BasicBlock* previous = top->bbPrev; previous != bottom;) { BasicBlock* block = previous->bbNext; if (loopBlocks.IsMember(block->bbNum)) { // This block is a member of the loop. Check to see if it may exit the loop. CheckForExit(block); // Done processing this block; move on to the next. previous = block; continue; } // This blocks is lexically between TOP and BOTTOM, but it does not // participate in the flow cycle. Check for a run of consecutive // such blocks. 
BasicBlock* lastNonLoopBlock = block; BasicBlock* nextLoopBlock = block->bbNext; while (!loopBlocks.IsMember(nextLoopBlock->bbNum)) { lastNonLoopBlock = nextLoopBlock; nextLoopBlock = nextLoopBlock->bbNext; // This loop must terminate because we know BOTTOM is in loopBlocks. } // Choose an insertion point for non-loop blocks if we haven't yet done so. if (moveAfter == nullptr) { moveAfter = FindInsertionPoint(); } if (!BasicBlock::sameEHRegion(previous, nextLoopBlock) || !BasicBlock::sameEHRegion(previous, moveAfter)) { // EH regions would be ill-formed if we moved these blocks out. // See if we can consider them loop blocks without introducing // a side-entry. if (CanTreatAsLoopBlocks(block, lastNonLoopBlock)) { // The call to `canTreatAsLoop` marked these blocks as part of the loop; // iterate without updating `previous` so that we'll analyze them as part // of the loop. continue; } else { // We can't move these out of the loop or leave them in, so just give // up on this loop. return false; } } // Now physically move the blocks. BasicBlock* moveBefore = moveAfter->bbNext; comp->fgUnlinkRange(block, lastNonLoopBlock); comp->fgMoveBlocksAfter(block, lastNonLoopBlock, moveAfter); comp->ehUpdateLastBlocks(moveAfter, lastNonLoopBlock); // Apply any adjustments needed for fallthrough at the boundaries of the moved region. FixupFallThrough(moveAfter, moveBefore, block); FixupFallThrough(lastNonLoopBlock, nextLoopBlock, moveBefore); // Also apply any adjustments needed where the blocks were snipped out of the loop. BasicBlock* newBlock = FixupFallThrough(previous, block, nextLoopBlock); if (newBlock != nullptr) { // This new block is in the loop and is a loop exit. loopBlocks.Insert(newBlock->bbNum); lastExit = newBlock; ++exitCount; } // Update moveAfter for the next insertion. moveAfter = lastNonLoopBlock; // Note that we've changed the flow graph, and continue without updating // `previous` so that we'll process nextLoopBlock. 
changedFlowGraph = true; } if ((exitCount == 1) && (lastExit == nullptr)) { // If we happen to have a loop with two exits, one of which goes to an // infinite loop that's lexically nested inside it, where the inner loop // can't be moved out, we can end up in this situation (because // CanTreatAsLoopBlocks will have decremented the count expecting to find // another exit later). Bump the exit count to 2, since downstream code // will not be prepared for null lastExit with exitCount of 1. assert(forgotExit); exitCount = 2; } // Loop compaction was successful return true; } //------------------------------------------------------------------------ // FindInsertionPoint: Find an appropriate spot to which blocks that are // lexically between TOP and BOTTOM but not part of the flow cycle // can be moved. // // Return Value: // Block after which to insert moved blocks. // BasicBlock* FindInsertionPoint() { // Find an insertion point for blocks we're going to move. Move them down // out of the loop, and if possible find a spot that won't break up fall-through. BasicBlock* moveAfter = bottom; while (moveAfter->bbFallsThrough()) { // Keep looking for a better insertion point if we can. BasicBlock* newMoveAfter = TryAdvanceInsertionPoint(moveAfter); if (newMoveAfter == nullptr) { // Ran out of candidate insertion points, so just split up the fall-through. return moveAfter; } moveAfter = newMoveAfter; } return moveAfter; } //------------------------------------------------------------------------ // TryAdvanceInsertionPoint: Find the next legal insertion point after // the given one, if one exists. // // Arguments: // oldMoveAfter - Prior insertion point; find the next after this. // // Return Value: // The next block after `oldMoveAfter` that is a legal insertion point // (i.e. blocks being swept out of the loop can be moved immediately // after it), if one exists, else nullptr. 
    //
    BasicBlock* TryAdvanceInsertionPoint(BasicBlock* oldMoveAfter)
    {
        BasicBlock* newMoveAfter = oldMoveAfter->bbNext;

        if (!BasicBlock::sameEHRegion(oldMoveAfter, newMoveAfter))
        {
            // Don't cross an EH region boundary.
            return nullptr;
        }

        if (newMoveAfter->KindIs(BBJ_ALWAYS, BBJ_COND))
        {
            unsigned int destNum = newMoveAfter->bbJumpDest->bbNum;
            if ((destNum >= top->bbNum) && (destNum <= bottom->bbNum) && !loopBlocks.IsMember(destNum))
            {
                // Reversing this branch out of block `newMoveAfter` could confuse this algorithm
                // (in particular, the edge would still be numerically backwards but no longer be
                // lexically backwards, so a lexical forward walk from TOP would not find BOTTOM),
                // so don't do that.
                // We're checking for BBJ_ALWAYS and BBJ_COND only here -- we don't need to
                // check for BBJ_SWITCH because we'd never consider it a loop back-edge.
                return nullptr;
            }
        }

        // Similarly check to see if advancing to `newMoveAfter` would reverse the lexical order
        // of an edge from the run of blocks being moved to `newMoveAfter` -- doing so would
        // introduce a new lexical back-edge, which could (maybe?) confuse the loop search
        // algorithm, and isn't desirable layout anyway.
        for (BasicBlock* const predBlock : newMoveAfter->PredBlocks())
        {
            unsigned int predNum = predBlock->bbNum;
            if ((predNum >= top->bbNum) && (predNum <= bottom->bbNum) && !loopBlocks.IsMember(predNum))
            {
                // Don't make this forward edge a backwards edge.
                return nullptr;
            }
        }

        if (IsRecordedBottom(newMoveAfter))
        {
            // This is the BOTTOM of another loop; don't move any blocks past it, to avoid moving them
            // out of that loop (we should have already done so when processing that loop if it were legal).
            return nullptr;
        }

        // Advancing the insertion point is ok, except that we can't split up any CallFinally/BBJ_ALWAYS
        // pair, so if we've got such a pair recurse to see if we can move past the whole thing.
        return (newMoveAfter->isBBCallAlwaysPair() ? TryAdvanceInsertionPoint(newMoveAfter) : newMoveAfter);
    }

    //------------------------------------------------------------------------
    // IsRecordedBottom: Determine if the given block is the BOTTOM of a previously
    // recorded loop.
    //
    // Arguments:
    //    block - Block to check for BOTTOM-ness.
    //
    // Return Value:
    //    true - The block was recorded as `bottom` of some earlier-processed loop.
    //    false - No loops yet recorded have this block as their `bottom`.
    //
    bool IsRecordedBottom(BasicBlock* block)
    {
        if (block->bbNum > oldBlockMaxNum)
        {
            // This is a new block, which can't be an outer bottom block because we only allow old blocks
            // as BOTTOM.
            return false;
        }
        return BlockSetOps::IsMember(comp, bottomBlocks, block->bbNum);
    }

    //------------------------------------------------------------------------
    // CanTreatAsLoopBlocks: If the given range of blocks can be treated as
    // loop blocks, add them to loopBlockSet and return true. Otherwise,
    // return false.
    //
    // Arguments:
    //    firstNonLoopBlock - First block in the run to be subsumed.
    //    lastNonLoopBlock - Last block in the run to be subsumed.
    //
    // Return Value:
    //    true - The blocks from `firstNonLoopBlock` to `lastNonLoopBlock` were
    //       successfully added to `loopBlocks`.
    //    false - Treating the blocks from `firstNonLoopBlock` to `lastNonLoopBlock`
    //       would not be legal (it would induce a side-entry).
    //
    // Notes:
    //    `loopBlocks` may be modified even if `false` is returned.
    //    `exitCount` and `lastExit` may be modified if this process identifies
    //    in-loop edges that were previously counted as exits.
    //
    bool CanTreatAsLoopBlocks(BasicBlock* firstNonLoopBlock, BasicBlock* lastNonLoopBlock)
    {
        for (BasicBlock* const testBlock : comp->Blocks(firstNonLoopBlock, lastNonLoopBlock))
        {
            for (BasicBlock* const testPred : testBlock->PredBlocks())
            {
                unsigned int predPosNum         = PositionNum(testPred);
                unsigned int firstNonLoopPosNum = PositionNum(firstNonLoopBlock);
                unsigned int lastNonLoopPosNum  = PositionNum(lastNonLoopBlock);

                if (loopBlocks.IsMember(predPosNum) ||
                    ((predPosNum >= firstNonLoopPosNum) && (predPosNum <= lastNonLoopPosNum)))
                {
                    // This pred is in the loop (or what will be the loop if we determine this
                    // run of exit blocks doesn't include a side-entry).

                    if (predPosNum < firstNonLoopPosNum)
                    {
                        // We've already counted this block as an exit, so decrement the count.
                        --exitCount;
                        if (lastExit == testPred)
                        {
                            // Erase this now-bogus `lastExit` entry.
                            lastExit = nullptr;
                            INDEBUG(forgotExit = true);
                        }
                    }
                }
                else
                {
                    // This pred is not in the loop, so this constitutes a side-entry.
                    return false;
                }
            }

            // Either we're going to abort the loop on a subsequent testBlock, or this
            // testBlock is part of the loop.
            loopBlocks.Insert(testBlock->bbNum);
        }

        // All blocks were ok to leave in the loop.
        return true;
    }

    //------------------------------------------------------------------------
    // FixupFallThrough: Re-establish any broken control flow connectivity
    // and eliminate any "goto-next"s that were created by changing the
    // given block's lexical follower.
    //
    // Arguments:
    //    block - Block whose `bbNext` has changed.
    //    oldNext - Previous value of `block->bbNext`.
    //    newNext - New value of `block->bbNext`.
    //
    // Return Value:
    //    If a new block is created to reconnect flow, the new block is
    //    returned; otherwise, nullptr.
    //
    BasicBlock* FixupFallThrough(BasicBlock* block, BasicBlock* oldNext, BasicBlock* newNext)
    {
        // If we create a new block, that will be our return value.
        BasicBlock* newBlock = nullptr;

        if (block->bbFallsThrough())
        {
            // Need to reconnect the flow from `block` to `oldNext`.

            if ((block->bbJumpKind == BBJ_COND) && (block->bbJumpDest == newNext))
            {
                // Reverse the jump condition
                GenTree* test = block->lastNode();
                noway_assert(test->OperIsConditionalJump());

                if (test->OperGet() == GT_JTRUE)
                {
                    GenTree* cond = comp->gtReverseCond(test->AsOp()->gtOp1);
                    assert(cond == test->AsOp()->gtOp1); // Ensure `gtReverseCond` did not create a new node.
                    test->AsOp()->gtOp1 = cond;
                }
                else
                {
                    comp->gtReverseCond(test);
                }

                // Redirect the Conditional JUMP to go to `oldNext`
                block->bbJumpDest = oldNext;
            }
            else
            {
                // Insert an unconditional jump to `oldNext` just after `block`.
                newBlock = comp->fgConnectFallThrough(block, oldNext);
                noway_assert((newBlock == nullptr) || loopBlocks.CanRepresent(newBlock->bbNum));
            }
        }
        else if ((block->bbJumpKind == BBJ_ALWAYS) && (block->bbJumpDest == newNext))
        {
            // We've made `block`'s jump target its bbNext, so remove the jump.
            if (!comp->fgOptimizeBranchToNext(block, newNext, block->bbPrev))
            {
                // If optimizing away the goto-next failed for some reason, mark it KEEP_BBJ_ALWAYS to
                // prevent assertions from complaining about it.
                block->bbFlags |= BBF_KEEP_BBJ_ALWAYS;
            }
        }

        // Make sure we don't leave around a goto-next unless it's marked KEEP_BBJ_ALWAYS.
        assert(!block->KindIs(BBJ_COND, BBJ_ALWAYS) || (block->bbJumpDest != newNext) ||
               ((block->bbFlags & BBF_KEEP_BBJ_ALWAYS) != 0));
        return newBlock;
    }

    //------------------------------------------------------------------------
    // CheckForExit: Check if the given block has any successor edges that are
    // loop exits, and update `lastExit` and `exitCount` if so.
    //
    // Arguments:
    //    block - Block whose successor edges are to be checked.
    //
    // Notes:
    //    If one block has multiple exiting successor edges, those are counted
    //    as multiple exits in `exitCount`.
// void CheckForExit(BasicBlock* block) { BasicBlock* exitPoint; switch (block->bbJumpKind) { case BBJ_COND: case BBJ_CALLFINALLY: case BBJ_ALWAYS: case BBJ_EHCATCHRET: assert(block->bbJumpDest); exitPoint = block->bbJumpDest; if (!loopBlocks.IsMember(exitPoint->bbNum)) { // Exit from a block other than BOTTOM lastExit = block; exitCount++; } break; case BBJ_NONE: break; case BBJ_EHFINALLYRET: case BBJ_EHFILTERRET: // The "try" associated with this "finally" must be in the same loop, so the // finally block will return control inside the loop. break; case BBJ_THROW: case BBJ_RETURN: // Those are exits from the loop lastExit = block; exitCount++; break; case BBJ_SWITCH: for (BasicBlock* const exitPoint : block->SwitchTargets()) { if (!loopBlocks.IsMember(exitPoint->bbNum)) { lastExit = block; exitCount++; } } break; default: noway_assert(!"Unexpected bbJumpKind"); break; } if (block->bbFallsThrough() && !loopBlocks.IsMember(block->bbNext->bbNum)) { // Found a fall-through exit. lastExit = block; exitCount++; } } }; } // end (anonymous) namespace //------------------------------------------------------------------------ // optFindNaturalLoops: Find the natural loops, using dominators. Note that the test for // a loop is slightly different from the standard one, because we have not done a depth // first reordering of the basic blocks. // // See LoopSearch class comment header for a description of the loops found. // // We will find and record a maximum of BasicBlock::MAX_LOOP_NUM loops (currently 64). 
//
void Compiler::optFindNaturalLoops()
{
#ifdef DEBUG
    if (verbose)
    {
        printf("*************** In optFindNaturalLoops()\n");
    }
#endif // DEBUG

    noway_assert(fgDomsComputed);
    assert(fgHasLoops);

#if COUNT_LOOPS
    hasMethodLoops         = false;
    loopsThisMethod        = 0;
    loopOverflowThisMethod = false;
#endif

    LoopSearch search(this);

    // Consider every block (except the last, which can't be a loop TOP) as a potential TOP,
    // with its lexical predecessor as the candidate HEAD.
    for (BasicBlock* head = fgFirstBB; head->bbNext != nullptr; head = head->bbNext)
    {
        BasicBlock* top = head->bbNext;

        // Blocks that are rarely run have a zero bbWeight and should never be optimized here.
        if (top->bbWeight == BB_ZERO_WEIGHT)
        {
            continue;
        }

        for (BasicBlock* const predBlock : top->PredBlocks())
        {
            if (search.FindLoop(head, top, predBlock))
            {
                // Found a loop; record it and see if we've hit the limit.
                bool recordedLoop = search.RecordLoop();

                (void)recordedLoop; // avoid unused variable warnings in COUNT_LOOPS and !DEBUG

#if COUNT_LOOPS
                if (!hasMethodLoops)
                {
                    // Mark the method as containing natural loops
                    totalLoopMethods++;
                    hasMethodLoops = true;
                }

                // Increment total number of loops found
                totalLoopCount++;
                loopsThisMethod++;

                // Keep track of the number of exits
                loopExitCountTable.record(static_cast<unsigned>(search.GetExitCount()));

                // Note that we continue to look for loops even if
                // (optLoopCount == BasicBlock::MAX_LOOP_NUM), in contrast to the !COUNT_LOOPS code below.
                // This gives us a better count and stats. Hopefully it doesn't affect actual codegen.
                CLANG_FORMAT_COMMENT_ANCHOR;

#else  // COUNT_LOOPS
                assert(recordedLoop);
                if (optLoopCount == BasicBlock::MAX_LOOP_NUM)
                {
                    // We won't be able to record any more loops, so stop looking.
                    goto NO_MORE_LOOPS;
                }
#endif // COUNT_LOOPS

                // Continue searching preds of `top` to see if any other are
                // back-edges (this can happen for nested loops). The iteration
                // is safe because the compaction we do only modifies predecessor
                // lists of blocks that gain or lose fall-through from their
                // `bbPrev`, but since the motion is from within the loop to below
                // it, we know we're not altering the relationship between `top`
                // and its `bbPrev`.
            }
        }
    }

#if !COUNT_LOOPS
NO_MORE_LOOPS:
#endif // !COUNT_LOOPS

#if COUNT_LOOPS
    loopCountTable.record(loopsThisMethod);
    if (maxLoopsPerMethod < loopsThisMethod)
    {
        maxLoopsPerMethod = loopsThisMethod;
    }
    if (loopOverflowThisMethod)
    {
        totalLoopOverflows++;
    }
#endif // COUNT_LOOPS

    bool mod = search.ChangedFlowGraph();

    if (mod)
    {
        // Need to renumber blocks now since loop canonicalization
        // depends on it; can defer the rest of fgUpdateChangedFlowGraph()
        // until after canonicalizing loops. Dominator information is
        // recorded in terms of block numbers, so flag it invalid.
        fgDomsComputed = false;
        fgRenumberBlocks();
    }

    // Now the loop indices are stable. We can figure out parent/child relationships
    // (using table indices to name loops), and label blocks.
    for (unsigned char loopInd = 1; loopInd < optLoopCount; loopInd++)
    {
        // Scan downward from loopInd for the innermost containing loop; entries preceding
        // loopInd in the table lexically contain it if they contain it at all.
        for (unsigned char possibleParent = loopInd; possibleParent > 0;)
        {
            possibleParent--;
            if (optLoopTable[possibleParent].lpContains(optLoopTable[loopInd]))
            {
                optLoopTable[loopInd].lpParent       = possibleParent;
                optLoopTable[loopInd].lpSibling      = optLoopTable[possibleParent].lpChild;
                optLoopTable[possibleParent].lpChild = loopInd;
                break;
            }
        }
    }

    // Now label the blocks with the innermost loop to which they belong. Since parents
    // precede children in the table, doing the labeling for each loop in order will achieve
    // this -- the innermost loop labeling will be done last. (Inner loop blocks will be
    // labeled multiple times before being correct at the end.)
    for (unsigned char loopInd = 0; loopInd < optLoopCount; loopInd++)
    {
        for (BasicBlock* const blk : optLoopTable[loopInd].LoopBlocks())
        {
            blk->bbNatLoopNum = loopInd;
        }
    }

    // Make sure that loops are canonical: that every loop has a unique "top", by creating an empty "nop"
    // one, if necessary, for loops containing others that share a "top."
    for (unsigned char loopInd = 0; loopInd < optLoopCount; loopInd++)
    {
        // Traverse the outermost loops as entries into the loop nest; so skip non-outermost.
        if (optLoopTable[loopInd].lpParent != BasicBlock::NOT_IN_LOOP)
        {
            continue;
        }

        // Otherwise...
        if (optCanonicalizeLoopNest(loopInd))
        {
            mod = true;
        }
    }
    if (mod)
    {
        constexpr bool computePreds = true;
        fgUpdateChangedFlowGraph(computePreds);
    }

    if (false /* pre-header stress */)
    {
        // Stress mode: aggressively create loop pre-header for every loop.
        for (unsigned loopInd = 0; loopInd < optLoopCount; loopInd++)
        {
            fgCreateLoopPreHeader(loopInd);
        }

        if (fgModified)
        {
            // The predecessors were maintained in fgCreateLoopPreHeader; don't rebuild them.
            constexpr bool computePreds = false;
            constexpr bool computeDoms  = true;
            fgUpdateChangedFlowGraph(computePreds, computeDoms);
        }
    }

#ifdef DEBUG
    if (verbose && (optLoopCount > 0))
    {
        optPrintLoopTable();
    }
#endif // DEBUG
}

//------------------------------------------------------------------------
// optIdentifyLoopsForAlignment: Determine which loops should be considered for alignment.
//
// All innermost loops whose block weight meets a threshold are candidates for alignment.
// The `first` block of the loop is marked with the BBF_LOOP_ALIGN flag to indicate this
// (the loop table itself is not changed).
//
// Depends on the loop table, and on block weights being set.
//
void Compiler::optIdentifyLoopsForAlignment()
{
#if FEATURE_LOOP_ALIGN
    if (codeGen->ShouldAlignLoops())
    {
        for (BasicBlock::loopNumber loopInd = 0; loopInd < optLoopCount; loopInd++)
        {
            // An innerloop candidate that might need alignment
            if (optLoopTable[loopInd].lpChild == BasicBlock::NOT_IN_LOOP)
            {
                BasicBlock* top       = optLoopTable[loopInd].lpTop;
                weight_t    topWeight = top->getBBWeight(this);
                if (topWeight >= (opts.compJitAlignLoopMinBlockWeight * BB_UNITY_WEIGHT))
                {
                    // Sometimes with JitOptRepeat > 1, we might end up finding the loops twice. In such
                    // cases, make sure to count them just once.
                    if (!top->isLoopAlign())
                    {
                        loopAlignCandidates++;
                        top->bbFlags |= BBF_LOOP_ALIGN;
                        JITDUMP(FMT_LP " that starts at " FMT_BB " needs alignment, weight=" FMT_WT ".\n", loopInd,
                                top->bbNum, top->getBBWeight(this));
                    }
                }
                else
                {
                    JITDUMP("Skip alignment for " FMT_LP " that starts at " FMT_BB " weight=" FMT_WT ".\n", loopInd,
                            top->bbNum, topWeight);
                }
            }
        }
    }
#endif
}

//------------------------------------------------------------------------
// optRedirectBlock: Replace the branch successors of a block based on a block map.
//
// Updates the successors of `blk`: if `blk2` is a branch successor of `blk`, and there is a mapping
// for `blk2->blk3` in `redirectMap`, change `blk` so that `blk3` is this branch successor.
//
// Note that fall-through successors are not modified, including predecessor lists.
//
// Arguments:
//     blk          - block to redirect
//     redirectMap  - block->block map specifying how the `blk` target will be redirected.
//     updatePreds  - if `true`, update the predecessor lists to match.
//
void Compiler::optRedirectBlock(BasicBlock* blk, BlockToBlockMap* redirectMap, const bool updatePreds)
{
    BasicBlock* newJumpDest = nullptr;
    switch (blk->bbJumpKind)
    {
        case BBJ_NONE:
        case BBJ_THROW:
        case BBJ_RETURN:
        case BBJ_EHFILTERRET:
        case BBJ_EHFINALLYRET:
        case BBJ_EHCATCHRET:
            // These have no jump destination to update.
            break;

        case BBJ_ALWAYS:
        case BBJ_LEAVE:
        case BBJ_CALLFINALLY:
        case BBJ_COND:
            // All of these have a single jump destination to update.
            if (redirectMap->Lookup(blk->bbJumpDest, &newJumpDest))
            {
                if (updatePreds)
                {
                    fgRemoveRefPred(blk->bbJumpDest, blk);
                    fgAddRefPred(newJumpDest, blk);
                }
                blk->bbJumpDest = newJumpDest;
            }
            break;

        case BBJ_SWITCH:
        {
            bool redirected = false;
            for (unsigned i = 0; i < blk->bbJumpSwt->bbsCount; i++)
            {
                BasicBlock* switchDest = blk->bbJumpSwt->bbsDstTab[i];
                if (redirectMap->Lookup(switchDest, &newJumpDest))
                {
                    if (updatePreds)
                    {
                        fgRemoveRefPred(switchDest, blk);
                        fgAddRefPred(newJumpDest, blk);
                    }
                    blk->bbJumpSwt->bbsDstTab[i] = newJumpDest;
                    redirected                   = true;
                }
            }
            // If any redirections happened, invalidate the switch table map for the switch.
            if (redirected)
            {
                // Don't create a new map just to try to remove an entry.
                BlockToSwitchDescMap* switchMap = GetSwitchDescMap(/* createIfNull */ false);
                if (switchMap != nullptr)
                {
                    switchMap->Remove(blk);
                }
            }
        }
        break;

        default:
            unreached();
    }
}

// TODO-Cleanup: This should be a static member of the BasicBlock class.
void Compiler::optCopyBlkDest(BasicBlock* from, BasicBlock* to)
{
    assert(from->bbJumpKind == to->bbJumpKind); // Precondition.

    // copy the jump destination(s) from "from" to "to".
    switch (to->bbJumpKind)
    {
        case BBJ_ALWAYS:
        case BBJ_LEAVE:
        case BBJ_CALLFINALLY:
        case BBJ_COND:
            // All of these have a single jump destination to update.
            to->bbJumpDest = from->bbJumpDest;
            break;

        case BBJ_SWITCH:
            to->bbJumpSwt = new (this, CMK_BasicBlock) BBswtDesc(this, from->bbJumpSwt);
            break;

        default:
            break;
    }
}

// Returns true if 'block' is an entry block for any loop in 'optLoopTable'
bool Compiler::optIsLoopEntry(BasicBlock* block) const
{
    for (unsigned char loopInd = 0; loopInd < optLoopCount; loopInd++)
    {
        // Skip loops that have been removed from the table.
        if ((optLoopTable[loopInd].lpFlags & LPFLG_REMOVED) != 0)
        {
            continue;
        }

        if (optLoopTable[loopInd].lpEntry == block)
        {
            return true;
        }
    }
    return false;
}

// Canonicalize the loop nest rooted at parent loop 'loopInd'.
// Returns 'true' if the flow graph is modified.
bool Compiler::optCanonicalizeLoopNest(unsigned char loopInd)
{
    bool modified = false;

    // Is the top of the current loop in any nested loop?
    if (optLoopTable[loopInd].lpTop->bbNatLoopNum != loopInd)
    {
        if (optCanonicalizeLoop(loopInd))
        {
            modified = true;
        }
    }

    // Recurse into the child loops.
    for (unsigned char child = optLoopTable[loopInd].lpChild; //
         child != BasicBlock::NOT_IN_LOOP;                    //
         child = optLoopTable[child].lpSibling)
    {
        if (optCanonicalizeLoopNest(child))
        {
            modified = true;
        }
    }

    return modified;
}

bool Compiler::optCanonicalizeLoop(unsigned char loopInd)
{
    // Is the top uniquely part of the current loop?
    BasicBlock* t = optLoopTable[loopInd].lpTop;

    if (t->bbNatLoopNum == loopInd)
    {
        return false;
    }

    JITDUMP("in optCanonicalizeLoop: " FMT_LP " has top " FMT_BB " (bottom " FMT_BB ") with natural loop number " FMT_LP
            ": need to canonicalize\n",
            loopInd, t->bbNum, optLoopTable[loopInd].lpBottom->bbNum, t->bbNatLoopNum);

    // Otherwise, the top of this loop is also part of a nested loop.
    //
    // Insert a new unique top for this loop. We must be careful to put this new
    // block in the correct EH region. Note that t->bbPrev might be in a different
    // EH region. For example:
    //
    // try {
    //      ...
    //      BB07
    // }
    // BB08 // "first"
    //
    // In this case, first->bbPrev is BB07, which is in a different 'try' region.
    // On the other hand, the first block of multiple loops might be the first
    // block of a 'try' region that is completely contained in the multiple loops.
    // for example:
    //
    // BB08 try { }
    // ...
    // BB10 BBJ_ALWAYS => BB08
    // ...
    // BB12 BBJ_ALWAYS => BB08
    //
    // Here, we have two loops, both with BB08 as the "first" block. Block BB08
    // is a single-block "try" region. Neither loop "bottom" block is in the same
    // "try" region as BB08. This is legal because you can jump to the first block
    // of a try region. With EH normalization, no two "try" regions will share
    // this block. In this case, we need to insert a new block for the outer loop
    // in the same EH region as the branch from the "bottom":
    //
    // BB30 BBJ_NONE
    // BB08 try { }
    // ...
    // BB10 BBJ_ALWAYS => BB08
    // ...
    // BB12 BBJ_ALWAYS => BB30
    //
    // Another possibility is that the "first" block of the loop nest can be the first block
    // of a "try" region that also has other predecessors than those in the loop, or even in
    // the "try" region (since blocks can target the first block of a "try" region). For example:
    //
    // BB08 try {
    // ...
    // BB10 BBJ_ALWAYS => BB08
    // ...
    // BB12 BBJ_ALWAYS => BB08
    // BB13 }
    // ...
    // BB20 BBJ_ALWAYS => BB08
    // ...
    // BB25 BBJ_ALWAYS => BB08
    //
    // Here, BB08 has 4 flow graph predecessors: BB10, BB12, BB20, BB25. These are all potential loop
    // bottoms, for four possible nested loops. However, we require all the loop bottoms to be in the
    // same EH region. For loops BB08..BB10 and BB08..BB12, we need to add a new "top" block within
    // the try region, immediately before BB08. The bottom of the loop BB08..BB10 loop will target the
    // old BB08, and the bottom of the BB08..BB12 loop will target the new loop header. The other branches
    // (BB20, BB25) must target the new loop header, both for correctness, and to avoid the illegal
    // situation of branching to a non-first block of a 'try' region.
    //
    // We can also have a loop nest where the "first" block is outside of a "try" region
    // and the back edges are inside a "try" region, for example:
    //
    // BB02 // "first"
    // ...
    // BB09 try { BBJ_COND => BB02
    // ...
    // BB15 BBJ_COND => BB02
    // ...
    // BB21 } // end of "try"
    //
    // In this case, both loop back edges were formed by "leave" instructions that were
    // imported into branches that were later made conditional. In this case, we don't
    // want to copy the EH region of the back edge, since that would create a block
    // outside of and disjoint with the "try" region of the back edge. However, to
    // simplify things, we disqualify this type of loop, so we should never see this here.

    BasicBlock* h = optLoopTable[loopInd].lpHead;
    BasicBlock* b = optLoopTable[loopInd].lpBottom;

    // The loop must be entirely contained within a single handler region.
    assert(BasicBlock::sameHndRegion(t, b));

    // If the bottom block is in the same "try" region, then we extend the EH
    // region. Otherwise, we add the new block outside the "try" region.
    const bool  extendRegion = BasicBlock::sameTryRegion(t, b);
    BasicBlock* newT         = fgNewBBbefore(BBJ_NONE, t, extendRegion);
    if (!extendRegion)
    {
        // We need to set the EH region manually. Set it to be the same
        // as the bottom block.
        newT->copyEHRegion(b);
    }

    // The new block can reach the same set of blocks as the old one, but don't try to reflect
    // that in its reachability set here -- creating the new block may have changed the BlockSet
    // representation from short to long, and canonicalizing loops is immediately followed by
    // a call to fgUpdateChangedFlowGraph which will recompute the reachability sets anyway.

    // Redirect the "bottom" of the current loop to "newT".
    BlockToBlockMap* blockMap = new (getAllocator(CMK_LoopOpt)) BlockToBlockMap(getAllocator(CMK_LoopOpt));
    blockMap->Set(t, newT);
    optRedirectBlock(b, blockMap);

    // Redirect non-loop preds of "t" to also go to "newT". Inner loops that also branch to "t" should continue
    // to do so. However, there may be other predecessors from outside the loop nest that need to be updated
    // to point to "newT". This normally wouldn't happen, since they too would be part of the loop nest. However,
    // they might have been prevented from participating in the loop nest due to different EH nesting, or some
    // other reason.
    //
    // Note that optRedirectBlock doesn't update the predecessors list. So, if the same 't' block is processed
    // multiple times while canonicalizing multiple loop nests, we'll attempt to redirect a predecessor multiple times.
    // This is ok, because after the first redirection, the topPredBlock branch target will no longer match the source
    // edge of the blockMap, so nothing will happen.
    bool firstPred = true;
    for (BasicBlock* const topPredBlock : t->PredBlocks())
    {
        // Skip if topPredBlock is in the loop.
        // Note that this uses block number to detect membership in the loop. We are adding blocks during
        // canonicalization, and those block numbers will be new, and larger than previous blocks. However, we work
        // outside-in, so we shouldn't encounter the new blocks at the loop boundaries, or in the predecessor lists.
        if (t->bbNum <= topPredBlock->bbNum && topPredBlock->bbNum <= b->bbNum)
        {
            JITDUMP("in optCanonicalizeLoop: 'top' predecessor " FMT_BB " is in the range of " FMT_LP " (" FMT_BB
                    ".." FMT_BB "); not redirecting its bottom edge\n",
                    topPredBlock->bbNum, loopInd, t->bbNum, b->bbNum);
            continue;
        }

        JITDUMP("in optCanonicalizeLoop: redirect top predecessor " FMT_BB " to " FMT_BB "\n", topPredBlock->bbNum,
                newT->bbNum);
        optRedirectBlock(topPredBlock, blockMap);

        // When we have profile data then the 'newT' block will inherit topPredBlock profile weight
        if (topPredBlock->hasProfileWeight())
        {
            // This corrects an issue when the topPredBlock has a profile based weight
            //
            if (firstPred)
            {
                JITDUMP("in optCanonicalizeLoop: block " FMT_BB " will inheritWeight from " FMT_BB "\n", newT->bbNum,
                        topPredBlock->bbNum);

                newT->inheritWeight(topPredBlock);
                firstPred = false;
            }
            else
            {
                JITDUMP("in optCanonicalizeLoop: block " FMT_BB " will also contribute to the weight of " FMT_BB "\n",
                        newT->bbNum, topPredBlock->bbNum);

                weight_t newWeight = newT->getBBWeight(this) + topPredBlock->getBBWeight(this);
                newT->setBBProfileWeight(newWeight);
            }
        }
    }

    assert(newT->bbNext == t);

    // If it had been a do-while loop (top == entry), update entry, as well.
    BasicBlock* origE = optLoopTable[loopInd].lpEntry;
    if (optLoopTable[loopInd].lpTop == origE)
    {
        optLoopTable[loopInd].lpEntry = newT;
    }
    optLoopTable[loopInd].lpTop = newT;

    newT->bbNatLoopNum = loopInd;

    JITDUMP("in optCanonicalizeLoop: made new block " FMT_BB " [%p] the new unique top of loop %d.\n", newT->bbNum,
            dspPtr(newT), loopInd);

    // Make sure the head block still goes to the entry...
    if (h->bbJumpKind == BBJ_NONE && h->bbNext != optLoopTable[loopInd].lpEntry)
    {
        h->bbJumpKind = BBJ_ALWAYS;
        h->bbJumpDest = optLoopTable[loopInd].lpEntry;
    }
    else if (h->bbJumpKind == BBJ_COND && h->bbNext == newT && newT != optLoopTable[loopInd].lpEntry)
    {
        BasicBlock* h2               = fgNewBBafter(BBJ_ALWAYS, h, /*extendRegion*/ true);
        optLoopTable[loopInd].lpHead = h2;
        h2->bbJumpDest               = optLoopTable[loopInd].lpEntry;
        h2->bbStmtList               = nullptr;
        fgInsertStmtAtEnd(h2, fgNewStmtFromTree(gtNewOperNode(GT_NOP, TYP_VOID, nullptr)));
    }

    // If any loops nested in "loopInd" have the same head and entry as "loopInd",
    // it must be the case that they were do-while's (since "h" fell through to the entry).
    // The new node "newT" becomes the head of such loops.
    for (unsigned char childLoop = optLoopTable[loopInd].lpChild; //
         childLoop != BasicBlock::NOT_IN_LOOP;                    //
         childLoop = optLoopTable[childLoop].lpSibling)
    {
        if (optLoopTable[childLoop].lpEntry == origE && optLoopTable[childLoop].lpHead == h &&
            newT->bbJumpKind == BBJ_NONE && newT->bbNext == origE)
        {
            optUpdateLoopHead(childLoop, h, newT);
        }
    }
    return true;
}

//-----------------------------------------------------------------------------
// optLoopContains: Check if one loop contains another
//
// Arguments:
//    l1 -- loop num of containing loop (must be valid loop num)
//    l2 -- loop num of contained loop (valid loop num, or NOT_IN_LOOP)
//
// Returns:
//    True if loop described by l2 is contained within l1.
//
// Notes:
//    A loop contains itself.
// bool Compiler::optLoopContains(unsigned l1, unsigned l2) const { assert(l1 < optLoopCount); assert((l2 < optLoopCount) || (l2 == BasicBlock::NOT_IN_LOOP)); if (l1 == l2) { return true; } else if (l2 == BasicBlock::NOT_IN_LOOP) { return false; } else { return optLoopContains(l1, optLoopTable[l2].lpParent); } } void Compiler::optUpdateLoopHead(unsigned loopInd, BasicBlock* from, BasicBlock* to) { assert(optLoopTable[loopInd].lpHead == from); optLoopTable[loopInd].lpHead = to; for (unsigned char childLoop = optLoopTable[loopInd].lpChild; childLoop != BasicBlock::NOT_IN_LOOP; childLoop = optLoopTable[childLoop].lpSibling) { if (optLoopTable[childLoop].lpHead == from) { optUpdateLoopHead(childLoop, from, to); } } } //----------------------------------------------------------------------------- // optIterSmallOverflow: Helper for loop unrolling. Determine if "i += const" will // cause an overflow exception for the small types. // // Arguments: // iterAtExit - iteration constant at loop exit // incrType - type of increment // // Returns: // true if overflow // // static bool Compiler::optIterSmallOverflow(int iterAtExit, var_types incrType) { int type_MAX; switch (incrType) { case TYP_BYTE: type_MAX = SCHAR_MAX; break; case TYP_UBYTE: type_MAX = UCHAR_MAX; break; case TYP_SHORT: type_MAX = SHRT_MAX; break; case TYP_USHORT: type_MAX = USHRT_MAX; break; case TYP_UINT: // Detected by checking for 32bit .... case TYP_INT: return false; // ... overflow same as done for TYP_INT default: NO_WAY("Bad type"); } if (iterAtExit > type_MAX) { return true; } else { return false; } } //----------------------------------------------------------------------------- // optIterSmallUnderflow: Helper for loop unrolling. Determine if "i -= const" will // cause an underflow exception for the small types. 
//
// Arguments:
//    iterAtExit - iteration constant at loop exit
//    decrType   - type of decrement
//
// Returns:
//     true if overflow
//
// static
bool Compiler::optIterSmallUnderflow(int iterAtExit, var_types decrType)
{
    // Smallest value representable in the small iterator type; an exit value
    // below this means the narrowing decrement wrapped around.
    int type_MIN;

    switch (decrType)
    {
        case TYP_BYTE:
            type_MIN = SCHAR_MIN;
            break;
        case TYP_SHORT:
            type_MIN = SHRT_MIN;
            break;
        case TYP_UBYTE:
            type_MIN = 0;
            break;
        case TYP_USHORT:
            type_MIN = 0;
            break;

        case TYP_UINT: // Detected by checking for 32bit ....
        case TYP_INT:
            return false; // ... underflow same as done for TYP_INT

        default:
            NO_WAY("Bad type");
    }

    if (iterAtExit < type_MIN)
    {
        return true;
    }
    else
    {
        return false;
    }
}

//-----------------------------------------------------------------------------
// optComputeLoopRep: Helper for loop unrolling. Computes the number of repetitions
// in a constant loop.
//
// Arguments:
//    constInit    - loop constant initial value
//    constLimit   - loop constant limit
//    iterInc      - loop iteration increment
//    iterOper     - loop iteration increment operator (ADD, SUB, etc.)
//    iterOperType - iteration operator type
//    testOper     - type of loop test (i.e. GT_LE, GT_GE, etc.)
//    unsTest      - true if test is unsigned
//    dupCond      - true if the loop head contains a test which skips this loop
//    iterCount    - *iterCount is set to the iteration count, if the function returns `true`
//
// Returns:
//    true if the loop has a constant repetition count, false if that cannot be proven
//
bool Compiler::optComputeLoopRep(int        constInit,
                                 int        constLimit,
                                 int        iterInc,
                                 genTreeOps iterOper,
                                 var_types  iterOperType,
                                 genTreeOps testOper,
                                 bool       unsTest,
                                 bool       dupCond,
                                 unsigned*  iterCount)
{
    noway_assert(genActualType(iterOperType) == TYP_INT);

    // The init/limit values are widened to 64 bits so that 32-bit wrap-around
    // at the loop exit can be detected by a plain signed comparison below.
    __int64  constInitX;
    __int64  constLimitX;
    unsigned loopCount;
    int      iterSign;

    // Using this, we can just do a signed comparison with other 32 bit values.
    if (unsTest)
    {
        constLimitX = (unsigned int)constLimit;
    }
    else
    {
        constLimitX = (signed int)constLimit;
    }

    switch (iterOperType)
    {
// For small types, the iteration operator will narrow these values if big
#define INIT_ITER_BY_TYPE(type)                                                                                        \
    constInitX = (type)constInit;                                                                                      \
    iterInc    = (type)iterInc;

        case TYP_BYTE:
            INIT_ITER_BY_TYPE(signed char);
            break;
        case TYP_UBYTE:
            INIT_ITER_BY_TYPE(unsigned char);
            break;
        case TYP_SHORT:
            INIT_ITER_BY_TYPE(signed short);
            break;
        case TYP_USHORT:
            INIT_ITER_BY_TYPE(unsigned short);
            break;

        // For the big types, 32 bit arithmetic is performed
        case TYP_INT:
        case TYP_UINT:
            if (unsTest)
            {
                constInitX = (unsigned int)constInit;
            }
            else
            {
                constInitX = (signed int)constInit;
            }
            break;

        default:
            noway_assert(!"Bad type");
            NO_WAY("Bad type");
    }

    // If iterInc is zero we have an infinite loop.
    if (iterInc == 0)
    {
        return false;
    }

    // Set iterSign to +1 for positive iterInc and -1 for negative iterInc.
    iterSign = (iterInc > 0) ? +1 : -1;

    // Initialize loopCount to zero.
    loopCount = 0;

    // If dupCond is true then the loop head contains a test which skips
    // this loop, if the constInit does not pass the loop test.
    // Such a loop can execute zero times.
    // If dupCond is false then we have a true do-while loop which we
    // always execute the loop once before performing the loop test
    if (!dupCond)
    {
        loopCount += 1;
        constInitX += iterInc;
    }

    // bail if count is based on wrap-around math
    if (iterInc > 0)
    {
        if (constLimitX < constInitX)
        {
            return false;
        }
    }
    else if (constLimitX > constInitX)
    {
        return false;
    }

    // Compute the number of repetitions.

    switch (testOper)
    {
        __int64 iterAtExitX;

        case GT_EQ:
            // Something like "for (i=init; i == lim; i++)" doesn't make any sense.
            return false;

        case GT_NE:
            // Consider: "for (i = init; i != lim; i += const)"
            // This is tricky since it may have a constant number of iterations or loop forever.
            // We have to compute "(lim - init) mod iterInc" to see if it is zero.
            // If "mod iterInc" is not zero then the limit test will miss and a wrap will occur
            // which is probably not what the end user wanted, but it is legal.

            if (iterInc > 0)
            {
                // Stepping by one, i.e. Mod with 1 is always zero.
                if (iterInc != 1)
                {
                    if (((constLimitX - constInitX) % iterInc) != 0)
                    {
                        return false;
                    }
                }
            }
            else
            {
                noway_assert(iterInc < 0);
                // Stepping by -1, i.e. Mod with 1 is always zero.
                if (iterInc != -1)
                {
                    if (((constInitX - constLimitX) % (-iterInc)) != 0)
                    {
                        return false;
                    }
                }
            }

            switch (iterOper)
            {
                case GT_SUB:
                    // A decrementing loop is normalized to the GT_ADD path by
                    // negating the increment.
                    iterInc = -iterInc;
                    FALLTHROUGH;

                case GT_ADD:
                    if (constInitX != constLimitX)
                    {
                        loopCount += (unsigned)((constLimitX - constInitX - iterSign) / iterInc) + 1;
                    }

                    iterAtExitX = (int)(constInitX + iterInc * (int)loopCount);

                    if (unsTest)
                    {
                        iterAtExitX = (unsigned)iterAtExitX;
                    }

                    // Check if iteration incr will cause overflow for small types
                    if (optIterSmallOverflow((int)iterAtExitX, iterOperType))
                    {
                        return false;
                    }

                    // iterator with 32bit overflow. Bad for TYP_(U)INT
                    if (iterAtExitX < constLimitX)
                    {
                        return false;
                    }

                    *iterCount = loopCount;
                    return true;

                case GT_MUL:
                case GT_DIV:
                case GT_RSH:
                case GT_LSH:
                case GT_UDIV:
                    return false;

                default:
                    noway_assert(!"Unknown operator for loop iterator");
                    return false;
            }

        case GT_LT:
            switch (iterOper)
            {
                case GT_SUB:
                    iterInc = -iterInc;
                    FALLTHROUGH;

                case GT_ADD:
                    if (constInitX < constLimitX)
                    {
                        loopCount += (unsigned)((constLimitX - constInitX - iterSign) / iterInc) + 1;
                    }

                    iterAtExitX = (int)(constInitX + iterInc * (int)loopCount);

                    if (unsTest)
                    {
                        iterAtExitX = (unsigned)iterAtExitX;
                    }

                    // Check if iteration incr will cause overflow for small types
                    if (optIterSmallOverflow((int)iterAtExitX, iterOperType))
                    {
                        return false;
                    }

                    // iterator with 32bit overflow. Bad for TYP_(U)INT
                    if (iterAtExitX < constLimitX)
                    {
                        return false;
                    }

                    *iterCount = loopCount;
                    return true;

                case GT_MUL:
                case GT_DIV:
                case GT_RSH:
                case GT_LSH:
                case GT_UDIV:
                    return false;

                default:
                    noway_assert(!"Unknown operator for loop iterator");
                    return false;
            }

        case GT_LE:
            switch (iterOper)
            {
                case GT_SUB:
                    iterInc = -iterInc;
                    FALLTHROUGH;

                case GT_ADD:
                    if (constInitX <= constLimitX)
                    {
                        loopCount += (unsigned)((constLimitX - constInitX) / iterInc) + 1;
                    }

                    iterAtExitX = (int)(constInitX + iterInc * (int)loopCount);

                    if (unsTest)
                    {
                        iterAtExitX = (unsigned)iterAtExitX;
                    }

                    // Check if iteration incr will cause overflow for small types
                    if (optIterSmallOverflow((int)iterAtExitX, iterOperType))
                    {
                        return false;
                    }

                    // iterator with 32bit overflow. Bad for TYP_(U)INT
                    if (iterAtExitX <= constLimitX)
                    {
                        return false;
                    }

                    *iterCount = loopCount;
                    return true;

                case GT_MUL:
                case GT_DIV:
                case GT_RSH:
                case GT_LSH:
                case GT_UDIV:
                    return false;

                default:
                    noway_assert(!"Unknown operator for loop iterator");
                    return false;
            }

        case GT_GT:
            switch (iterOper)
            {
                case GT_SUB:
                    iterInc = -iterInc;
                    FALLTHROUGH;

                case GT_ADD:
                    if (constInitX > constLimitX)
                    {
                        loopCount += (unsigned)((constLimitX - constInitX - iterSign) / iterInc) + 1;
                    }

                    iterAtExitX = (int)(constInitX + iterInc * (int)loopCount);

                    if (unsTest)
                    {
                        iterAtExitX = (unsigned)iterAtExitX;
                    }

                    // Check if small types will underflow
                    if (optIterSmallUnderflow((int)iterAtExitX, iterOperType))
                    {
                        return false;
                    }

                    // iterator with 32bit underflow. Bad for TYP_INT and unsigneds
                    if (iterAtExitX > constLimitX)
                    {
                        return false;
                    }

                    *iterCount = loopCount;
                    return true;

                case GT_MUL:
                case GT_DIV:
                case GT_RSH:
                case GT_LSH:
                case GT_UDIV:
                    return false;

                default:
                    noway_assert(!"Unknown operator for loop iterator");
                    return false;
            }

        case GT_GE:
            switch (iterOper)
            {
                case GT_SUB:
                    iterInc = -iterInc;
                    FALLTHROUGH;

                case GT_ADD:
                    if (constInitX >= constLimitX)
                    {
                        loopCount += (unsigned)((constLimitX - constInitX) / iterInc) + 1;
                    }

                    iterAtExitX = (int)(constInitX + iterInc * (int)loopCount);

                    if (unsTest)
                    {
                        iterAtExitX = (unsigned)iterAtExitX;
                    }

                    // Check if small types will underflow
                    if (optIterSmallUnderflow((int)iterAtExitX, iterOperType))
                    {
                        return false;
                    }

                    // iterator with 32bit underflow. Bad for TYP_INT and unsigneds
                    if (iterAtExitX >= constLimitX)
                    {
                        return false;
                    }

                    *iterCount = loopCount;
                    return true;

                case GT_MUL:
                case GT_DIV:
                case GT_RSH:
                case GT_LSH:
                case GT_UDIV:
                    return false;

                default:
                    noway_assert(!"Unknown operator for loop iterator");
                    return false;
            }

        default:
            noway_assert(!"Unknown operator for loop condition");
    }

    return false;
}

#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif

//-----------------------------------------------------------------------------
// optUnrollLoops: Look for loop unrolling candidates and unroll them.
//
// Loops must be of the form:
//   for (i=icon; i<icon; i++) { ... }
//
// Loops handled are fully unrolled; there is no partial unrolling.
//
// Limitations: only the following loop types are handled:
//  1. "while" loops (top entry)
//  2. constant initializer, constant bound
//  3. The entire loop must be in the same EH region.
//  4. The loop iteration variable can't be address exposed.
//  5. The loop iteration variable can't be a promoted struct field.
//  6. We must be able to calculate the total constant iteration count.
//  7. On x86, there is a limit to the number of return blocks.
//     So if there are return blocks in the loop that
//     would be unrolled, the unrolled code can't exceed that limit.
//
// Cost heuristics:
//  1. there are cost metrics for maximum number of allowed iterations, and maximum unroll size
//  2. single-iteration loops are always allowed (to eliminate the loop structure).
//  3. otherwise, only loops where the limit is Vector<T>.Length are currently allowed
//
// In stress modes, these heuristic limits are expanded, and loops aren't required to have the
// Vector<T>.Length limit.
//
// Loops are processed from innermost to outermost order, to attempt to unroll the most nested loops first.
//
// Returns:
//   suitable phase status
//
PhaseStatus Compiler::optUnrollLoops()
{
    if (compCodeOpt() == SMALL_CODE)
    {
        return PhaseStatus::MODIFIED_NOTHING;
    }

    if (optLoopCount == 0)
    {
        return PhaseStatus::MODIFIED_NOTHING;
    }

#ifdef DEBUG
    if (JitConfig.JitNoUnroll())
    {
        return PhaseStatus::MODIFIED_NOTHING;
    }
#endif

#ifdef DEBUG
    if (verbose)
    {
        printf("*************** In optUnrollLoops()\n");
    }
#endif

    /* Look for loop unrolling candidates */

    bool change                 = false;
    bool anyNestedLoopsUnrolled = false;
    INDEBUG(int unrollCount = 0);    // count of loops unrolled
    INDEBUG(int unrollFailures = 0); // count of loops attempted to be unrolled, but failed

    // Iteration-count cap, indexed by the code optimization mode.
    static const unsigned ITER_LIMIT[COUNT_OPT_CODE + 1] = {
        10, // BLENDED_CODE
        0,  // SMALL_CODE
        20, // FAST_CODE
        0   // COUNT_OPT_CODE
    };

    assert(ITER_LIMIT[SMALL_CODE] == 0);
    assert(ITER_LIMIT[COUNT_OPT_CODE] == 0);

    unsigned iterLimit = ITER_LIMIT[compCodeOpt()];

#ifdef DEBUG
    if (compStressCompile(STRESS_UNROLL_LOOPS, 50))
    {
        iterLimit *= 10;
    }
#endif

    // Code-size cap for the unrolled body, indexed by the code optimization mode.
    static const int UNROLL_LIMIT_SZ[COUNT_OPT_CODE + 1] = {
        300, // BLENDED_CODE
        0,   // SMALL_CODE
        600, // FAST_CODE
        0    // COUNT_OPT_CODE
    };

    assert(UNROLL_LIMIT_SZ[SMALL_CODE] == 0);
    assert(UNROLL_LIMIT_SZ[COUNT_OPT_CODE] == 0);

    // Visit loops from highest to lowest number to visit them in innermost to outermost order.
    for (unsigned lnum = optLoopCount - 1; lnum != ~0U; --lnum)
    {
        // This is necessary due to an apparent analysis limitation since
        // optLoopCount must be strictly greater than 0 upon entry and lnum
        // cannot wrap due to the loop termination condition.
        PREFAST_ASSUME(lnum != 0U - 1);

        LoopDsc&    loop = optLoopTable[lnum];
        BasicBlock* head;
        BasicBlock* top;
        BasicBlock* bottom;
        BasicBlock* initBlock;

        bool       dupCond;      // Does the 'head' block contain a duplicate loop condition (zero trip test)?
        int        lbeg;         // initial value for iterator
        int        llim;         // limit value for iterator
        unsigned   lvar;         // iterator lclVar #
        int        iterInc;      // value to increment the iterator
        genTreeOps iterOper;     // type of iterator increment (i.e. ADD, SUB, etc.)
        var_types  iterOperType; // type result of the oper (for overflow instrs)
        genTreeOps testOper;     // type of loop test (i.e. GT_LE, GT_GE, etc.)
        bool       unsTest;      // Is the comparison unsigned?

        unsigned loopRetCount; // number of BBJ_RETURN blocks in loop
        unsigned totalIter;    // total number of iterations in the constant loop

        const unsigned loopFlags = loop.lpFlags;

        // Check for required flags:
        // LPFLG_CONST_INIT  - required because this transform only handles full unrolls
        // LPFLG_CONST_LIMIT - required because this transform only handles full unrolls
        const unsigned requiredFlags = LPFLG_CONST_INIT | LPFLG_CONST_LIMIT;
        if ((loopFlags & requiredFlags) != requiredFlags)
        {
            // Don't print to the JitDump about this common case.
            continue;
        }

        // Ignore if removed or marked as not unrollable.
        if (loopFlags & (LPFLG_DONT_UNROLL | LPFLG_REMOVED))
        {
            // Don't print to the JitDump about this common case.
            continue;
        }

        // This transform only handles loops of this form
        if (!loop.lpIsTopEntry())
        {
            JITDUMP("Failed to unroll loop " FMT_LP ": not top entry\n", lnum);
            continue;
        }

        head = loop.lpHead;
        noway_assert(head != nullptr);
        top = loop.lpTop;
        noway_assert(top != nullptr);
        bottom = loop.lpBottom;
        noway_assert(bottom != nullptr);

        // Get the loop data:
        //  - initial constant
        //  - limit constant
        //  - iterator
        //  - iterator increment
        //  - increment operation type (i.e. ADD, SUB, etc...)
        //  - loop test type (i.e. GT_GE, GT_LT, etc...)

        initBlock = loop.lpInitBlock;
        lbeg      = loop.lpConstInit;
        llim      = loop.lpConstLimit();
        testOper  = loop.lpTestOper();

        lvar     = loop.lpIterVar();
        iterInc  = loop.lpIterConst();
        iterOper = loop.lpIterOper();

        iterOperType = loop.lpIterOperType();
        unsTest      = (loop.lpTestTree->gtFlags & GTF_UNSIGNED) != 0;

        if (lvaTable[lvar].IsAddressExposed())
        {
            // If the loop iteration variable is address-exposed then bail
            JITDUMP("Failed to unroll loop " FMT_LP ": V%02u is address exposed\n", lnum, lvar);
            continue;
        }
        if (lvaTable[lvar].lvIsStructField)
        {
            // If the loop iteration variable is a promoted field from a struct then bail
            JITDUMP("Failed to unroll loop " FMT_LP ": V%02u is a promoted struct field\n", lnum, lvar);
            continue;
        }

        // Locate/initialize the increment/test statements.
        Statement* initStmt = initBlock->lastStmt();
        noway_assert((initStmt != nullptr) && (initStmt->GetNextStmt() == nullptr));

        Statement* testStmt = bottom->lastStmt();
        noway_assert((testStmt != nullptr) && (testStmt->GetNextStmt() == nullptr));

        Statement* incrStmt = testStmt->GetPrevStmt();
        noway_assert(incrStmt != nullptr);

        if (initStmt->GetRootNode()->OperIs(GT_JTRUE))
        {
            // Must be a duplicated loop condition.

            dupCond  = true;
            initStmt = initStmt->GetPrevStmt();
            noway_assert(initStmt != nullptr);
        }
        else
        {
            dupCond = false;
        }

        // Find the number of iterations - the function returns false if not a constant number.
        if (!optComputeLoopRep(lbeg, llim, iterInc, iterOper, iterOperType, testOper, unsTest, dupCond, &totalIter))
        {
            JITDUMP("Failed to unroll loop " FMT_LP ": not a constant iteration count\n", lnum);
            continue;
        }

        // Forget it if there are too many repetitions or not a constant loop.

        if (totalIter > iterLimit)
        {
            JITDUMP("Failed to unroll loop " FMT_LP ": too many iterations (%d > %d) (heuristic)\n", lnum, totalIter,
                    iterLimit);
            continue;
        }

        int unrollLimitSz = UNROLL_LIMIT_SZ[compCodeOpt()];

        if (INDEBUG(compStressCompile(STRESS_UNROLL_LOOPS, 50) ||) false)
        {
            // In stress mode, quadruple the size limit, and drop
            // the restriction that loop limit must be vector element count.
            unrollLimitSz *= 4;
        }
        else if (totalIter <= 1)
        {
            // No limit for single iteration loops
            // If there is no iteration (totalIter == 0), we will remove the loop body entirely.
            unrollLimitSz = INT_MAX;
        }
        else if (!(loopFlags & LPFLG_SIMD_LIMIT))
        {
            // Otherwise unroll only if limit is Vector_.Length
            // (as a heuristic, not for correctness/structural reasons)
            JITDUMP("Failed to unroll loop " FMT_LP ": constant limit isn't Vector<T>.Length (heuristic)\n", lnum);
            continue;
        }

        GenTree* incr = incrStmt->GetRootNode();

        // Don't unroll loops we don't understand.
        if (incr->gtOper != GT_ASG)
        {
            JITDUMP("Failed to unroll loop " FMT_LP ": unknown increment op (%s)\n", lnum,
                    GenTree::OpName(incr->gtOper));
            continue;
        }
        incr = incr->AsOp()->gtOp2;

        GenTree* init = initStmt->GetRootNode();

        // Make sure everything looks ok.
        // clang-format off
        if ((init->gtOper != GT_ASG) ||
            (init->AsOp()->gtOp1->gtOper != GT_LCL_VAR) ||
            (init->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum() != lvar) ||
            (init->AsOp()->gtOp2->gtOper != GT_CNS_INT) ||
            (init->AsOp()->gtOp2->AsIntCon()->gtIconVal != lbeg) ||

            !((incr->gtOper == GT_ADD) || (incr->gtOper == GT_SUB)) ||
            (incr->AsOp()->gtOp1->gtOper != GT_LCL_VAR) ||
            (incr->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum() != lvar) ||
            (incr->AsOp()->gtOp2->gtOper != GT_CNS_INT) ||
            (incr->AsOp()->gtOp2->AsIntCon()->gtIconVal != iterInc) ||

            (testStmt->GetRootNode()->gtOper != GT_JTRUE))
        {
            noway_assert(!"Bad precondition in Compiler::optUnrollLoops()");
            continue;
        }
        // clang-format on

        // Heuristic: Estimated cost in code size of the unrolled loop.

        {
            ClrSafeInt<unsigned> loopCostSz; // Cost is size of one iteration

            auto tryIndex = loop.lpTop->bbTryIndex;

            // Besides calculating the loop cost, also ensure that all loop blocks are within the same EH
            // region, and count the number of BBJ_RETURN blocks in the loop.
            loopRetCount = 0;
            for (BasicBlock* const block : loop.LoopBlocks())
            {
                if (block->bbTryIndex != tryIndex)
                {
                    // Unrolling would require cloning EH regions
                    JITDUMP("Failed to unroll loop " FMT_LP ": EH constraint\n", lnum);
                    goto DONE_LOOP;
                }

                if (block->bbJumpKind == BBJ_RETURN)
                {
                    ++loopRetCount;
                }

                for (Statement* const stmt : block->Statements())
                {
                    gtSetStmtInfo(stmt);
                    loopCostSz += stmt->GetCostSz();
                }
            }

#ifdef JIT32_GCENCODER
            if ((totalIter > 0) && (fgReturnCount + loopRetCount * (totalIter - 1) > SET_EPILOGCNT_MAX))
            {
                // Jit32 GC encoder can't report more than SET_EPILOGCNT_MAX epilogs.
                JITDUMP("Failed to unroll loop " FMT_LP ": GC encoder max epilog constraint\n", lnum);
                goto DONE_LOOP;
            }
#endif // !JIT32_GCENCODER

            // Compute the estimated increase in code size for the unrolled loop.
            ClrSafeInt<unsigned> fixedLoopCostSz(8);

            ClrSafeInt<int> unrollCostSz = ClrSafeInt<int>(loopCostSz * ClrSafeInt<unsigned>(totalIter)) -
                                           ClrSafeInt<int>(loopCostSz + fixedLoopCostSz);

            // Don't unroll if too much code duplication would result.

            if (unrollCostSz.IsOverflow() || (unrollCostSz.Value() > unrollLimitSz))
            {
                JITDUMP("Failed to unroll loop " FMT_LP ": size constraint (%d > %d) (heuristic)\n", lnum,
                        unrollCostSz.Value(), unrollLimitSz);
                goto DONE_LOOP;
            }

            // Looks like a good idea to unroll this loop, let's do it!
            CLANG_FORMAT_COMMENT_ANCHOR;

#ifdef DEBUG
            if (verbose)
            {
                printf("\nUnrolling loop ");
                optPrintLoopInfo(&loop);
                printf(" over V%02u from %u to %u unrollCostSz = %d\n\n", lvar, lbeg, llim, unrollCostSz);
            }
#endif
        }

#if FEATURE_LOOP_ALIGN
        for (BasicBlock* const block : loop.LoopBlocks())
        {
            block->unmarkLoopAlign(this DEBUG_ARG("Unrolled loop"));
        }
#endif

        // Create the unrolled loop statement list.
        {
            // When unrolling a loop, that loop disappears (and will be removed from the loop table). Each unrolled
            // block will be set to exist within the parent loop, if any. However, if we unroll a loop that has
            // nested loops, we will create multiple copies of the nested loops. This requires adding new loop table
            // entries to represent the new loops. Instead of trying to do this incrementally, in the case where
            // nested loops exist (in any unrolled loop) we rebuild the entire loop table after unrolling.

            BlockToBlockMap        blockMap(getAllocator(CMK_LoopOpt));
            BasicBlock*            insertAfter                    = bottom;
            BasicBlock::loopNumber newLoopNum                     = loop.lpParent;
            bool                   anyNestedLoopsUnrolledThisLoop = false;
            int                    lval;
            unsigned               iterToUnroll = totalIter; // The number of iterations left to unroll

            for (lval = lbeg; iterToUnroll > 0; iterToUnroll--)
            {
                // Note: we can't use the loop.LoopBlocks() iterator, as it captures loop.lpBottom->bbNext at the
                // beginning of iteration, and we insert blocks before that. So we need to evaluate lpBottom->bbNext
                // every iteration.
                for (BasicBlock* block = loop.lpTop; block != loop.lpBottom->bbNext; block = block->bbNext)
                {
                    BasicBlock* newBlock = insertAfter =
                        fgNewBBafter(block->bbJumpKind, insertAfter, /*extendRegion*/ true);
                    blockMap.Set(block, newBlock, BlockToBlockMap::Overwrite);

                    if (!BasicBlock::CloneBlockState(this, newBlock, block, lvar, lval))
                    {
                        // CloneBlockState (specifically, gtCloneExpr) doesn't handle everything. If it fails
                        // to clone a block in the loop, splice out and forget all the blocks we cloned so far:
                        // put the loop blocks back to how they were before we started cloning blocks,
                        // and abort unrolling the loop.

                        BasicBlock* oldBottomNext = insertAfter->bbNext;
                        bottom->bbNext            = oldBottomNext;
                        oldBottomNext->bbPrev     = bottom;
                        loop.lpFlags |= LPFLG_DONT_UNROLL; // Mark it so we don't try to unroll it again.
                        INDEBUG(++unrollFailures);
                        JITDUMP("Failed to unroll loop " FMT_LP ": block cloning failed on " FMT_BB "\n", lnum,
                                block->bbNum);
                        goto DONE_LOOP;
                    }

                    // All blocks in the unrolled loop will now be marked with the parent loop number. Note that
                    // if the loop being unrolled contains nested (child) loops, we will notice this below (when
                    // we set anyNestedLoopsUnrolledThisLoop), and that will cause us to rebuild the entire loop
                    // table and all loop annotations on blocks. However, if the loop contains no nested loops,
                    // setting the block `bbNatLoopNum` here is sufficient to incrementally update the block's
                    // loop info.

                    newBlock->bbNatLoopNum = newLoopNum;

                    // Block weight should no longer have the loop multiplier
                    //
                    // Note this is not quite right, as we may not have upscaled by this amount
                    // and we might not have upscaled at all, if we had profile data.
                    //
                    newBlock->scaleBBWeight(1.0 / BB_LOOP_WEIGHT_SCALE);

                    // Jump dests are set in a post-pass; make sure CloneBlockState hasn't tried to set them.
                    assert(newBlock->bbJumpDest == nullptr);

                    if (block == bottom)
                    {
                        // Remove the test; we're doing a full unroll.

                        Statement* testCopyStmt = newBlock->lastStmt();
                        GenTree*   testCopyExpr = testCopyStmt->GetRootNode();
                        assert(testCopyExpr->gtOper == GT_JTRUE);
                        GenTree* sideEffList = nullptr;
                        gtExtractSideEffList(testCopyExpr, &sideEffList, GTF_SIDE_EFFECT | GTF_ORDER_SIDEEFF);
                        if (sideEffList == nullptr)
                        {
                            fgRemoveStmt(newBlock, testCopyStmt);
                        }
                        else
                        {
                            testCopyStmt->SetRootNode(sideEffList);
                        }
                        newBlock->bbJumpKind = BBJ_NONE;
                    }
                }

                // Now redirect any branches within the newly-cloned iteration.
                // Don't include `bottom` in the iteration, since we've already changed the
                // newBlock->bbJumpKind, above.
                for (BasicBlock* block = loop.lpTop; block != loop.lpBottom; block = block->bbNext)
                {
                    BasicBlock* newBlock = blockMap[block];
                    optCopyBlkDest(block, newBlock);
                    optRedirectBlock(newBlock, &blockMap);
                }

                /* update the new value for the unrolled iterator */

                switch (iterOper)
                {
                    case GT_ADD:
                        lval += iterInc;
                        break;

                    case GT_SUB:
                        lval -= iterInc;
                        break;

                    case GT_RSH:
                    case GT_LSH:
                        noway_assert(!"Unrolling not implemented for this loop iterator");
                        goto DONE_LOOP;

                    default:
                        noway_assert(!"Unknown operator for constant loop iterator");
                        goto DONE_LOOP;
                }
            }

            // If we get here, we successfully cloned all the blocks in the unrolled loop.

            // Gut the old loop body
            for (BasicBlock* const block : loop.LoopBlocks())
            {
                // Check if the old loop body had any nested loops that got cloned. Note that we need to do this
                // here, and not in the loop above, to handle the special case where totalIter is zero, and the
                // above loop doesn't execute.
                if (block->bbNatLoopNum != lnum)
                {
                    anyNestedLoopsUnrolledThisLoop = true;
                }

                block->bbStmtList = nullptr;
                block->bbJumpKind = BBJ_NONE;
                block->bbFlags &= ~BBF_LOOP_HEAD;
                block->bbJumpDest   = nullptr;
                block->bbNatLoopNum = newLoopNum;
            }

            if (anyNestedLoopsUnrolledThisLoop)
            {
                anyNestedLoopsUnrolled = true;
            }

            // If the HEAD is a BBJ_COND drop the condition (and make HEAD a BBJ_NONE block).
            if (head->bbJumpKind == BBJ_COND)
            {
                testStmt = head->lastStmt();
                noway_assert(testStmt->GetRootNode()->gtOper == GT_JTRUE);
                fgRemoveStmt(head, testStmt);
                head->bbJumpKind = BBJ_NONE;
            }
            else
            {
                /* the loop must execute */
                noway_assert(head->bbJumpKind == BBJ_NONE);
            }

#ifdef DEBUG
            if (verbose)
            {
                printf("Whole unrolled loop:\n");

                gtDispTree(initStmt->GetRootNode());
                printf("\n");
                fgDumpTrees(top, insertAfter);

                if (anyNestedLoopsUnrolledThisLoop)
                {
                    printf("Unrolled loop " FMT_LP " contains nested loops\n", lnum);
                }
            }
#endif // DEBUG

            // Update loop table.
            optMarkLoopRemoved(lnum);

            // Note if we created new BBJ_RETURNs (or removed some).
            if (totalIter > 0)
            {
                fgReturnCount += loopRetCount * (totalIter - 1);
            }
            else
            {
                assert(totalIter == 0);
                assert(fgReturnCount >= loopRetCount);
                fgReturnCount -= loopRetCount;
            }

            // Remember that something has changed.
            INDEBUG(++unrollCount);
            change = true;
        }

    DONE_LOOP:;
    }

    if (change)
    {
#ifdef DEBUG
        if (verbose)
        {
            printf("\nFinished unrolling %d loops", unrollCount);
            if (unrollFailures > 0)
            {
                printf(", %d failures due to block cloning", unrollFailures);
            }
            printf("\n");
            if (anyNestedLoopsUnrolled)
            {
                printf("At least one unrolled loop contains nested loops; recomputing loop table\n");
            }
        }
#endif // DEBUG

        // If we unrolled any nested loops, we rebuild the loop table (including recomputing the
        // return blocks list).
        constexpr bool computePreds        = true;
        constexpr bool computeDoms         = true;
        const bool     computeReturnBlocks = anyNestedLoopsUnrolled;
        const bool     computeLoops        = anyNestedLoopsUnrolled;
        fgUpdateChangedFlowGraph(computePreds, computeDoms, computeReturnBlocks, computeLoops);

        DBEXEC(verbose, fgDispBasicBlocks());
    }
    else
    {
#ifdef DEBUG
        assert(unrollCount == 0);
        assert(!anyNestedLoopsUnrolled);

        if (unrollFailures > 0)
        {
            printf("\nFinished loop unrolling, %d failures due to block cloning\n", unrollFailures);
        }
#endif // DEBUG
    }

#ifdef DEBUG
    fgDebugCheckBBlist(true);
    fgDebugCheckLoopTable();
#endif // DEBUG

    return PhaseStatus::MODIFIED_EVERYTHING;
}
#ifdef _PREFAST_
#pragma warning(pop)
#endif

/*****************************************************************************
 *
 *  Return false if there is a code path from 'topBB' to 'botBB' that might
 *  not execute a method call.
 */

bool Compiler::optReachWithoutCall(BasicBlock* topBB, BasicBlock* botBB)
{
    // TODO-Cleanup: Currently BBF_GC_SAFE_POINT is not set for helper calls,
    // as some helper calls are neither interruptible nor hijackable.
    // When we can determine this, then we can set BBF_GC_SAFE_POINT for
    // those helpers too.

    noway_assert(topBB->bbNum <= botBB->bbNum);

    // We can always check topBB and botBB for any gc safe points and early out

    if ((topBB->bbFlags | botBB->bbFlags) & BBF_GC_SAFE_POINT)
    {
        return false;
    }

    // Otherwise we will need to rely upon the dominator sets

    if (!fgDomsComputed)
    {
        // return a conservative answer of true when we don't have the dominator sets
        return true;
    }

    BasicBlock* curBB = topBB;
    for (;;)
    {
        noway_assert(curBB);

        // If we added a loop pre-header block then we will
        //  have a bbNum greater than fgLastBB, and we won't have
        //  any dominator information about this block, so skip it.
        //
        if (curBB->bbNum <= fgLastBB->bbNum)
        {
            noway_assert(curBB->bbNum <= botBB->bbNum);

            // Does this block contain a gc safe point?
            if (curBB->bbFlags & BBF_GC_SAFE_POINT)
            {
                // Will this block always execute on the way to botBB ?
                //
                // Since we are checking every block in [topBB .. botBB] and we are using
                // a lexical definition of a loop.
                //  (all that we know is that botBB is a back-edge to topBB)
                // Thus while walking blocks in this range we may encounter some blocks
                // that are not really part of the loop, and so we need to perform
                // some additional checks:
                //
                // We will check that the current 'curBB' is reachable from 'topBB'
                // and that it dominates the block containing the back-edge 'botBB'
                // When both of these are true then we know that the gcsafe point in 'curBB'
                // will be encountered in the loop and we can return false
                //
                if (fgDominate(curBB, botBB) && fgReachable(topBB, curBB))
                {
                    return false;
                }
            }
            else
            {
                // If we've reached the destination block, then we're done

                if (curBB == botBB)
                {
                    break;
                }
            }
        }

        curBB = curBB->bbNext;
    }

    // If we didn't find any blocks that contained a gc safe point and
    // also met the fgDominate and fgReachable criteria then we must return true
    //
    return true;
}

// optInvertCountTreeInfo: tree-walk callback that tallies, into the
// OptInvertCountTreeInfoType passed via pCallbackData, the number of shared
// static helper calls and GT_ARR_LENGTH nodes seen. Used by the loop-inversion
// cost heuristic below.
//
// static
Compiler::fgWalkResult Compiler::optInvertCountTreeInfo(GenTree** pTree, fgWalkData* data)
{
    OptInvertCountTreeInfoType* o = (OptInvertCountTreeInfoType*)data->pCallbackData;

    if (Compiler::IsSharedStaticHelper(*pTree))
    {
        o->sharedStaticHelperCount += 1;
    }

    if ((*pTree)->OperGet() == GT_ARR_LENGTH)
    {
        o->arrayLengthCount += 1;
    }

    return WALK_CONTINUE;
}

//-----------------------------------------------------------------------------
// optInvertWhileLoop: modify flow and duplicate code so that for/while loops are
// entered at top and tested at bottom (aka loop rotation or bottom testing).
// Creates a "zero trip test" condition which guards entry to the loop.
// Enables loop invariant hoisting and loop cloning, which depend on
// `do {} while` format loops.
Enables creation of a pre-header block after the // zero trip test to place code that only runs if the loop is guaranteed to // run at least once. // // Arguments: // block -- block that may be the predecessor of the un-rotated loop's test block. // // Returns: // true if any IR changes possibly made (used to determine phase return status) // // Notes: // Uses a simple lexical screen to detect likely loops. // // Specifically, we're looking for the following case: // // ... // jmp test // `block` argument // loop: // ... // ... // test: // ..stmts.. // cond // jtrue loop // // If we find this, and the condition is simple enough, we change // the loop to the following: // // ... // ..stmts.. // duplicated cond block statments // cond // duplicated cond // jfalse done // // else fall-through // loop: // ... // ... // test: // ..stmts.. // cond // jtrue loop // done: // // Makes no changes if the flow pattern match fails. // // May not modify a loop if profile is unfavorable, if the cost of duplicating // code is large (factoring in potential CSEs). // bool Compiler::optInvertWhileLoop(BasicBlock* block) { assert(opts.OptimizationEnabled()); assert(compCodeOpt() != SMALL_CODE); // Does the BB end with an unconditional jump? if (block->bbJumpKind != BBJ_ALWAYS || (block->bbFlags & BBF_KEEP_BBJ_ALWAYS)) { // It can't be one of the ones we use for our exception magic return false; } // Get hold of the jump target BasicBlock* bTest = block->bbJumpDest; // Does the block consist of 'jtrue(cond) block' ? if (bTest->bbJumpKind != BBJ_COND) { return false; } // bTest must be a backwards jump to block->bbNext if (bTest->bbJumpDest != block->bbNext) { return false; } // Since test is a BBJ_COND it will have a bbNext noway_assert(bTest->bbNext != nullptr); // 'block' must be in the same try region as the condition, since we're going to insert a duplicated condition // in a new block after 'block', and the condition might include exception throwing code. 
// On non-funclet platforms (x86), the catch exit is a BBJ_ALWAYS, but we don't want that to // be considered as the head of a loop, so also disallow different handler regions. if (!BasicBlock::sameEHRegion(block, bTest)) { return false; } // The duplicated condition block will branch to bTest->bbNext, so that also better be in the // same try region (or no try region) to avoid generating illegal flow. BasicBlock* bTestNext = bTest->bbNext; if (bTestNext->hasTryIndex() && !BasicBlock::sameTryRegion(block, bTestNext)) { return false; } // It has to be a forward jump. Defer this check until after all the cheap checks // are done, since it iterates forward in the block list looking for bbJumpDest. // TODO-CQ: Check if we can also optimize the backwards jump as well. // if (!fgIsForwardBranch(block)) { return false; } // Find the loop termination test at the bottom of the loop. Statement* condStmt = bTest->lastStmt(); // Verify the test block ends with a conditional that we can manipulate. GenTree* const condTree = condStmt->GetRootNode(); noway_assert(condTree->gtOper == GT_JTRUE); if (!condTree->AsOp()->gtOp1->OperIsCompare()) { return false; } // Estimate the cost of cloning the entire test block. // // Note: it would help throughput to compute the maximum cost // first and early out for large bTest blocks, as we are doing two // tree walks per tree. But because of this helper call scan, the // maximum cost depends on the trees in the block. // // We might consider flagging blocks with hoistable helper calls // during importation, so we can avoid the helper search and // implement an early bail out for large blocks with no helper calls. // // Note that gtPrepareCost can cause operand swapping, so we must // return `true` (possible IR change) from here on. 
unsigned estDupCostSz = 0; for (Statement* const stmt : bTest->Statements()) { GenTree* tree = stmt->GetRootNode(); gtPrepareCost(tree); estDupCostSz += tree->GetCostSz(); } weight_t loopIterations = BB_LOOP_WEIGHT_SCALE; bool allProfileWeightsAreValid = false; weight_t const weightBlock = block->bbWeight; weight_t const weightTest = bTest->bbWeight; weight_t const weightNext = block->bbNext->bbWeight; // If we have profile data then we calculate the number of times // the loop will iterate into loopIterations if (fgIsUsingProfileWeights()) { // Only rely upon the profile weight when all three of these blocks // have good profile weights if (block->hasProfileWeight() && bTest->hasProfileWeight() && block->bbNext->hasProfileWeight()) { // If this while loop never iterates then don't bother transforming // if (weightNext == BB_ZERO_WEIGHT) { return true; } // We generally expect weightTest == weightNext + weightBlock. // // Tolerate small inconsistencies... // if (!fgProfileWeightsConsistent(weightBlock + weightNext, weightTest)) { JITDUMP("Profile weights locally inconsistent: block " FMT_WT ", next " FMT_WT ", test " FMT_WT "\n", weightBlock, weightNext, weightTest); } else { allProfileWeightsAreValid = true; // Determine iteration count // // weightNext is the number of time this loop iterates // weightBlock is the number of times that we enter the while loop // loopIterations is the average number of times that this loop iterates // loopIterations = weightNext / weightBlock; } } else { JITDUMP("Missing profile data for loop!\n"); } } unsigned maxDupCostSz = 34; if ((compCodeOpt() == FAST_CODE) || compStressCompile(STRESS_DO_WHILE_LOOPS, 30)) { maxDupCostSz *= 4; } // If this loop iterates a lot then raise the maxDupCost if (loopIterations >= 12.0) { maxDupCostSz *= 2; if (loopIterations >= 96.0) { maxDupCostSz *= 2; } } // If the compare has too high cost then we don't want to dup. 
bool costIsTooHigh = (estDupCostSz > maxDupCostSz); OptInvertCountTreeInfoType optInvertTotalInfo = {}; if (costIsTooHigh) { // If we already know that the cost is acceptable, then don't waste time walking the tree // counting things to boost the maximum allowed cost. // // If the loop condition has a shared static helper, we really want this loop converted // as not converting the loop will disable loop hoisting, meaning the shared helper will // be executed on every loop iteration. // // If the condition has array.Length operations, also boost, as they are likely to be CSE'd. for (Statement* const stmt : bTest->Statements()) { GenTree* tree = stmt->GetRootNode(); OptInvertCountTreeInfoType optInvertInfo = {}; fgWalkTreePre(&tree, Compiler::optInvertCountTreeInfo, &optInvertInfo); optInvertTotalInfo.sharedStaticHelperCount += optInvertInfo.sharedStaticHelperCount; optInvertTotalInfo.arrayLengthCount += optInvertInfo.arrayLengthCount; if ((optInvertInfo.sharedStaticHelperCount > 0) || (optInvertInfo.arrayLengthCount > 0)) { // Calculate a new maximum cost. We might be able to early exit. unsigned newMaxDupCostSz = maxDupCostSz + 24 * min(optInvertTotalInfo.sharedStaticHelperCount, (int)(loopIterations + 1.5)) + 8 * optInvertTotalInfo.arrayLengthCount; // Is the cost too high now? costIsTooHigh = (estDupCostSz > newMaxDupCostSz); if (!costIsTooHigh) { // No need counting any more trees; we're going to do the transformation. JITDUMP("Decided to duplicate loop condition block after counting helpers in tree [%06u] in " "block " FMT_BB, dspTreeID(tree), bTest->bbNum); maxDupCostSz = newMaxDupCostSz; // for the JitDump output below break; } } } } #ifdef DEBUG if (verbose) { // Note that `optInvertTotalInfo.sharedStaticHelperCount = 0` means either there were zero helpers, or the // tree walk to count them was not done. 
printf( "\nDuplication of loop condition [%06u] is %s, because the cost of duplication (%i) is %s than %i," "\n loopIterations = %7.3f, optInvertTotalInfo.sharedStaticHelperCount >= %d, validProfileWeights = %s\n", dspTreeID(condTree), costIsTooHigh ? "not done" : "performed", estDupCostSz, costIsTooHigh ? "greater" : "less or equal", maxDupCostSz, loopIterations, optInvertTotalInfo.sharedStaticHelperCount, dspBool(allProfileWeightsAreValid)); } #endif if (costIsTooHigh) { return true; } bool foundCondTree = false; // Create a new block after `block` to put the copied condition code. block->bbJumpKind = BBJ_NONE; block->bbJumpDest = nullptr; BasicBlock* bNewCond = fgNewBBafter(BBJ_COND, block, /*extendRegion*/ true); // Clone each statement in bTest and append to bNewCond. for (Statement* const stmt : bTest->Statements()) { GenTree* originalTree = stmt->GetRootNode(); GenTree* clonedTree = gtCloneExpr(originalTree); // Special case handling needed for the conditional jump tree if (originalTree == condTree) { foundCondTree = true; // Get the compare subtrees GenTree* originalCompareTree = originalTree->AsOp()->gtOp1; GenTree* clonedCompareTree = clonedTree->AsOp()->gtOp1; assert(originalCompareTree->OperIsCompare()); assert(clonedCompareTree->OperIsCompare()); // Flag compare and cloned copy so later we know this loop // has a proper zero trip test. originalCompareTree->gtFlags |= GTF_RELOP_ZTT; clonedCompareTree->gtFlags |= GTF_RELOP_ZTT; // The original test branches to remain in the loop. The // new cloned test will branch to avoid the loop. So the // cloned compare needs to reverse the branch condition. 
gtReverseCond(clonedCompareTree); } Statement* clonedStmt = fgNewStmtAtEnd(bNewCond, clonedTree); if (opts.compDbgInfo) { clonedStmt->SetDebugInfo(stmt->GetDebugInfo()); } } assert(foundCondTree); // Flag the block that received the copy as potentially having an array/vtable // reference, nullcheck, object/array allocation if the block copied from did; // this is a conservative guess. if (auto copyFlags = bTest->bbFlags & (BBF_HAS_IDX_LEN | BBF_HAS_NULLCHECK | BBF_HAS_NEWOBJ | BBF_HAS_NEWARRAY)) { bNewCond->bbFlags |= copyFlags; } bNewCond->bbJumpDest = bTest->bbNext; bNewCond->inheritWeight(block); // Update bbRefs and bbPreds for 'bNewCond', 'bNewCond->bbNext' 'bTest' and 'bTest->bbNext'. fgAddRefPred(bNewCond, block); fgAddRefPred(bNewCond->bbNext, bNewCond); fgRemoveRefPred(bTest, block); fgAddRefPred(bTest->bbNext, bNewCond); // Move all predecessor edges that look like loop entry edges to point to the new cloned condition // block, not the existing condition block. The idea is that if we only move `block` to point to // `bNewCond`, but leave other `bTest` predecessors still pointing to `bTest`, when we eventually // recognize loops, the loop will appear to have multiple entries, which will prevent optimization. // We don't have loops yet, but blocks should be in increasing lexical numbered order, so use that // as the proxy for predecessors that are "in" versus "out" of the potential loop. Note that correctness // is maintained no matter which condition block we point to, but we'll lose optimization potential // (and create spaghetti code) if we get it wrong. BlockToBlockMap blockMap(getAllocator(CMK_LoopOpt)); bool blockMapInitialized = false; unsigned loopFirstNum = bNewCond->bbNext->bbNum; unsigned loopBottomNum = bTest->bbNum; for (BasicBlock* const predBlock : bTest->PredBlocks()) { unsigned bNum = predBlock->bbNum; if ((loopFirstNum <= bNum) && (bNum <= loopBottomNum)) { // Looks like the predecessor is from within the potential loop; skip it. 
continue; } if (!blockMapInitialized) { blockMapInitialized = true; blockMap.Set(bTest, bNewCond); } // Redirect the predecessor to the new block. JITDUMP("Redirecting non-loop " FMT_BB " -> " FMT_BB " to " FMT_BB " -> " FMT_BB "\n", predBlock->bbNum, bTest->bbNum, predBlock->bbNum, bNewCond->bbNum); optRedirectBlock(predBlock, &blockMap, /*updatePreds*/ true); } // If we have profile data for all blocks and we know that we are cloning the // `bTest` block into `bNewCond` and thus changing the control flow from `block` so // that it no longer goes directly to `bTest` anymore, we have to adjust // various weights. // if (allProfileWeightsAreValid) { // Update the weight for bTest // JITDUMP("Reducing profile weight of " FMT_BB " from " FMT_WT " to " FMT_WT "\n", bTest->bbNum, weightTest, weightNext); bTest->bbWeight = weightNext; // Determine the new edge weights. // // We project the next/jump ratio for block and bTest by using // the original likelihoods out of bTest. // // Note "next" is the loop top block, not bTest's bbNext, // we'll call this latter block "after". 
// weight_t const testToNextLikelihood = min(1.0, weightNext / weightTest); weight_t const testToAfterLikelihood = 1.0 - testToNextLikelihood; // Adjust edges out of bTest (which now has weight weightNext) // weight_t const testToNextWeight = weightNext * testToNextLikelihood; weight_t const testToAfterWeight = weightNext * testToAfterLikelihood; flowList* const edgeTestToNext = fgGetPredForBlock(bTest->bbJumpDest, bTest); flowList* const edgeTestToAfter = fgGetPredForBlock(bTest->bbNext, bTest); JITDUMP("Setting weight of " FMT_BB " -> " FMT_BB " to " FMT_WT " (iterate loop)\n", bTest->bbNum, bTest->bbJumpDest->bbNum, testToNextWeight); JITDUMP("Setting weight of " FMT_BB " -> " FMT_BB " to " FMT_WT " (exit loop)\n", bTest->bbNum, bTest->bbNext->bbNum, testToAfterWeight); edgeTestToNext->setEdgeWeights(testToNextWeight, testToNextWeight, bTest->bbJumpDest); edgeTestToAfter->setEdgeWeights(testToAfterWeight, testToAfterWeight, bTest->bbNext); // Adjust edges out of block, using the same distribution. 
// JITDUMP("Profile weight of " FMT_BB " remains unchanged at " FMT_WT "\n", block->bbNum, weightBlock); weight_t const blockToNextLikelihood = testToNextLikelihood; weight_t const blockToAfterLikelihood = testToAfterLikelihood; weight_t const blockToNextWeight = weightBlock * blockToNextLikelihood; weight_t const blockToAfterWeight = weightBlock * blockToAfterLikelihood; flowList* const edgeBlockToNext = fgGetPredForBlock(bNewCond->bbNext, bNewCond); flowList* const edgeBlockToAfter = fgGetPredForBlock(bNewCond->bbJumpDest, bNewCond); JITDUMP("Setting weight of " FMT_BB " -> " FMT_BB " to " FMT_WT " (enter loop)\n", bNewCond->bbNum, bNewCond->bbNext->bbNum, blockToNextWeight); JITDUMP("Setting weight of " FMT_BB " -> " FMT_BB " to " FMT_WT " (avoid loop)\n", bNewCond->bbNum, bNewCond->bbJumpDest->bbNum, blockToAfterWeight); edgeBlockToNext->setEdgeWeights(blockToNextWeight, blockToNextWeight, bNewCond->bbNext); edgeBlockToAfter->setEdgeWeights(blockToAfterWeight, blockToAfterWeight, bNewCond->bbJumpDest); #ifdef DEBUG // Verify profile for the two target blocks is consistent. 
    //
    fgDebugCheckIncomingProfileData(bNewCond->bbNext);
    fgDebugCheckIncomingProfileData(bNewCond->bbJumpDest);
#endif // DEBUG
    }

#ifdef DEBUG
    if (verbose)
    {
        printf("\nDuplicated loop exit block at " FMT_BB " for loop (" FMT_BB " - " FMT_BB ")\n", bNewCond->bbNum,
               bNewCond->bbNext->bbNum, bTest->bbNum);
        printf("Estimated code size expansion is %d\n", estDupCostSz);

        fgDumpBlock(bNewCond);
        fgDumpBlock(bTest);
    }
#endif // DEBUG

    return true;
}

//-----------------------------------------------------------------------------
// optInvertLoops: invert while loops in the method
//
// Walks all blocks and attempts loop inversion (duplicating the loop's
// condition test before the loop) on each candidate via optInvertWhileLoop.
// Skipped entirely for SMALL_CODE, since inversion duplicates code.
//
// Returns:
//   suitable phase status
//
PhaseStatus Compiler::optInvertLoops()
{
    noway_assert(opts.OptimizationEnabled());
    noway_assert(fgModified == false);

#if defined(OPT_CONFIG)
    if (!JitConfig.JitDoLoopInversion())
    {
        JITDUMP("Loop inversion disabled\n");
        return PhaseStatus::MODIFIED_NOTHING;
    }
#endif // OPT_CONFIG

    if (compCodeOpt() == SMALL_CODE)
    {
        // Loop inversion grows code size; not worthwhile when optimizing for size.
        return PhaseStatus::MODIFIED_NOTHING;
    }

    bool madeChanges = false; // Assume no changes made
    for (BasicBlock* const block : Blocks())
    {
        // Make sure the appropriate fields are initialized
        //
        if (block->bbWeight == BB_ZERO_WEIGHT)
        {
            // Zero weighted block can't have a LOOP_HEAD flag
            noway_assert(block->isLoopHead() == false);
            continue;
        }

        if (optInvertWhileLoop(block))
        {
            madeChanges = true;
        }
    }

    if (fgModified)
    {
        // Reset fgModified here as we've done a consistent set of edits.
        //
        fgModified = false;
    }

    return madeChanges ? PhaseStatus::MODIFIED_EVERYTHING : PhaseStatus::MODIFIED_NOTHING;
}

//-----------------------------------------------------------------------------
// optOptimizeLayout: reorder blocks to reduce cost of control flow
//
// Runs flow-graph update (with tail duplication allowed on the first pass),
// then block reordering, then a second flow-graph update pass.
//
// Returns:
//   suitable phase status
//
PhaseStatus Compiler::optOptimizeLayout()
{
    noway_assert(opts.OptimizationEnabled());
    noway_assert(fgModified == false);

    bool       madeChanges          = false;
    const bool allowTailDuplication = true;

    madeChanges |= fgUpdateFlowGraph(allowTailDuplication);
    madeChanges |= fgReorderBlocks();
    madeChanges |= fgUpdateFlowGraph();

    // fgReorderBlocks can cause IR changes even if it does not modify
    // the flow graph. It calls gtPrepareCost which can cause operand swapping.
    // Work around this for now.
    //
    // Note phase status only impacts dumping and checking done post-phase,
    // it has no impact on a release build.
    //
    madeChanges = true;

    return madeChanges ? PhaseStatus::MODIFIED_EVERYTHING : PhaseStatus::MODIFIED_NOTHING;
}

//------------------------------------------------------------------------
// optMarkLoopHeads: Mark all potential loop heads as BBF_LOOP_HEAD. A potential loop head is a block
// targeted by a lexical back edge, where the source of the back edge is reachable from the block.
// Note that if there are no lexical back edges, there can't be any loops.
//
// If there are any potential loop heads, set `fgHasLoops` to `true`.
//
// Assumptions:
//    The reachability sets must be computed and valid.
//
void Compiler::optMarkLoopHeads()
{
#ifdef DEBUG
    if (verbose)
    {
        printf("*************** In optMarkLoopHeads()\n");
    }

    assert(!fgCheapPredsValid);
    assert(fgReachabilitySetsValid);
    fgDebugCheckBBNumIncreasing();

    int loopHeadsMarked = 0;
#endif

    bool hasLoops = false;

    for (BasicBlock* const block : Blocks())
    {
        // Set BBF_LOOP_HEAD if we have backwards branches to this block.
        unsigned blockNum = block->bbNum;
        for (BasicBlock* const predBlock : block->PredBlocks())
        {
            // A predecessor with an equal or higher block number is a lexical back edge.
            if (blockNum <= predBlock->bbNum)
            {
                if (predBlock->bbJumpKind == BBJ_CALLFINALLY)
                {
                    // Loops never have BBJ_CALLFINALLY as the source of their "back edge".
                    continue;
                }

                // If block can reach predBlock then we have a loop head
                if (BlockSetOps::IsMember(this, predBlock->bbReach, blockNum))
                {
                    hasLoops = true;
                    block->bbFlags |= BBF_LOOP_HEAD;
                    INDEBUG(++loopHeadsMarked);
                    break; // No need to look at more `block` predecessors
                }
            }
        }
    }

    JITDUMP("%d loop heads marked\n", loopHeadsMarked);
    fgHasLoops = hasLoops;
}

//-----------------------------------------------------------------------------
// optResetLoopInfo: reset all loop info in preparation for rebuilding the loop table, or preventing
// future phases from accessing loop-related data.
//
// Clears the loop table/count, per-block loop flags and natural-loop numbers,
// and resets non-profile-derived block weights.
//
void Compiler::optResetLoopInfo()
{
#ifdef DEBUG
    if (verbose)
    {
        printf("*************** In optResetLoopInfo()\n");
    }
#endif

    optLoopCount        = 0; // This will force the table to be rebuilt
    loopAlignCandidates = 0;

    // This will cause users to crash if they use the table when it is considered empty.
    // TODO: the loop table is always allocated as the same (maximum) size, so this is wasteful.
    // We could zero it out (possibly only in DEBUG) to be paranoid, but there's no reason to
    // force it to be re-allocated.
    optLoopTable = nullptr;

    for (BasicBlock* const block : Blocks())
    {
        // If the block weight didn't come from profile data, reset it so it can be calculated again.
        if (!block->hasProfileWeight())
        {
            block->bbWeight = BB_UNITY_WEIGHT;
            block->bbFlags &= ~BBF_RUN_RARELY;
        }

        block->bbFlags &= ~BBF_LOOP_FLAGS;
        block->bbNatLoopNum = BasicBlock::NOT_IN_LOOP;
    }
}

//-----------------------------------------------------------------------------
// optFindAndScaleGeneralLoopBlocks: scale block weights based on loop nesting depth.
// Note that this uses a very general notion of "loop": any block targeted by a reachable
// back-edge is considered a loop.
//
void Compiler::optFindAndScaleGeneralLoopBlocks()
{
#ifdef DEBUG
    if (verbose)
    {
        printf("*************** In optFindAndScaleGeneralLoopBlocks()\n");
    }
#endif

    // This code depends on block number ordering.
    INDEBUG(fgDebugCheckBBNumIncreasing());

    unsigned generalLoopCount = 0;

    // We will use the following terminology:
    // top        - the first basic block in the loop (i.e. the head of the backward edge)
    // bottom     - the last block in the loop (i.e. the block from which we jump to the top)
    // lastBottom - used when we have multiple back edges to the same top

    for (BasicBlock* const top : Blocks())
    {
        // Only consider `top` blocks already determined to be potential loop heads.
        if (!top->isLoopHead())
        {
            continue;
        }

        BasicBlock* foundBottom = nullptr;

        for (BasicBlock* const bottom : top->PredBlocks())
        {
            // Is this a loop candidate? - We look for "back edges"

            // Is this a backward edge? (from BOTTOM to TOP)
            if (top->bbNum > bottom->bbNum)
            {
                continue;
            }

            // We only consider back-edges that are BBJ_COND or BBJ_ALWAYS for loops.
            if ((bottom->bbJumpKind != BBJ_COND) && (bottom->bbJumpKind != BBJ_ALWAYS))
            {
                continue;
            }

            /* the top block must be able to reach the bottom block */
            if (!fgReachable(top, bottom))
            {
                continue;
            }

            /* Found a new loop, record the longest backedge in foundBottom */
            if ((foundBottom == nullptr) || (bottom->bbNum > foundBottom->bbNum))
            {
                foundBottom = bottom;
            }
        }

        if (foundBottom)
        {
            generalLoopCount++;

            /* Mark all blocks between 'top' and 'bottom' */
            optScaleLoopBlocks(top, foundBottom);
        }

        // We track at most 255 loops
        if (generalLoopCount == 255)
        {
#if COUNT_LOOPS
            totalUnnatLoopOverflows++;
#endif
            break;
        }
    }

    JITDUMP("\nFound a total of %d general loops.\n", generalLoopCount);

#if COUNT_LOOPS
    totalUnnatLoopCount += generalLoopCount;
#endif
}

//-----------------------------------------------------------------------------
// optFindLoops: find loops in the function.
//
// The JIT recognizes two types of loops in a function: natural loops and "general" (or "unnatural") loops.
// Natural loops are those which get added to the loop table. Most downstream optimizations require
// using natural loops. See `optFindNaturalLoops` for a definition of the criteria for recognizing a natural loop.
// A general loop is defined as a lexical (program order) range of blocks where a later block branches to an
// earlier block (that is, there is a back edge in the flow graph), and the later block is reachable from the earlier
// block. General loops are used for weighting flow graph blocks (when there is no block profile data), as well as
// for determining if we require fully interruptible GC information.
//
// Notes:
//  Also (re)sets all non-IBC block weights, and marks loops potentially needing alignment padding.
//
void Compiler::optFindLoops()
{
#ifdef DEBUG
    if (verbose)
    {
        printf("*************** In optFindLoops()\n");
    }
#endif

    noway_assert(opts.OptimizationEnabled());
    assert(fgDomsComputed);

    optMarkLoopHeads();

    // Were there any potential loops in the flow graph?
    if (fgHasLoops)
    {
        optFindNaturalLoops();
        optFindAndScaleGeneralLoopBlocks();
        optIdentifyLoopsForAlignment(); // Check if any of the loops need alignment
    }

#ifdef DEBUG
    fgDebugCheckLoopTable();
#endif

    optLoopsMarked = true;
}

//-----------------------------------------------------------------------------
// optFindLoopsPhase: The wrapper function for the "find loops" phase.
//
PhaseStatus Compiler::optFindLoopsPhase()
{
    optFindLoops();
    return PhaseStatus::MODIFIED_EVERYTHING;
}

/*****************************************************************************
 *
 *  Determine the kind of interference for the call.
 */

/* static */ inline Compiler::callInterf Compiler::optCallInterf(GenTreeCall* call)
{
    // if not a helper, kills everything
    if (call->gtCallType != CT_HELPER)
    {
        return CALLINT_ALL;
    }

    // setfield and array address store kill all indirections
    switch (eeGetHelperNum(call->gtCallMethHnd))
    {
        case CORINFO_HELP_ASSIGN_REF:         // Not strictly needed as we don't make a GT_CALL with this
        case CORINFO_HELP_CHECKED_ASSIGN_REF: // Not strictly needed as we don't make a GT_CALL with this
        case CORINFO_HELP_ASSIGN_BYREF:       // Not strictly needed as we don't make a GT_CALL with this
        case CORINFO_HELP_SETFIELDOBJ:
        case CORINFO_HELP_ARRADDR_ST:

            // These helpers store object references, so they kill GC-ref indirections.
            return CALLINT_REF_INDIRS;

        case CORINFO_HELP_SETFIELDFLOAT:
        case CORINFO_HELP_SETFIELDDOUBLE:
        case CORINFO_HELP_SETFIELD8:
        case CORINFO_HELP_SETFIELD16:
        case CORINFO_HELP_SETFIELD32:
        case CORINFO_HELP_SETFIELD64:

            // Scalar field setters kill only non-GC (scalar) indirections.
            return CALLINT_SCL_INDIRS;

        case CORINFO_HELP_ASSIGN_STRUCT: // Not strictly needed as we don't use this
        case CORINFO_HELP_MEMSET:        // Not strictly needed as we don't make a GT_CALL with this
        case CORINFO_HELP_MEMCPY:        // Not strictly needed as we don't make a GT_CALL with this
        case CORINFO_HELP_SETFIELDSTRUCT:

            // Struct/memory copies may write both GC and non-GC memory.
            return CALLINT_ALL_INDIRS;

        default:
            break;
    }

    // other helpers kill nothing
    return CALLINT_NONE;
}

/*****************************************************************************
 *
 *  See if the given tree can be computed in the given precision (which must
 *  be smaller than the type of the tree for this to make sense). If 'doit'
 *  is false, we merely check to see whether narrowing is possible; if we
 *  get called with 'doit' being true, we actually perform the narrowing.
 */

// Note: typical calling pattern is a query pass (doit == false) followed by a
// mutation pass (doit == true); the function must never mutate when doit is false.
bool Compiler::optNarrowTree(GenTree* tree, var_types srct, var_types dstt, ValueNumPair vnpNarrow, bool doit)
{
    genTreeOps oper;
    unsigned   kind;

    noway_assert(tree);
    noway_assert(genActualType(tree->gtType) == genActualType(srct));

    /* Assume we're only handling integer types */
    noway_assert(varTypeIsIntegral(srct));
    noway_assert(varTypeIsIntegral(dstt));

    unsigned srcSize = genTypeSize(srct);
    unsigned dstSize = genTypeSize(dstt);

    /* dstt must be smaller than srct to narrow */
    if (dstSize >= srcSize)
    {
        return false;
    }

    /* Figure out what kind of a node we have */
    oper = tree->OperGet();
    kind = tree->OperKind();

    if (oper == GT_ASG)
    {
        noway_assert(doit == false);
        return false;
    }

    ValueNumPair NoVNPair = ValueNumPair();

    if (kind & GTK_LEAF)
    {
        switch (oper)
        {
            /* Constants can usually be narrowed by changing their value */
            CLANG_FORMAT_COMMENT_ANCHOR;

#ifndef TARGET_64BIT
            __int64 lval;
            __int64 lmask;

            case GT_CNS_LNG:
                lval  = tree->AsIntConCommon()->LngValue();
                lmask = 0;

                // Select the mask of bits that must hold the value for it to fit in 'dstt'.
                switch (dstt)
                {
                    case TYP_BYTE:
                        lmask = 0x0000007F;
                        break;
                    case TYP_BOOL:
                    case TYP_UBYTE:
                        lmask = 0x000000FF;
                        break;
                    case TYP_SHORT:
                        lmask = 0x00007FFF;
                        break;
                    case TYP_USHORT:
                        lmask = 0x0000FFFF;
                        break;
                    case TYP_INT:
                        lmask = 0x7FFFFFFF;
                        break;
                    case TYP_UINT:
                        lmask = 0xFFFFFFFF;
                        break;

                    default:
                        return false;
                }

                if ((lval & lmask) != lval)
                    return false;

                if (doit)
                {
                    tree->BashToConst(static_cast<int32_t>(lval));
                    if (vnStore != nullptr)
                    {
                        fgValueNumberTreeConst(tree);
                    }
                }

                return true;
#endif

            case GT_CNS_INT:

                ssize_t ival;
                ival = tree->AsIntCon()->gtIconVal;
                ssize_t imask;
                imask = 0;

                switch (dstt)
                {
                    case TYP_BYTE:
                        imask = 0x0000007F;
                        break;
                    case TYP_BOOL:
                    case TYP_UBYTE:
                        imask = 0x000000FF;
                        break;
                    case TYP_SHORT:
                        imask = 0x00007FFF;
                        break;
                    case TYP_USHORT:
                        imask = 0x0000FFFF;
                        break;
#ifdef TARGET_64BIT
                    case TYP_INT:
                        imask = 0x7FFFFFFF;
                        break;
                    case TYP_UINT:
                        imask = 0xFFFFFFFF;
                        break;
#endif // TARGET_64BIT
                    default:
                        return false;
                }

                if ((ival & imask) != ival)
                {
                    return false;
                }

#ifdef TARGET_64BIT
                if (doit)
                {
                    tree->gtType                = TYP_INT;
                    tree->AsIntCon()->gtIconVal = (int)ival;
                    if (vnStore != nullptr)
                    {
                        fgValueNumberTreeConst(tree);
                    }
                }
#endif // TARGET_64BIT

                return true;

            /* Operands that are in memory can usually be narrowed
               simply by changing their gtType */

            case GT_LCL_VAR:
                /* We only allow narrowing long -> int for a GT_LCL_VAR */
                if (dstSize == sizeof(int))
                {
                    goto NARROW_IND;
                }
                break;

            case GT_CLS_VAR:
            case GT_LCL_FLD:
                goto NARROW_IND;
            default:
                break;
        }

        noway_assert(doit == false);
        return false;
    }

    if (kind & (GTK_BINOP | GTK_UNOP))
    {
        GenTree* op1;
        op1 = tree->AsOp()->gtOp1;
        GenTree* op2;
        op2 = tree->AsOp()->gtOp2;

        switch (tree->gtOper)
        {
            case GT_AND:
                noway_assert(genActualType(tree->gtType) == genActualType(op1->gtType));
                noway_assert(genActualType(tree->gtType) == genActualType(op2->gtType));

                GenTree* opToNarrow;
                opToNarrow = nullptr;
                GenTree** otherOpPtr;
                otherOpPtr = nullptr;
                bool foundOperandThatBlocksNarrowing;
                foundOperandThatBlocksNarrowing = false;

                // If 'dstt' is unsigned and one of the operands can be narrowed into 'dsst',
                // the result of the GT_AND will also fit into 'dstt' and can be narrowed.
                // The same is true if one of the operands is an int const and can be narrowed into 'dsst'.
                if (!gtIsActiveCSE_Candidate(op2) && ((op2->gtOper == GT_CNS_INT) || varTypeIsUnsigned(dstt)))
                {
                    if (optNarrowTree(op2, srct, dstt, NoVNPair, false))
                    {
                        opToNarrow = op2;
                        otherOpPtr = &tree->AsOp()->gtOp1;
                    }
                    else
                    {
                        foundOperandThatBlocksNarrowing = true;
                    }
                }

                if ((opToNarrow == nullptr) && !gtIsActiveCSE_Candidate(op1) &&
                    ((op1->gtOper == GT_CNS_INT) || varTypeIsUnsigned(dstt)))
                {
                    if (optNarrowTree(op1, srct, dstt, NoVNPair, false))
                    {
                        opToNarrow = op1;
                        otherOpPtr = &tree->AsOp()->gtOp2;
                    }
                    else
                    {
                        foundOperandThatBlocksNarrowing = true;
                    }
                }

                if (opToNarrow != nullptr)
                {
                    // We will change the type of the tree and narrow opToNarrow
                    //
                    if (doit)
                    {
                        tree->gtType = genActualType(dstt);
                        tree->SetVNs(vnpNarrow);

                        optNarrowTree(opToNarrow, srct, dstt, NoVNPair, true);
                        // We may also need to cast away the upper bits of *otherOpPtr
                        if (srcSize == 8)
                        {
                            assert(tree->gtType == TYP_INT);
                            GenTree* castOp = gtNewCastNode(TYP_INT, *otherOpPtr, false, TYP_INT);
#ifdef DEBUG
                            castOp->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif
                            *otherOpPtr = castOp;
                        }
                    }
                    return true;
                }

                if (foundOperandThatBlocksNarrowing)
                {
                    noway_assert(doit == false);
                    return false;
                }

                goto COMMON_BINOP;

            case GT_ADD:
            case GT_MUL:

                if (tree->gtOverflow() || varTypeIsSmall(dstt))
                {
                    noway_assert(doit == false);
                    return false;
                }
                FALLTHROUGH;

            case GT_OR:
            case GT_XOR:
                noway_assert(genActualType(tree->gtType) == genActualType(op1->gtType));
                noway_assert(genActualType(tree->gtType) == genActualType(op2->gtType));
            COMMON_BINOP:
                // Both operands must be narrowable (and not CSE candidates) for the
                // whole binop to narrow; note the recursive calls share 'doit'.
                if (gtIsActiveCSE_Candidate(op1) || gtIsActiveCSE_Candidate(op2) ||
                    !optNarrowTree(op1, srct, dstt, NoVNPair, doit) || !optNarrowTree(op2, srct, dstt, NoVNPair, doit))
                {
                    noway_assert(doit == false);
                    return false;
                }

                /* Simply change the type of the tree */

                if (doit)
                {
                    if (tree->gtOper == GT_MUL && (tree->gtFlags & GTF_MUL_64RSLT))
                    {
                        tree->gtFlags &= ~GTF_MUL_64RSLT;
                    }

                    tree->gtType = genActualType(dstt);
                    tree->SetVNs(vnpNarrow);
                }

                return true;

            case GT_IND:

            NARROW_IND:

                if ((dstSize > genTypeSize(tree->gtType)) &&
                    (varTypeIsUnsigned(dstt) && !varTypeIsUnsigned(tree->gtType)))
                {
                    return false;
                }

                /* Simply change the type of the tree */

                if (doit && (dstSize <= genTypeSize(tree->gtType)))
                {
                    if (!varTypeIsSmall(dstt))
                    {
                        dstt = varTypeToSigned(dstt);
                    }

                    tree->gtType = dstt;
                    tree->SetVNs(vnpNarrow);

                    /* Make sure we don't mess up the variable type */
                    if ((oper == GT_LCL_VAR) || (oper == GT_LCL_FLD))
                    {
                        tree->gtFlags |= GTF_VAR_CAST;
                    }
                }

                return true;

            case GT_EQ:
            case GT_NE:
            case GT_LT:
            case GT_LE:
            case GT_GT:
            case GT_GE:

                /* These can always be narrowed since they only represent 0 or 1 */
                return true;

            case GT_CAST:
            {
                var_types cast    = tree->CastToType();
                var_types oprt    = op1->TypeGet();
                unsigned  oprSize = genTypeSize(oprt);

                if (cast != srct)
                {
                    return false;
                }

                if (varTypeIsIntegralOrI(dstt) != varTypeIsIntegralOrI(oprt))
                {
                    return false;
                }

                if (tree->gtOverflow())
                {
                    return false;
                }

                /* Is this a cast from the type we're narrowing to or a smaller one? */

                if (oprSize <= dstSize)
                {
                    /* Bash the target type of the cast */

                    if (doit)
                    {
                        if (!varTypeIsSmall(dstt))
                        {
                            dstt = varTypeToSigned(dstt);
                        }

                        if ((oprSize == dstSize) &&
                            ((varTypeIsUnsigned(dstt) == varTypeIsUnsigned(oprt)) || !varTypeIsSmall(dstt)))
                        {
                            // Same size and there is no signedness mismatch for small types: change the CAST
                            // into a NOP

                            JITDUMP("Cast operation has no effect, bashing [%06d] GT_CAST into a GT_NOP.\n",
                                    dspTreeID(tree));

                            tree->ChangeOper(GT_NOP);
                            tree->gtType = dstt;
                            // Clear the GTF_UNSIGNED flag, as it may have been set on the cast node
                            tree->gtFlags &= ~GTF_UNSIGNED;
                            tree->AsOp()->gtOp2 = nullptr;
                            tree->gtVNPair      = op1->gtVNPair; // Set to op1's ValueNumber
                        }
                        else
                        {
                            // oprSize is smaller or there is a signedness mismatch for small types

                            // Change the CastToType in the GT_CAST node
                            tree->CastToType() = dstt;

                            // The result type of a GT_CAST is never a small type.
                            // Use genActualType to widen dstt when it is a small types.
                            tree->gtType = genActualType(dstt);
                            tree->SetVNs(vnpNarrow);
                        }
                    }

                    return true;
                }
            }
                return false;

            case GT_COMMA:
                if (!gtIsActiveCSE_Candidate(op2) && optNarrowTree(op2, srct, dstt, vnpNarrow, doit))
                {
                    /* Simply change the type of the tree */

                    if (doit)
                    {
                        tree->gtType = genActualType(dstt);
                        tree->SetVNs(vnpNarrow);
                    }
                    return true;
                }
                return false;

            default:
                noway_assert(doit == false);
                return false;
        }
    }

    return false;
}

/*****************************************************************************
 *
 *  The following logic figures out whether the given variable is assigned
 *  somewhere in a list of basic blocks (or in an entire loop).
 */

// Tree-walk callback: records assignments into the isVarAssgDsc pointed to by
// data->pCallbackData, and aborts the walk when the tracked variable is assigned.
Compiler::fgWalkResult Compiler::optIsVarAssgCB(GenTree** pTree, fgWalkData* data)
{
    GenTree* tree = *pTree;

    if (tree->OperIs(GT_ASG))
    {
        GenTree*   dest     = tree->AsOp()->gtOp1;
        genTreeOps destOper = dest->OperGet();

        isVarAssgDsc* desc = (isVarAssgDsc*)data->pCallbackData;
        assert(desc && desc->ivaSelf == desc);

        if (destOper == GT_LCL_VAR)
        {
            unsigned tvar = dest->AsLclVarCommon()->GetLclNum();
            if (tvar < lclMAX_ALLSET_TRACKED)
            {
                AllVarSetOps::AddElemD(data->compiler, desc->ivaMaskVal, tvar);
            }
            else
            {
                // Too many locals to track precisely; remember the set is incomplete.
                desc->ivaMaskIncomplete = true;
            }

            if (tvar == desc->ivaVar)
            {
                if (tree != desc->ivaSkip)
                {
                    return WALK_ABORT;
                }
            }
        }
        else if (destOper == GT_LCL_FLD)
        {
            /* We can't track every field of every var. Moreover, indirections
               may access different parts of the var as different (but
               overlapping) fields. So just treat them as indirect accesses */

            // unsigned    lclNum = dest->AsLclFld()->GetLclNum();
            // noway_assert(lvaTable[lclNum].lvAddrTaken);

            varRefKinds refs = varTypeIsGC(tree->TypeGet()) ? VR_IND_REF : VR_IND_SCL;
            desc->ivaMaskInd = varRefKinds(desc->ivaMaskInd | refs);
        }
        else if (destOper == GT_CLS_VAR)
        {
            desc->ivaMaskInd = varRefKinds(desc->ivaMaskInd | VR_GLB_VAR);
        }
        else if (destOper == GT_IND)
        {
            /* Set the proper indirection bits */

            varRefKinds refs = varTypeIsGC(tree->TypeGet()) ?
                                  VR_IND_REF : VR_IND_SCL;
            desc->ivaMaskInd = varRefKinds(desc->ivaMaskInd | refs);
        }
    }
    else if (tree->gtOper == GT_CALL)
    {
        isVarAssgDsc* desc = (isVarAssgDsc*)data->pCallbackData;
        assert(desc && desc->ivaSelf == desc);

        desc->ivaMaskCall = optCallInterf(tree->AsCall());
    }

    return WALK_CONTINUE;
}

/*****************************************************************************
 *  Returns true if local 'var' is assigned anywhere in the block range
 *  [beg..end] (inclusive), ignoring the single assignment tree 'skip'.
 */

bool Compiler::optIsVarAssigned(BasicBlock* beg, BasicBlock* end, GenTree* skip, unsigned var)
{
    bool         result;
    isVarAssgDsc desc;

    desc.ivaSkip = skip;
#ifdef DEBUG
    desc.ivaSelf = &desc;
#endif
    desc.ivaVar      = var;
    desc.ivaMaskCall = CALLINT_NONE;
    AllVarSetOps::AssignNoCopy(this, desc.ivaMaskVal, AllVarSetOps::MakeEmpty(this));

    for (;;)
    {
        noway_assert(beg != nullptr);

        for (Statement* const stmt : beg->Statements())
        {
            // The callback aborts the walk as soon as 'var' is seen assigned.
            if (fgWalkTreePre(stmt->GetRootNodePointer(), optIsVarAssgCB, &desc) != WALK_CONTINUE)
            {
                result = true;
                goto DONE;
            }
        }

        if (beg == end)
        {
            break;
        }

        beg = beg->bbNext;
    }

    result = false;

DONE:

    return result;
}

/*****************************************************************************
 *  Is "var" assigned in the loop "lnum" ?
 */

bool Compiler::optIsVarAssgLoop(unsigned lnum, unsigned var)
{
    assert(lnum < optLoopCount);
    if (var < lclMAX_ALLSET_TRACKED)
    {
        // Tracked local: use the (cached) per-loop assigned-variable set.
        ALLVARSET_TP vs(AllVarSetOps::MakeSingleton(this, var));
        return optIsSetAssgLoop(lnum, vs) != 0;
    }
    else
    {
        // Untracked local: fall back to walking the loop's blocks directly.
        return optIsVarAssigned(optLoopTable[lnum].lpHead->bbNext, optLoopTable[lnum].lpBottom, nullptr, var);
    }
}

/*****************************************************************************
 *  Returns nonzero if any variable in 'vars' (or any indirection kind in
 *  'inds') may be assigned within loop 'lnum'. The per-loop assignment
 *  summary is computed once and cached on the loop descriptor
 *  (LPFLG_ASGVARS_YES).
 */

int Compiler::optIsSetAssgLoop(unsigned lnum, ALLVARSET_VALARG_TP vars, varRefKinds inds)
{
    noway_assert(lnum < optLoopCount);
    LoopDsc* loop = &optLoopTable[lnum];

    /* Do we already know what variables are assigned within this loop?
     */

    if (!(loop->lpFlags & LPFLG_ASGVARS_YES))
    {
        isVarAssgDsc desc;

        /* Prepare the descriptor used by the tree walker call-back */

        desc.ivaVar  = (unsigned)-1;
        desc.ivaSkip = nullptr;
#ifdef DEBUG
        desc.ivaSelf = &desc;
#endif
        AllVarSetOps::AssignNoCopy(this, desc.ivaMaskVal, AllVarSetOps::MakeEmpty(this));
        desc.ivaMaskInd        = VR_NONE;
        desc.ivaMaskCall       = CALLINT_NONE;
        desc.ivaMaskIncomplete = false;

        /* Now walk all the statements of the loop */

        for (BasicBlock* const block : loop->LoopBlocks())
        {
            for (Statement* const stmt : block->NonPhiStatements())
            {
                fgWalkTreePre(stmt->GetRootNodePointer(), optIsVarAssgCB, &desc);

                if (desc.ivaMaskIncomplete)
                {
                    loop->lpFlags |= LPFLG_ASGVARS_INC;
                }
            }
        }

        AllVarSetOps::Assign(this, loop->lpAsgVars, desc.ivaMaskVal);
        loop->lpAsgInds = desc.ivaMaskInd;
        loop->lpAsgCall = desc.ivaMaskCall;

        /* Now we know what variables are assigned in the loop */

        loop->lpFlags |= LPFLG_ASGVARS_YES;
    }

    /* Now we can finally test the caller's mask against the loop's */
    if (!AllVarSetOps::IsEmptyIntersection(this, loop->lpAsgVars, vars) || (loop->lpAsgInds & inds))
    {
        return 1;
    }

    // Calls inside the loop may also interfere, depending on what they can write.
    switch (loop->lpAsgCall)
    {
        case CALLINT_ALL:

            /* Can't hoist if the call might have side effect on an indirection. */

            if (loop->lpAsgInds != VR_NONE)
            {
                return 1;
            }

            break;

        case CALLINT_REF_INDIRS:

            /* Can't hoist if the call might have side effect on an ref indirection. */

            if (loop->lpAsgInds & VR_IND_REF)
            {
                return 1;
            }

            break;

        case CALLINT_SCL_INDIRS:

            /* Can't hoist if the call might have side effect on an non-ref indirection. */

            if (loop->lpAsgInds & VR_IND_SCL)
            {
                return 1;
            }

            break;

        case CALLINT_ALL_INDIRS:

            /* Can't hoist if the call might have side effect on any indirection.
             */

            if (loop->lpAsgInds & (VR_IND_REF | VR_IND_SCL))
            {
                return 1;
            }

            break;

        case CALLINT_NONE:

            /* Other helpers kill nothing */

            break;

        default:
            noway_assert(!"Unexpected lpAsgCall value");
    }

    return 0;
}

//-----------------------------------------------------------------------------
// optPerformHoistExpr: hoist a copy of 'origExpr' (taken from block 'exprBb')
// into the pre-header of loop 'lnum', marking the clone as a CSE candidate.
//
void Compiler::optPerformHoistExpr(GenTree* origExpr, BasicBlock* exprBb, unsigned lnum)
{
    assert(exprBb != nullptr);

#ifdef DEBUG
    if (verbose)
    {
        printf("\nHoisting a copy of ");
        printTreeID(origExpr);
        printf(" from " FMT_BB " into PreHeader " FMT_BB " for loop " FMT_LP " <" FMT_BB ".." FMT_BB ">:\n",
               exprBb->bbNum, optLoopTable[lnum].lpHead->bbNum, lnum, optLoopTable[lnum].lpTop->bbNum,
               optLoopTable[lnum].lpBottom->bbNum);
        gtDispTree(origExpr);
        printf("\n");
    }
#endif

    // Create a copy of the expression and mark it for CSE's.
    GenTree* hoistExpr = gtCloneExpr(origExpr, GTF_MAKE_CSE);

    // The hoist Expr does not have to computed into a specific register,
    // so clear the RegNum if it was set in the original expression
    hoistExpr->ClearRegNum();

    // Copy any loop memory dependence.
    optCopyLoopMemoryDependence(origExpr, hoistExpr);

    // At this point we should have a cloned expression, marked with the GTF_MAKE_CSE flag
    assert(hoistExpr != origExpr);
    assert(hoistExpr->gtFlags & GTF_MAKE_CSE);

    GenTree* hoist = hoistExpr;
    // The value of the expression isn't used (unless it's an assignment).
    if (hoistExpr->OperGet() != GT_ASG)
    {
        hoist = gtUnusedValNode(hoistExpr);
    }

    /* Put the statement in the preheader */

    INDEBUG(optLoopTable[lnum].lpValidatePreHeader());

    BasicBlock* preHead = optLoopTable[lnum].lpHead;

    // fgMorphTree requires that compCurBB be the block that contains
    // (or in this case, will contain) the expression.
    compCurBB = preHead;
    hoist     = fgMorphTree(hoist);

    preHead->bbFlags |= (exprBb->bbFlags & (BBF_HAS_IDX_LEN | BBF_HAS_NULLCHECK));

    Statement* hoistStmt = gtNewStmt(hoist);

    // Simply append the statement at the end of the preHead's list.
Statement* firstStmt = preHead->firstStmt(); if (firstStmt != nullptr) { /* append after last statement */ Statement* lastStmt = preHead->lastStmt(); assert(lastStmt->GetNextStmt() == nullptr); lastStmt->SetNextStmt(hoistStmt); hoistStmt->SetPrevStmt(lastStmt); firstStmt->SetPrevStmt(hoistStmt); } else { /* Empty pre-header - store the single statement in the block */ preHead->bbStmtList = hoistStmt; hoistStmt->SetPrevStmt(hoistStmt); } hoistStmt->SetNextStmt(nullptr); #ifdef DEBUG if (verbose) { printf("This hoisted copy placed in PreHeader (" FMT_BB "):\n", preHead->bbNum); gtDispTree(hoist); printf("\n"); } #endif if (fgStmtListThreaded) { gtSetStmtInfo(hoistStmt); fgSetStmtSeq(hoistStmt); } #ifdef DEBUG if (m_nodeTestData != nullptr) { // What is the depth of the loop "lnum"? ssize_t depth = 0; unsigned lnumIter = lnum; while (optLoopTable[lnumIter].lpParent != BasicBlock::NOT_IN_LOOP) { depth++; lnumIter = optLoopTable[lnumIter].lpParent; } NodeToTestDataMap* testData = GetNodeTestData(); TestLabelAndNum tlAndN; if (testData->Lookup(origExpr, &tlAndN) && tlAndN.m_tl == TL_LoopHoist) { if (tlAndN.m_num == -1) { printf("Node "); printTreeID(origExpr); printf(" was declared 'do not hoist', but is being hoisted.\n"); assert(false); } else if (tlAndN.m_num != depth) { printf("Node "); printTreeID(origExpr); printf(" was declared as hoistable from loop at nesting depth %d; actually hoisted from loop at depth " "%d.\n", tlAndN.m_num, depth); assert(false); } else { // We've correctly hoisted this, so remove the annotation. Later, we'll check for any remaining "must // hoist" annotations. testData->Remove(origExpr); // Now we insert an annotation to make sure that "hoistExpr" is actually CSE'd. 
tlAndN.m_tl = TL_CSE_Def; tlAndN.m_num = m_loopHoistCSEClass++; testData->Set(hoistExpr, tlAndN); } } } #endif #if LOOP_HOIST_STATS if (!m_curLoopHasHoistedExpression) { m_loopsWithHoistedExpressions++; m_curLoopHasHoistedExpression = true; } m_totalHoistedExpressions++; #endif // LOOP_HOIST_STATS } void Compiler::optHoistLoopCode() { // If we don't have any loops in the method then take an early out now. if (optLoopCount == 0) { JITDUMP("\nNo loops; no hoisting\n"); return; } #ifdef DEBUG unsigned jitNoHoist = JitConfig.JitNoHoist(); if (jitNoHoist > 0) { JITDUMP("\nJitNoHoist set; no hoisting\n"); return; } #endif #if 0 // The code in this #if has been useful in debugging loop hoisting issues, by // enabling selective enablement of the loop hoisting optimization according to // method hash. #ifdef DEBUG unsigned methHash = info.compMethodHash(); char* lostr = getenv("loophoisthashlo"); unsigned methHashLo = 0; if (lostr != NULL) { sscanf_s(lostr, "%x", &methHashLo); // methHashLo = (unsigned(atoi(lostr)) << 2); // So we don't have to use negative numbers. } char* histr = getenv("loophoisthashhi"); unsigned methHashHi = UINT32_MAX; if (histr != NULL) { sscanf_s(histr, "%x", &methHashHi); // methHashHi = (unsigned(atoi(histr)) << 2); // So we don't have to use negative numbers. } if (methHash < methHashLo || methHash > methHashHi) return; printf("Doing loop hoisting in %s (0x%x).\n", info.compFullName, methHash); #endif // DEBUG #endif // 0 -- debugging loop hoisting issues #ifdef DEBUG if (verbose) { printf("\n*************** In optHoistLoopCode()\n"); printf("Blocks/Trees before phase\n"); fgDispBasicBlocks(true); fgDispHandlerTab(); optPrintLoopTable(); } #endif // Consider all the loop nests, in outer-to-inner order (thus hoisting expressions outside the largest loop in which // they are invariant.) 
LoopHoistContext hoistCtxt(this); for (unsigned lnum = 0; lnum < optLoopCount; lnum++) { if (optLoopTable[lnum].lpFlags & LPFLG_REMOVED) { JITDUMP("\nLoop " FMT_LP " was removed\n", lnum); continue; } if (optLoopTable[lnum].lpParent == BasicBlock::NOT_IN_LOOP) { optHoistLoopNest(lnum, &hoistCtxt); } } #if DEBUG if (fgModified) { if (verbose) { printf("Blocks/Trees after optHoistLoopCode() modified flowgraph\n"); fgDispBasicBlocks(true); printf(""); } // Make sure that the predecessor lists are accurate fgDebugCheckBBlist(); } #endif #ifdef DEBUG // Test Data stuff.. // If we have no test data, early out. if (m_nodeTestData == nullptr) { return; } NodeToTestDataMap* testData = GetNodeTestData(); for (NodeToTestDataMap::KeyIterator ki = testData->Begin(); !ki.Equal(testData->End()); ++ki) { TestLabelAndNum tlAndN; GenTree* node = ki.Get(); bool b = testData->Lookup(node, &tlAndN); assert(b); if (tlAndN.m_tl != TL_LoopHoist) { continue; } // Otherwise, it is a loop hoist annotation. assert(tlAndN.m_num < 100); // >= 100 indicates nested static field address, should already have been moved. if (tlAndN.m_num >= 0) { printf("Node "); printTreeID(node); printf(" was declared 'must hoist', but has not been hoisted.\n"); assert(false); } } #endif // DEBUG } void Compiler::optHoistLoopNest(unsigned lnum, LoopHoistContext* hoistCtxt) { // Do this loop, then recursively do all nested loops. JITDUMP("\n%s " FMT_LP "\n", optLoopTable[lnum].lpParent == BasicBlock::NOT_IN_LOOP ? "Loop Nest" : "Nested Loop", lnum); #if LOOP_HOIST_STATS // Record stats m_curLoopHasHoistedExpression = false; m_loopsConsidered++; #endif // LOOP_HOIST_STATS optHoistThisLoop(lnum, hoistCtxt); VNSet* hoistedInCurLoop = hoistCtxt->ExtractHoistedInCurLoop(); if (optLoopTable[lnum].lpChild != BasicBlock::NOT_IN_LOOP) { // Add the ones hoisted in "lnum" to "hoistedInParents" for any nested loops. // TODO-Cleanup: we should have a set abstraction for loops. 
        if (hoistedInCurLoop != nullptr)
        {
            for (VNSet::KeyIterator keys = hoistedInCurLoop->Begin(); !keys.Equal(hoistedInCurLoop->End()); ++keys)
            {
#ifdef DEBUG
                bool b;
                assert(!hoistCtxt->m_hoistedInParentLoops.Lookup(keys.Get(), &b));
#endif
                hoistCtxt->m_hoistedInParentLoops.Set(keys.Get(), true);
            }
        }

        for (unsigned child = optLoopTable[lnum].lpChild; child != BasicBlock::NOT_IN_LOOP;
             child          = optLoopTable[child].lpSibling)
        {
            optHoistLoopNest(child, hoistCtxt);
        }

        // Now remove them.
        // TODO-Cleanup: we should have a set abstraction for loops.
        if (hoistedInCurLoop != nullptr)
        {
            for (VNSet::KeyIterator keys = hoistedInCurLoop->Begin(); !keys.Equal(hoistedInCurLoop->End()); ++keys)
            {
                // Note that we asserted when we added these that they hadn't been members, so removing is appropriate.
                hoistCtxt->m_hoistedInParentLoops.Remove(keys.Get());
            }
        }
    }
}

//------------------------------------------------------------------------
// optHoistThisLoop: hoist loop-invariant expressions out of a single loop.
//   Computes register-pressure bookkeeping (int/FP/long variable counts),
//   collects the set of definitely-executed blocks, and then walks those
//   blocks looking for hoisting candidates.
//
// Arguments:
//   lnum      -- loop table index of the loop to process
//   hoistCtxt -- context tracking already-hoisted VNs and VN invariance cache
//
void Compiler::optHoistThisLoop(unsigned lnum, LoopHoistContext* hoistCtxt)
{
    LoopDsc* pLoopDsc = &optLoopTable[lnum];

    /* If loop was removed continue */

    if (pLoopDsc->lpFlags & LPFLG_REMOVED)
    {
        JITDUMP(" ... not hoisting " FMT_LP ": removed\n", lnum);
        return;
    }

    // Ensure the per-loop sets/tables are empty.
    hoistCtxt->m_curLoopVnInvariantCache.RemoveAll();

#ifdef DEBUG
    if (verbose)
    {
        printf("optHoistThisLoop for loop " FMT_LP " <" FMT_BB ".." FMT_BB ">:\n", lnum, pLoopDsc->lpTop->bbNum,
               pLoopDsc->lpBottom->bbNum);
        printf(" Loop body %s a call\n", (pLoopDsc->lpFlags & LPFLG_CONTAINS_CALL) ? "contains" : "does not contain");
        printf(" Loop has %s\n", (pLoopDsc->lpExitCnt == 1) ? "single exit" : "multiple exits");
    }
#endif

    VARSET_TP loopVars(VarSetOps::Intersection(this, pLoopDsc->lpVarInOut, pLoopDsc->lpVarUseDef));

    pLoopDsc->lpVarInOutCount    = VarSetOps::Count(this, pLoopDsc->lpVarInOut);
    pLoopDsc->lpLoopVarCount     = VarSetOps::Count(this, loopVars);
    pLoopDsc->lpHoistedExprCount = 0;

#ifndef TARGET_64BIT
    unsigned longVarsCount = VarSetOps::Count(this, lvaLongVars);

    if (longVarsCount > 0)
    {
        // Since 64-bit variables take up two registers on 32-bit targets, we increase
        // the Counts such that each TYP_LONG variable counts twice.
        //
        VARSET_TP loopLongVars(VarSetOps::Intersection(this, loopVars, lvaLongVars));
        VARSET_TP inOutLongVars(VarSetOps::Intersection(this, pLoopDsc->lpVarInOut, lvaLongVars));

#ifdef DEBUG
        if (verbose)
        {
            printf("\n LONGVARS(%d)=", VarSetOps::Count(this, lvaLongVars));
            lvaDispVarSet(lvaLongVars);
        }
#endif
        pLoopDsc->lpLoopVarCount += VarSetOps::Count(this, loopLongVars);
        pLoopDsc->lpVarInOutCount += VarSetOps::Count(this, inOutLongVars);
    }
#endif // !TARGET_64BIT

#ifdef DEBUG
    if (verbose)
    {
        printf("\n USEDEF (%d)=", VarSetOps::Count(this, pLoopDsc->lpVarUseDef));
        lvaDispVarSet(pLoopDsc->lpVarUseDef);

        printf("\n INOUT (%d)=", pLoopDsc->lpVarInOutCount);
        lvaDispVarSet(pLoopDsc->lpVarInOut);

        printf("\n LOOPVARS(%d)=", pLoopDsc->lpLoopVarCount);
        lvaDispVarSet(loopVars);
        printf("\n");
    }
#endif

    unsigned floatVarsCount = VarSetOps::Count(this, lvaFloatVars);

    if (floatVarsCount > 0)
    {
        // Split the counts into FP and non-FP components, since FP values
        // compete for a separate register file.
        VARSET_TP loopFPVars(VarSetOps::Intersection(this, loopVars, lvaFloatVars));
        VARSET_TP inOutFPVars(VarSetOps::Intersection(this, pLoopDsc->lpVarInOut, lvaFloatVars));

        pLoopDsc->lpLoopVarFPCount     = VarSetOps::Count(this, loopFPVars);
        pLoopDsc->lpVarInOutFPCount    = VarSetOps::Count(this, inOutFPVars);
        pLoopDsc->lpHoistedFPExprCount = 0;

        pLoopDsc->lpLoopVarCount -= pLoopDsc->lpLoopVarFPCount;
        pLoopDsc->lpVarInOutCount -= pLoopDsc->lpVarInOutFPCount;

#ifdef DEBUG
        if (verbose)
        {
            printf(" INOUT-FP(%d)=", pLoopDsc->lpVarInOutFPCount);
            lvaDispVarSet(inOutFPVars);

            printf("\n LOOPV-FP(%d)=", pLoopDsc->lpLoopVarFPCount);
            lvaDispVarSet(loopFPVars);

            printf("\n");
        }
#endif
    }
    else // (floatVarsCount == 0)
    {
        pLoopDsc->lpLoopVarFPCount     = 0;
        pLoopDsc->lpVarInOutFPCount    = 0;
        pLoopDsc->lpHoistedFPExprCount = 0;
    }

    // Find the set of definitely-executed blocks.
    // Ideally, the definitely-executed blocks are the ones that post-dominate the entry block.
    // Until we have post-dominators, we'll special-case for single-exit blocks.
    //
    // Todo: it is not clear if this is a correctness requirement or a profitability heuristic.
    // It seems like the latter. Ideally have enough safeguards to prevent hoisting exception
    // or side-effect dependent things.
    //
    // We really should consider hoisting from conditionally executed blocks, if they are frequently executed
    // and it is safe to evaluate the tree early.
    //
    // In particular if we have a loop nest, when scanning the outer loop we should consider hoisting from blocks
    // in enclosed loops. However, this is likely to scale poorly, and we really should instead start
    // hoisting inner to outer.
    //
    ArrayStack<BasicBlock*> defExec(getAllocatorLoopHoist());
    if (pLoopDsc->lpExitCnt == 1)
    {
        assert(pLoopDsc->lpExit != nullptr);
        JITDUMP(" Only considering hoisting in blocks that dominate exit block " FMT_BB "\n", pLoopDsc->lpExit->bbNum);
        BasicBlock* cur = pLoopDsc->lpExit;
        // Push dominators, until we reach "entry" or exit the loop.
        while (cur != nullptr && pLoopDsc->lpContains(cur) && cur != pLoopDsc->lpEntry)
        {
            defExec.Push(cur);
            cur = cur->bbIDom;
        }
        // If we didn't reach the entry block, give up and *just* push the entry block.
        if (cur != pLoopDsc->lpEntry)
        {
            JITDUMP(" -- odd, we didn't reach entry from exit via dominators. Only considering hoisting in entry "
                    "block " FMT_BB "\n",
                    pLoopDsc->lpEntry->bbNum);
            defExec.Reset();
        }
        defExec.Push(pLoopDsc->lpEntry);
    }
    else // More than one exit
    {
        JITDUMP(" only considering hoisting in entry block " FMT_BB "\n", pLoopDsc->lpEntry->bbNum);
        // We'll assume that only the entry block is definitely executed.
        // We could in the future do better.
        defExec.Push(pLoopDsc->lpEntry);
    }

    optHoistLoopBlocks(lnum, &defExec, hoistCtxt);
}

//------------------------------------------------------------------------
// optIsProfitableToHoistTree: register-pressure heuristic deciding whether
//   hoisting `tree` out of loop `lnum` is likely to pay off, based on how
//   many callee-saved/trash registers remain available versus the number
//   of live loop variables and previously hoisted expressions.
//
bool Compiler::optIsProfitableToHoistTree(GenTree* tree, unsigned lnum)
{
    LoopDsc* pLoopDsc = &optLoopTable[lnum];

    bool loopContainsCall = (pLoopDsc->lpFlags & LPFLG_CONTAINS_CALL) != 0;

    int availRegCount;
    int hoistedExprCount;
    int loopVarCount;
    int varInOutCount;

    if (varTypeIsFloating(tree))
    {
        hoistedExprCount = pLoopDsc->lpHoistedFPExprCount;
        loopVarCount     = pLoopDsc->lpLoopVarFPCount;
        varInOutCount    = pLoopDsc->lpVarInOutFPCount;

        availRegCount = CNT_CALLEE_SAVED_FLOAT;
        if (!loopContainsCall)
        {
            availRegCount += CNT_CALLEE_TRASH_FLOAT - 1;
        }
#ifdef TARGET_ARM
        // For ARM each double takes two FP registers
        // For now on ARM we won't track singles/doubles
        // and instead just assume that we always have doubles.
        //
        availRegCount /= 2;
#endif
    }
    else
    {
        hoistedExprCount = pLoopDsc->lpHoistedExprCount;
        loopVarCount     = pLoopDsc->lpLoopVarCount;
        varInOutCount    = pLoopDsc->lpVarInOutCount;

        availRegCount = CNT_CALLEE_SAVED - 1;
        if (!loopContainsCall)
        {
            availRegCount += CNT_CALLEE_TRASH - 1;
        }
#ifndef TARGET_64BIT
        // For our 32-bit targets Long types take two registers.
        if (varTypeIsLong(tree->TypeGet()))
        {
            availRegCount = (availRegCount + 1) / 2;
        }
#endif
    }

    // decrement the availRegCount by the count of expressions that we have already hoisted.
availRegCount -= hoistedExprCount; // the variables that are read/written inside the loop should // always be a subset of the InOut variables for the loop assert(loopVarCount <= varInOutCount); // When loopVarCount >= availRegCount we believe that all of the // available registers will get used to hold LclVars inside the loop. // This pessimistically assumes that each loopVar has a conflicting // lifetime with every other loopVar. // For this case we will hoist the expression only if is profitable // to place it in a stack home location (GetCostEx() >= 2*IND_COST_EX) // as we believe it will be placed in the stack or one of the other // loopVars will be spilled into the stack // if (loopVarCount >= availRegCount) { // Don't hoist expressions that are not heavy: tree->GetCostEx() < (2*IND_COST_EX) if (tree->GetCostEx() < (2 * IND_COST_EX)) { JITDUMP(" tree cost too low: %d < %d (loopVarCount %u >= availableRegCount %u)\n", tree->GetCostEx(), 2 * IND_COST_EX, loopVarCount, availRegCount); return false; } } // When varInOutCount < availRegCount we are know that there are // some available register(s) when we enter the loop body. // When varInOutCount == availRegCount there often will be a register // available when we enter the loop body, since a loop often defines a // LclVar on exit or there is often at least one LclVar that is worth // spilling to the stack to make way for this hoisted expression. 
// So we are willing hoist an expression with GetCostEx() == MIN_CSE_COST // if (varInOutCount > availRegCount) { // Don't hoist expressions that barely meet CSE cost requirements: tree->GetCostEx() == MIN_CSE_COST if (tree->GetCostEx() <= MIN_CSE_COST + 1) { JITDUMP(" tree not good CSE: %d <= %d (varInOutCount %u > availableRegCount %u)\n", tree->GetCostEx(), 2 * MIN_CSE_COST + 1, varInOutCount, availRegCount) return false; } } return true; } //------------------------------------------------------------------------ // optRecordLoopMemoryDependence: record that tree's value number // is dependent on a particular memory VN // // Arguments: // tree -- tree in question // block -- block containing tree // memoryVN -- VN for a "map" from a select operation encounterd // while computing the tree's VN // // Notes: // Only tracks trees in loops, and memory updates in the same loop nest. // So this is a coarse-grained dependence that is only usable for // hoisting tree out of its enclosing loops. // void Compiler::optRecordLoopMemoryDependence(GenTree* tree, BasicBlock* block, ValueNum memoryVN) { // If tree is not in a loop, we don't need to track its loop dependence. // unsigned const loopNum = block->bbNatLoopNum; if (loopNum == BasicBlock::NOT_IN_LOOP) { return; } // Find the loop associated with this memory VN. // unsigned updateLoopNum = vnStore->LoopOfVN(memoryVN); if (updateLoopNum >= BasicBlock::MAX_LOOP_NUM) { // There should be only two special non-loop loop nums. // assert((updateLoopNum == BasicBlock::MAX_LOOP_NUM) || (updateLoopNum == BasicBlock::NOT_IN_LOOP)); // memoryVN defined outside of any loop, we can ignore. // JITDUMP(" ==> Not updating loop memory dependence of [%06u], memory " FMT_VN " not defined in a loop\n", dspTreeID(tree), memoryVN); return; } // If the loop was removed, then record the dependence in the nearest enclosing loop, if any. 
// while ((optLoopTable[updateLoopNum].lpFlags & LPFLG_REMOVED) != 0) { unsigned const updateParentLoopNum = optLoopTable[updateLoopNum].lpParent; if (updateParentLoopNum == BasicBlock::NOT_IN_LOOP) { // Memory VN was defined in a loop, but no longer. // JITDUMP(" ==> Not updating loop memory dependence of [%06u], memory " FMT_VN " no longer defined in a loop\n", dspTreeID(tree), memoryVN); break; } JITDUMP(" ==> " FMT_LP " removed, updating dependence to parent " FMT_LP "\n", updateLoopNum, updateParentLoopNum); updateLoopNum = updateParentLoopNum; } // If the update block is not the the header of a loop containing // block, we can also ignore the update. // if (!optLoopContains(updateLoopNum, loopNum)) { JITDUMP(" ==> Not updating loop memory dependence of [%06u]/" FMT_LP ", memory " FMT_VN "/" FMT_LP " is not defined in an enclosing loop\n", dspTreeID(tree), loopNum, memoryVN, updateLoopNum); return; } // If we already have a recorded a loop entry block for this // tree, see if the new update is for a more closely nested // loop. // NodeToLoopMemoryBlockMap* const map = GetNodeToLoopMemoryBlockMap(); BasicBlock* mapBlock = nullptr; if (map->Lookup(tree, &mapBlock)) { unsigned const mapLoopNum = mapBlock->bbNatLoopNum; // If the update loop contains the existing map loop, // the existing map loop is more constraining. So no // update needed. // if (optLoopContains(updateLoopNum, mapLoopNum)) { JITDUMP(" ==> Not updating loop memory dependence of [%06u]; alrady constrained to " FMT_LP " nested in " FMT_LP "\n", dspTreeID(tree), mapLoopNum, updateLoopNum); return; } } // MemoryVN now describes the most constraining loop memory dependence // we know of. Update the map. 
// JITDUMP(" ==> Updating loop memory dependence of [%06u] to " FMT_LP "\n", dspTreeID(tree), updateLoopNum); map->Set(tree, optLoopTable[updateLoopNum].lpEntry, NodeToLoopMemoryBlockMap::Overwrite); } //------------------------------------------------------------------------ // optCopyLoopMemoryDependence: record that tree's loop memory dependence // is the same as some other tree. // // Arguments: // fromTree -- tree to copy dependence from // toTree -- tree in question // void Compiler::optCopyLoopMemoryDependence(GenTree* fromTree, GenTree* toTree) { NodeToLoopMemoryBlockMap* const map = GetNodeToLoopMemoryBlockMap(); BasicBlock* mapBlock = nullptr; if (map->Lookup(fromTree, &mapBlock)) { map->Set(toTree, mapBlock); } } //------------------------------------------------------------------------ // optHoistLoopBlocks: Hoist invariant expression out of the loop. // // Arguments: // loopNum - The number of the loop // blocks - A stack of blocks belonging to the loop // hoistContext - The loop hoist context // // Assumptions: // The `blocks` stack contains the definitely-executed blocks in // the loop, in the execution order, starting with the loop entry // block on top of the stack. 
// void Compiler::optHoistLoopBlocks(unsigned loopNum, ArrayStack<BasicBlock*>* blocks, LoopHoistContext* hoistContext) { class HoistVisitor : public GenTreeVisitor<HoistVisitor> { class Value { GenTree* m_node; public: bool m_hoistable; bool m_cctorDependent; bool m_invariant; #ifdef DEBUG const char* m_failReason; #endif Value(GenTree* node) : m_node(node), m_hoistable(false), m_cctorDependent(false), m_invariant(false) { #ifdef DEBUG m_failReason = "unset"; #endif } GenTree* Node() { return m_node; } }; ArrayStack<Value> m_valueStack; bool m_beforeSideEffect; unsigned m_loopNum; LoopHoistContext* m_hoistContext; BasicBlock* m_currentBlock; bool IsNodeHoistable(GenTree* node) { // TODO-CQ: This is a more restrictive version of a check that optIsCSEcandidate already does - it allows // a struct typed node if a class handle can be recovered from it. if (node->TypeGet() == TYP_STRUCT) { return false; } // Tree must be a suitable CSE candidate for us to be able to hoist it. return m_compiler->optIsCSEcandidate(node); } bool IsTreeVNInvariant(GenTree* tree) { ValueNum vn = tree->gtVNPair.GetLiberal(); bool vnIsInvariant = m_compiler->optVNIsLoopInvariant(vn, m_loopNum, &m_hoistContext->m_curLoopVnInvariantCache); // Even though VN is invariant in the loop (say a constant) its value may depend on position // of tree, so for loop hoisting we must also check that any memory read by tree // is also invariant in the loop. // if (vnIsInvariant) { vnIsInvariant = IsTreeLoopMemoryInvariant(tree); } return vnIsInvariant; } //------------------------------------------------------------------------ // IsTreeLoopMemoryInvariant: determine if the value number of tree // is dependent on the tree being executed within the current loop // // Arguments: // tree -- tree in question // // Returns: // true if tree could be evaluated just before loop and get the // same value. // // Note: // Calls are optimistically assumed to be invariant. 
// Caller must do their own analysis for these tree types. // bool IsTreeLoopMemoryInvariant(GenTree* tree) { if (tree->IsCall()) { // Calls are handled specially by hoisting, and loop memory dependence // must be checked by other means. // return true; } NodeToLoopMemoryBlockMap* const map = m_compiler->GetNodeToLoopMemoryBlockMap(); BasicBlock* loopEntryBlock = nullptr; if (map->Lookup(tree, &loopEntryBlock)) { for (MemoryKind memoryKind : allMemoryKinds()) { ValueNum loopMemoryVN = m_compiler->GetMemoryPerSsaData(loopEntryBlock->bbMemorySsaNumIn[memoryKind]) ->m_vnPair.GetLiberal(); if (!m_compiler->optVNIsLoopInvariant(loopMemoryVN, m_loopNum, &m_hoistContext->m_curLoopVnInvariantCache)) { return false; } } } return true; } public: enum { ComputeStack = false, DoPreOrder = true, DoPostOrder = true, DoLclVarsOnly = false, UseExecutionOrder = true, }; HoistVisitor(Compiler* compiler, unsigned loopNum, LoopHoistContext* hoistContext) : GenTreeVisitor(compiler) , m_valueStack(compiler->getAllocator(CMK_LoopHoist)) , m_beforeSideEffect(true) , m_loopNum(loopNum) , m_hoistContext(hoistContext) , m_currentBlock(nullptr) { } void HoistBlock(BasicBlock* block) { m_currentBlock = block; for (Statement* const stmt : block->NonPhiStatements()) { WalkTree(stmt->GetRootNodePointer(), nullptr); Value& top = m_valueStack.TopRef(); assert(top.Node() == stmt->GetRootNode()); if (top.m_hoistable) { m_compiler->optHoistCandidate(stmt->GetRootNode(), block, m_loopNum, m_hoistContext); } else { JITDUMP(" [%06u] not %s: %s\n", dspTreeID(top.Node()), top.m_invariant ? "invariant" : "hoistable", top.m_failReason); } m_valueStack.Reset(); } // Only unconditionally executed blocks in the loop are visited (see optHoistThisLoop) // so after we're done visiting the first block we need to assume the worst, that the // blocks that are not visisted have side effects. 
m_beforeSideEffect = false; } fgWalkResult PreOrderVisit(GenTree** use, GenTree* user) { GenTree* node = *use; m_valueStack.Emplace(node); return fgWalkResult::WALK_CONTINUE; } fgWalkResult PostOrderVisit(GenTree** use, GenTree* user) { GenTree* tree = *use; if (tree->OperIsLocal()) { GenTreeLclVarCommon* lclVar = tree->AsLclVarCommon(); unsigned lclNum = lclVar->GetLclNum(); // To be invariant a LclVar node must not be the LHS of an assignment ... bool isInvariant = !user->OperIs(GT_ASG) || (user->AsOp()->gtGetOp1() != tree); // and the variable must be in SSA ... isInvariant = isInvariant && m_compiler->lvaInSsa(lclNum) && lclVar->HasSsaName(); // and the SSA definition must be outside the loop we're hoisting from ... isInvariant = isInvariant && !m_compiler->optLoopTable[m_loopNum].lpContains( m_compiler->lvaGetDesc(lclNum)->GetPerSsaData(lclVar->GetSsaNum())->GetBlock()); // and the VN of the tree is considered invariant as well. // // TODO-CQ: This VN invariance check should not be necessary and in some cases it is conservative - it // is possible that the SSA def is outside the loop but VN does not understand what the node is doing // (e.g. LCL_FLD-based type reinterpretation) and assigns a "new, unique VN" to the node. This VN is // associated with the block where the node is, a loop block, and thus the VN is considered to not be // invariant. // On the other hand, it is possible for a SSA def to be inside the loop yet the use to be invariant, // if the defining expression is also invariant. In such a case the VN invariance would help but it is // blocked by the SSA invariance check. isInvariant = isInvariant && IsTreeVNInvariant(tree); Value& top = m_valueStack.TopRef(); assert(top.Node() == tree); if (isInvariant) { top.m_invariant = true; // In general it doesn't make sense to hoist a local node but there are exceptions, for example // LCL_FLD nodes (because then the variable cannot be enregistered and the node always turns // into a memory access). 
top.m_hoistable = IsNodeHoistable(tree); } #ifdef DEBUG if (!isInvariant) { top.m_failReason = "local, not rvalue / not in SSA / defined within current loop"; } else if (!top.m_hoistable) { top.m_failReason = "not handled by cse"; } #endif return fgWalkResult::WALK_CONTINUE; } // Initclass CLS_VARs and IconHandles are the base cases of cctor dependent trees. // In the IconHandle case, it's of course the dereference, rather than the constant itself, that is // truly dependent on the cctor. So a more precise approach would be to separately propagate // isCctorDependent and isAddressWhoseDereferenceWouldBeCctorDependent, but we don't for // simplicity/throughput; the constant itself would be considered non-hoistable anyway, since // optIsCSEcandidate returns false for constants. bool treeIsCctorDependent = ((tree->OperIs(GT_CLS_VAR) && ((tree->gtFlags & GTF_CLS_VAR_INITCLASS) != 0)) || (tree->OperIs(GT_CNS_INT) && ((tree->gtFlags & GTF_ICON_INITCLASS) != 0))); bool treeIsInvariant = true; bool treeHasHoistableChildren = false; int childCount; #ifdef DEBUG const char* failReason = "unknown"; #endif for (childCount = 0; m_valueStack.TopRef(childCount).Node() != tree; childCount++) { Value& child = m_valueStack.TopRef(childCount); if (child.m_hoistable) { treeHasHoistableChildren = true; } if (!child.m_invariant) { treeIsInvariant = false; INDEBUG(failReason = "variant child";) } if (child.m_cctorDependent) { // Normally, a parent of a cctor-dependent tree is also cctor-dependent. treeIsCctorDependent = true; // Check for the case where we can stop propagating cctor-dependent upwards. if (tree->OperIs(GT_COMMA) && (child.Node() == tree->gtGetOp2())) { GenTree* op1 = tree->gtGetOp1(); if (op1->OperIs(GT_CALL)) { GenTreeCall* call = op1->AsCall(); if ((call->gtCallType == CT_HELPER) && s_helperCallProperties.MayRunCctor(eeGetHelperNum(call->gtCallMethHnd))) { // Hoisting the comma is ok because it would hoist the initialization along // with the static field reference. 
treeIsCctorDependent = false; // Hoisting the static field without hoisting the initialization would be // incorrect, make sure we consider the field (which we flagged as // cctor-dependent) non-hoistable. noway_assert(!child.m_hoistable); } } } } } // If all the children of "tree" are hoistable, then "tree" itself can be hoisted, // unless it has a static var reference that can't be hoisted past its cctor call. bool treeIsHoistable = treeIsInvariant && !treeIsCctorDependent; #ifdef DEBUG if (treeIsInvariant && !treeIsHoistable) { failReason = "cctor dependent"; } #endif // But we must see if anything else prevents "tree" from being hoisted. // if (treeIsInvariant) { if (treeIsHoistable) { treeIsHoistable = IsNodeHoistable(tree); if (!treeIsHoistable) { INDEBUG(failReason = "not handled by cse";) } } // If it's a call, it must be a helper call, and be pure. // Further, if it may run a cctor, it must be labeled as "Hoistable" // (meaning it won't run a cctor because the class is not precise-init). if (treeIsHoistable && tree->IsCall()) { GenTreeCall* call = tree->AsCall(); if (call->gtCallType != CT_HELPER) { INDEBUG(failReason = "non-helper call";) treeIsHoistable = false; } else { CorInfoHelpFunc helpFunc = eeGetHelperNum(call->gtCallMethHnd); if (!s_helperCallProperties.IsPure(helpFunc)) { INDEBUG(failReason = "impure helper call";) treeIsHoistable = false; } else if (s_helperCallProperties.MayRunCctor(helpFunc) && ((call->gtFlags & GTF_CALL_HOISTABLE) == 0)) { INDEBUG(failReason = "non-hoistable helper call";) treeIsHoistable = false; } } } if (treeIsHoistable) { if (!m_beforeSideEffect) { // For now, we give up on an expression that might raise an exception if it is after the // first possible global side effect (and we assume we're after that if we're not in the first // block). // TODO-CQ: this is when we might do loop cloning. 
// if ((tree->gtFlags & GTF_EXCEPT) != 0) { INDEBUG(failReason = "side effect ordering constraint";) treeIsHoistable = false; } } } // Is the value of the whole tree loop invariant? treeIsInvariant = IsTreeVNInvariant(tree); // Is the value of the whole tree loop invariant? if (!treeIsInvariant) { // Here we have a tree that is not loop invariant and we thus cannot hoist INDEBUG(failReason = "tree VN is loop variant";) treeIsHoistable = false; } } // Next check if we need to set 'm_beforeSideEffect' to false. // // If we have already set it to false then we can skip these checks // if (m_beforeSideEffect) { // Is the value of the whole tree loop invariant? if (!treeIsInvariant) { // We have a tree that is not loop invariant and we thus cannot hoist assert(treeIsHoistable == false); // Check if we should clear m_beforeSideEffect. // If 'tree' can throw an exception then we need to set m_beforeSideEffect to false. // Note that calls are handled below if (tree->OperMayThrow(m_compiler) && !tree->IsCall()) { m_beforeSideEffect = false; } } // In the section below, we only care about memory side effects. We assume that expressions will // be hoisted so that they are evaluated in the same order as they would have been in the loop, // and therefore throw exceptions in the same order. // if (tree->IsCall()) { // If it's a call, it must be a helper call that does not mutate the heap. // Further, if it may run a cctor, it must be labeled as "Hoistable" // (meaning it won't run a cctor because the class is not precise-init). 
GenTreeCall* call = tree->AsCall(); if (call->gtCallType != CT_HELPER) { m_beforeSideEffect = false; } else { CorInfoHelpFunc helpFunc = eeGetHelperNum(call->gtCallMethHnd); if (s_helperCallProperties.MutatesHeap(helpFunc)) { m_beforeSideEffect = false; } else if (s_helperCallProperties.MayRunCctor(helpFunc) && (call->gtFlags & GTF_CALL_HOISTABLE) == 0) { m_beforeSideEffect = false; } // Additional check for helper calls that throw exceptions if (!treeIsInvariant) { // We have a tree that is not loop invariant and we thus cannot hoist assert(treeIsHoistable == false); // Does this helper call throw? if (!s_helperCallProperties.NoThrow(helpFunc)) { m_beforeSideEffect = false; } } } } else if (tree->OperIs(GT_ASG)) { // If the LHS of the assignment has a global reference, then assume it's a global side effect. GenTree* lhs = tree->AsOp()->gtOp1; if (lhs->gtFlags & GTF_GLOB_REF) { m_beforeSideEffect = false; } } else if (tree->OperIs(GT_XADD, GT_XORR, GT_XAND, GT_XCHG, GT_LOCKADD, GT_CMPXCHG, GT_MEMORYBARRIER)) { // If this node is a MEMORYBARRIER or an Atomic operation // then don't hoist and stop any further hoisting after this node INDEBUG(failReason = "atomic op or memory barrier";) treeIsHoistable = false; m_beforeSideEffect = false; } } // If this 'tree' is hoistable then we return and the caller will // decide to hoist it as part of larger hoistable expression. // if (!treeIsHoistable && treeHasHoistableChildren) { // The current tree is not hoistable but it has hoistable children that we need // to hoist now. // // In order to preserve the original execution order, we also need to hoist any // other hoistable trees that we encountered so far. 
// At this point the stack contains (in top to bottom order): // - the current node's children // - the current node // - ancestors of the current node and some of their descendants // // The ancestors have not been visited yet in post order so they're not hoistable // (and they cannot become hoistable because the current node is not) but some of // their descendants may have already been traversed and be hoistable. // // The execution order is actually bottom to top so we'll start hoisting from // the bottom of the stack, skipping the current node (which is expected to not // be hoistable). // // Note that the treeHasHoistableChildren check avoids unnecessary stack traversing // and also prevents hoisting trees too early. If the current tree is not hoistable // and it doesn't have any hoistable children then there's no point in hoisting any // other trees. Doing so would interfere with the cctor dependent case, where the // cctor dependent node is initially not hoistable and may become hoistable later, // when its parent comma node is visited. // for (int i = 0; i < m_valueStack.Height(); i++) { Value& value = m_valueStack.BottomRef(i); if (value.m_hoistable) { assert(value.Node() != tree); // Don't hoist this tree again. value.m_hoistable = false; value.m_invariant = false; m_compiler->optHoistCandidate(value.Node(), m_currentBlock, m_loopNum, m_hoistContext); } else if (value.Node() != tree) { JITDUMP(" [%06u] not %s: %s\n", dspTreeID(value.Node()), value.m_invariant ? 
"invariant" : "hoistable", value.m_failReason);
                }
            }
        }

        m_valueStack.Pop(childCount);

        // Publish this node's results on the value stack for its parent to consume.
        Value& top = m_valueStack.TopRef();
        assert(top.Node() == tree);
        top.m_hoistable = treeIsHoistable;
        top.m_cctorDependent = treeIsCctorDependent;
        top.m_invariant = treeIsInvariant;

#ifdef DEBUG
        if (!top.m_invariant || !top.m_hoistable)
        {
            top.m_failReason = failReason;
        }
#endif

        return fgWalkResult::WALK_CONTINUE;
    }
};

    LoopDsc* loopDsc = &optLoopTable[loopNum];
    assert(blocks->Top() == loopDsc->lpEntry);

    HoistVisitor visitor(this, loopNum, hoistContext);

    // Visit the loop blocks in the order the caller pushed them, hoisting
    // candidates from each block that is executed often enough.
    while (!blocks->Empty())
    {
        BasicBlock* block = blocks->Pop();
        weight_t blockWeight = block->getBBWeight(this);

        JITDUMP("\n optHoistLoopBlocks " FMT_BB " (weight=%6s) of loop " FMT_LP " <" FMT_BB ".." FMT_BB ">\n",
                block->bbNum, refCntWtd2str(blockWeight), loopNum, loopDsc->lpTop->bbNum, loopDsc->lpBottom->bbNum);

        // Skip blocks that are too cold to be worth hoisting from.
        if (blockWeight < (BB_UNITY_WEIGHT / 10))
        {
            JITDUMP(" block weight is too small to perform hoisting.\n");
            continue;
        }

        visitor.HoistBlock(block);
    }
}

//------------------------------------------------------------------------------
// optHoistCandidate: Attempt to hoist a candidate expression out of a loop,
// into the loop's pre-header.
//
// Arguments:
//    tree      - The candidate expression (identified by its liberal value number)
//    treeBb    - The basic block the expression appears in
//    lnum      - The loop to hoist out of
//    hoistCtxt - Context tracking which value numbers were already hoisted,
//                both in this loop and in enclosing loops
//
// Notes:
//    Bails out if hoisting is not profitable, if the same VN was already hoisted
//    (in this loop or a parent), or if the pre-header is in a different EH try
//    region than "treeBb". On success, performs the hoist, updates the per-loop
//    hoisted-expression counters, and records the hoisted VN in "hoistCtxt".
//
void Compiler::optHoistCandidate(GenTree* tree, BasicBlock* treeBb, unsigned lnum, LoopHoistContext* hoistCtxt)
{
    assert(lnum != BasicBlock::NOT_IN_LOOP);

    // It must pass the hoistable profitability tests for this loop level
    if (!optIsProfitableToHoistTree(tree, lnum))
    {
        JITDUMP(" ... not profitable to hoist\n");
        return;
    }

    if (hoistCtxt->m_hoistedInParentLoops.Lookup(tree->gtVNPair.GetLiberal()))
    {
        JITDUMP(" ... already hoisted same VN in parent\n");
        // already hoisted in a parent loop, so don't hoist this expression.
        return;
    }

    if (hoistCtxt->GetHoistedInCurLoop(this)->Lookup(tree->gtVNPair.GetLiberal()))
    {
        JITDUMP(" ... already hoisted same VN in current\n");
        // already hoisted this expression in the current loop, so don't hoist this expression.
        return;
    }

    // Create a loop pre-header in which to put the hoisted code.
    fgCreateLoopPreHeader(lnum);

    // If the block we're hoisting from and the pre-header are in different EH regions, don't hoist.
    // TODO: we could probably hoist things that won't raise exceptions, such as constants.
    if (!BasicBlock::sameTryRegion(optLoopTable[lnum].lpHead, treeBb))
    {
        JITDUMP(" ... not hoisting in " FMT_LP ", eh region constraint (pre-header try index %d, candidate " FMT_BB " try index %d\n",
                lnum, optLoopTable[lnum].lpHead->bbTryIndex, treeBb->bbNum, treeBb->bbTryIndex);
        return;
    }

    // Expression can be hoisted
    optPerformHoistExpr(tree, treeBb, lnum);

    // Increment lpHoistedExprCount or lpHoistedFPExprCount
    if (!varTypeIsFloating(tree->TypeGet()))
    {
        optLoopTable[lnum].lpHoistedExprCount++;
#ifndef TARGET_64BIT
        // For our 32-bit targets Long types take two registers.
        if (varTypeIsLong(tree->TypeGet()))
        {
            optLoopTable[lnum].lpHoistedExprCount++;
        }
#endif
    }
    else // Floating point expr hoisted
    {
        optLoopTable[lnum].lpHoistedFPExprCount++;
    }

    // Record the hoisted expression in hoistCtxt
    hoistCtxt->GetHoistedInCurLoop(this)->Set(tree->gtVNPair.GetLiberal(), true);
}

//------------------------------------------------------------------------------
// optVNIsLoopInvariant: Check whether value number "vn" is invariant with
// respect to loop "lnum", memoizing results per loop.
//
// Arguments:
//    vn                   - The value number to check
//    lnum                 - The loop in question
//    loopVnInvariantCache - Cache of previously computed answers for this loop
//
// Return Value:
//    true if "vn" is known to be invariant in loop "lnum"; false otherwise
//    (including when "vn" is NoVN).
//
bool Compiler::optVNIsLoopInvariant(ValueNum vn, unsigned lnum, VNSet* loopVnInvariantCache)
{
    // If it is not a VN, is not loop-invariant.
    if (vn == ValueNumStore::NoVN)
    {
        return false;
    }

    // We'll always short-circuit constants.
    if (vnStore->IsVNConstant(vn) || vn == vnStore->VNForVoid())
    {
        return true;
    }

    // If we've done this query previously, don't repeat.
    bool previousRes = false;
    if (loopVnInvariantCache->Lookup(vn, &previousRes))
    {
        return previousRes;
    }

    bool res = true;
    VNFuncApp funcApp;
    if (vnStore->GetVNFunc(vn, &funcApp))
    {
        if (funcApp.m_func == VNF_PhiDef)
        {
            // Is the definition within the loop? If so, is not loop-invariant.
            unsigned lclNum = funcApp.m_args[0];
            unsigned ssaNum = funcApp.m_args[1];
            LclSsaVarDsc* ssaDef = lvaTable[lclNum].GetPerSsaData(ssaNum);
            res = !optLoopContains(lnum, ssaDef->GetBlock()->bbNatLoopNum);
        }
        else if (funcApp.m_func == VNF_PhiMemoryDef)
        {
            BasicBlock* defnBlk = reinterpret_cast<BasicBlock*>(vnStore->ConstantValue<ssize_t>(funcApp.m_args[0]));
            res = !optLoopContains(lnum, defnBlk->bbNatLoopNum);
        }
        else if (funcApp.m_func == VNF_MemOpaque)
        {
            const unsigned vnLoopNum = funcApp.m_args[0];

            // Check for the special "ambiguous" loop MemOpaque VN.
            // This is considered variant in every loop.
            //
            if (vnLoopNum == BasicBlock::MAX_LOOP_NUM)
            {
                res = false;
            }
            else
            {
                res = !optLoopContains(lnum, vnLoopNum);
            }
        }
        else
        {
            for (unsigned i = 0; i < funcApp.m_arity; i++)
            {
                // 4th arg of mapStore identifies the loop where the store happens.
                //
                if (funcApp.m_func == VNF_MapStore)
                {
                    assert(funcApp.m_arity == 4);

                    if (i == 3)
                    {
                        const unsigned vnLoopNum = funcApp.m_args[3];
                        res = !optLoopContains(lnum, vnLoopNum);
                        break;
                    }
                }

                // TODO-CQ: We need to either make sure that *all* VN functions
                // always take VN args, or else have a list of arg positions to exempt, as implicitly
                // constant.
                if (!optVNIsLoopInvariant(funcApp.m_args[i], lnum, loopVnInvariantCache))
                {
                    res = false;
                    break;
                }
            }
        }
    }

    loopVnInvariantCache->Set(vn, res);
    return res;
}

//------------------------------------------------------------------------------
// fgCreateLoopPreHeader: Creates a pre-header block for the given loop.
// A pre-header is a block outside the loop that falls through or branches to the loop
// entry block. It is the only non-loop predecessor block to the entry block (thus, it
// dominates the entry block). The pre-header replaces the current lpHead in the loop table.
// The pre-header will be placed immediately before the loop top block, which is the first
// block of the loop in program order.
//
// Once a loop has a pre-header, calling this function will immediately return without
// creating another.
//
// If there already exists a block that meets the pre-header requirements, that block is marked
// as a pre-header, and no flow graph modification is made.
//
// Note that the pre-header block can be in a different EH region from blocks in the loop, including the
// entry block. Code doing hoisting is required to check the EH legality of hoisting to the pre-header
// before doing so.
//
// Since the flow graph has changed, if needed, fgUpdateChangedFlowGraph() should be called after this
// to update the block numbers, reachability, and dominators. The loop table does not need to be rebuilt.
// The new pre-header block does have a copy of the previous 'head' reachability set, but the pre-header
// itself doesn't exist in any reachability/dominator sets. `fgDominate` has code to specifically
// handle queries about the pre-header dominating other blocks, even without re-computing dominators.
// The preds lists have been maintained.
//
// Currently, if you create a pre-header but don't put any code in it, any subsequent fgUpdateFlowGraph()
// pass might choose to compact the empty pre-header with a predecessor block. That is, a pre-header
// block might disappear if not used.
//
// The code does not depend on the order of the BasicBlock bbNum.
//
// Arguments:
//    lnum - loop index
//
void Compiler::fgCreateLoopPreHeader(unsigned lnum)
{
#ifdef DEBUG
    if (verbose)
    {
        printf("*************** In fgCreateLoopPreHeader for " FMT_LP "\n", lnum);
    }
#endif // DEBUG

    LoopDsc& loop = optLoopTable[lnum];

    // Have we already created a loop-preheader block?
    if (loop.lpFlags & LPFLG_HAS_PREHEAD)
    {
        JITDUMP(" pre-header already exists\n");
        INDEBUG(loop.lpValidatePreHeader());
        return;
    }

    BasicBlock* head = loop.lpHead;
    BasicBlock* top = loop.lpTop;
    BasicBlock* entry = loop.lpEntry;

    // Ensure that lpHead always dominates lpEntry
    noway_assert(fgDominate(head, entry));

    // If `head` is already a valid pre-header, then mark it so.
    if (head->GetUniqueSucc() == entry)
    {
        // The loop entry must have a single non-loop predecessor, which is the pre-header.
        bool loopHasProperEntryBlockPreds = true;
        for (BasicBlock* const predBlock : entry->PredBlocks())
        {
            if (head == predBlock)
            {
                continue;
            }
            const bool intraLoopPred = optLoopContains(lnum, predBlock->bbNatLoopNum);
            if (!intraLoopPred)
            {
                loopHasProperEntryBlockPreds = false;
                break;
            }
        }

        if (loopHasProperEntryBlockPreds)
        {
            // Does this existing region have the same EH region index that we will use when we create the pre-header?
            // If not, we want to create a new pre-header with the expected region.
            bool headHasCorrectEHRegion = false;
            if ((top->bbFlags & BBF_TRY_BEG) != 0)
            {
                assert(top->hasTryIndex());
                unsigned newTryIndex = ehTrueEnclosingTryIndexIL(top->getTryIndex());
                unsigned compareTryIndex = head->hasTryIndex() ? head->getTryIndex() : EHblkDsc::NO_ENCLOSING_INDEX;
                headHasCorrectEHRegion = newTryIndex == compareTryIndex;
            }
            else
            {
                headHasCorrectEHRegion = BasicBlock::sameTryRegion(head, top);
            }

            if (headHasCorrectEHRegion)
            {
                JITDUMP(" converting existing header " FMT_BB " into pre-header\n", head->bbNum);
                loop.lpFlags |= LPFLG_HAS_PREHEAD;
                assert((head->bbFlags & BBF_LOOP_PREHEADER) == 0); // It isn't already a loop pre-header
                head->bbFlags |= BBF_LOOP_PREHEADER;
                INDEBUG(loop.lpValidatePreHeader());
                INDEBUG(fgDebugCheckLoopTable());
                return;
            }
            else
            {
                JITDUMP(" existing head " FMT_BB " doesn't have correct EH region\n", head->bbNum);
            }
        }
        else
        {
            JITDUMP(" existing head " FMT_BB " isn't unique non-loop predecessor of loop entry\n", head->bbNum);
        }
    }
    else
    {
        JITDUMP(" existing head " FMT_BB " doesn't have unique successor branching to loop entry\n", head->bbNum);
    }

    // Allocate a new basic block for the pre-header.
    const bool isTopEntryLoop = loop.lpIsTopEntry();

    BasicBlock* preHead = bbNewBasicBlock(isTopEntryLoop ? BBJ_NONE : BBJ_ALWAYS);
    preHead->bbFlags |= BBF_INTERNAL | BBF_LOOP_PREHEADER;

    // A non-top-entry loop needs an explicit jump from the pre-header to the entry.
    if (!isTopEntryLoop)
    {
        preHead->bbJumpDest = entry;
    }

    // Must set IL code offset
    preHead->bbCodeOffs = top->bbCodeOffs;

    // Set the default value of the preHead weight in case we don't have
    // valid profile data and since this blocks weight is just an estimate
    // we clear any BBF_PROF_WEIGHT flag that we may have picked up from head.
    //
    preHead->inheritWeight(head);
    preHead->bbFlags &= ~BBF_PROF_WEIGHT;

    // Copy the bbReach set from head for the new preHead block
    preHead->bbReach = BlockSetOps::MakeEmpty(this);
    BlockSetOps::Assign(this, preHead->bbReach, head->bbReach);

    // Also include 'head' in the preHead bbReach set
    BlockSetOps::AddElemD(this, preHead->bbReach, head->bbNum);

#ifdef DEBUG
    if (verbose)
    {
        printf("\nCreated PreHeader (" FMT_BB ") for loop " FMT_LP " (" FMT_BB " - " FMT_BB, preHead->bbNum, lnum,
               top->bbNum, loop.lpBottom->bbNum);
        if (!isTopEntryLoop)
        {
            printf(", entry " FMT_BB, entry->bbNum);
        }
        printf("), with weight = %s\n", refCntWtd2str(preHead->getBBWeight(this)));
    }
#endif

    // The preheader block is part of the containing loop (if any).
    preHead->bbNatLoopNum = loop.lpParent;

    // With profile data and a conditional 'head', try to compute a better
    // pre-header weight from the profiled flow into the loop.
    if (fgIsUsingProfileWeights() && (head->bbJumpKind == BBJ_COND))
    {
        if ((head->bbWeight == BB_ZERO_WEIGHT) || (entry->bbWeight == BB_ZERO_WEIGHT))
        {
            // No profile flow through 'head' or 'entry': the pre-header is run rarely.
            preHead->bbWeight = BB_ZERO_WEIGHT;
            preHead->bbFlags |= BBF_RUN_RARELY;
        }
        else
        {
            // Allow for either the fall-through or branch to target 'entry'.
            BasicBlock* skipLoopBlock;
            if (head->bbNext == entry)
            {
                skipLoopBlock = head->bbJumpDest;
            }
            else
            {
                skipLoopBlock = head->bbNext;
            }
            assert(skipLoopBlock != entry);

            bool allValidProfileWeights =
                (head->hasProfileWeight() && skipLoopBlock->hasProfileWeight() && entry->hasProfileWeight());

            if (allValidProfileWeights)
            {
                weight_t loopEnteredCount;
                weight_t loopSkippedCount;
                bool useEdgeWeights = fgHaveValidEdgeWeights;

                if (useEdgeWeights)
                {
                    const flowList* edgeToEntry = fgGetPredForBlock(entry, head);
                    const flowList* edgeToSkipLoop = fgGetPredForBlock(skipLoopBlock, head);
                    noway_assert(edgeToEntry != nullptr);
                    noway_assert(edgeToSkipLoop != nullptr);

                    // Use the midpoint of each edge's [min, max] weight range.
                    loopEnteredCount = (edgeToEntry->edgeWeightMin() + edgeToEntry->edgeWeightMax()) / 2.0;
                    loopSkippedCount = (edgeToSkipLoop->edgeWeightMin() + edgeToSkipLoop->edgeWeightMax()) / 2.0;

                    // Watch out for cases where edge weights were not properly maintained
                    // so that it appears no profile flow enters the loop.
                    //
                    useEdgeWeights = !fgProfileWeightsConsistent(loopEnteredCount, BB_ZERO_WEIGHT);
                }

                if (!useEdgeWeights)
                {
                    // Fall back to block weights when edge weights are unusable.
                    loopEnteredCount = entry->bbWeight;
                    loopSkippedCount = skipLoopBlock->bbWeight;
                }

                weight_t loopTakenRatio = loopEnteredCount / (loopEnteredCount + loopSkippedCount);

                JITDUMP("%s edge weights; loopEnterCount " FMT_WT " loopSkipCount " FMT_WT " taken ratio " FMT_WT "\n",
                        fgHaveValidEdgeWeights ? (useEdgeWeights ? "valid" : "ignored") : "invalid", loopEnteredCount,
                        loopSkippedCount, loopTakenRatio);

                // Calculate a good approximation of the preHead's block weight
                weight_t preHeadWeight = (head->bbWeight * loopTakenRatio);
                preHead->setBBProfileWeight(preHeadWeight);
                noway_assert(!preHead->isRunRarely());
            }
        }
    }

    // Link in the preHead block
    fgInsertBBbefore(top, preHead);

    // Ideally we would re-run SSA and VN if we optimized by doing loop hoisting.
    // However, that is too expensive at this point. Instead, we update the phi
    // node block references, if we created pre-header block due to hoisting.
    // This is sufficient because any definition participating in SSA that flowed
    // into the phi via the loop header block will now flow through the preheader
    // block from the header block.
    for (Statement* const stmt : top->Statements())
    {
        GenTree* tree = stmt->GetRootNode();
        if (tree->OperGet() != GT_ASG)
        {
            break;
        }
        GenTree* op2 = tree->gtGetOp2();
        if (op2->OperGet() != GT_PHI)
        {
            break;
        }
        // Retarget any phi argument whose predecessor was 'head' to 'preHead'.
        for (GenTreePhi::Use& use : op2->AsPhi()->Uses())
        {
            GenTreePhiArg* phiArg = use.GetNode()->AsPhiArg();
            if (phiArg->gtPredBB == head)
            {
                phiArg->gtPredBB = preHead;
            }
        }
    }

    // In which EH region should the pre-header live?
    //
    // The pre-header block is added immediately before `top`.
    //
    // The `top` block cannot be the first block of a filter or handler: `top` must have a back-edge from a
    // BBJ_COND or BBJ_ALWAYS within the loop, and a filter or handler cannot be branched to like that.
    //
    // The `top` block can be the first block of a `try` region, and you can fall into or branch to the
    // first block of a `try` region. (For top-entry loops, `top` will both be the target of a back-edge
    // and a fall-through from the previous block.)
    //
    // If the `top` block is NOT the first block of a `try` region, the pre-header can simply extend the
    // `top` block region.
    //
    // If the `top` block IS the first block of a `try`, we find its parent region and use that. For mutual-protect
    // regions, we need to find the actual parent, as the block stores the most "nested" mutual region. For
    // non-mutual-protect regions, due to EH canonicalization, we are guaranteed that no other EH regions begin
    // on the same block, so looking to just the parent is sufficient. Note that we can't just extend the EH
    // region of `top` to the pre-header, because `top` will still be the target of backward branches from
    // within the loop. If those backward branches come from outside the `try` (say, only the top half of the loop
    // is a `try` region), then we can't branch to a non-first `try` region block (you always must enter the `try`
    // in the first block).
    //
    // Note that hoisting any code out of a try region, for example, to a pre-header block in a different
    // EH region, needs to ensure that no exceptions will be thrown.

    assert(!fgIsFirstBlockOfFilterOrHandler(top));

    if ((top->bbFlags & BBF_TRY_BEG) != 0)
    {
        // `top` is the beginning of a try block. Figure out the EH region to use.
        assert(top->hasTryIndex());
        unsigned short newTryIndex = (unsigned short)ehTrueEnclosingTryIndexIL(top->getTryIndex());
        if (newTryIndex == EHblkDsc::NO_ENCLOSING_INDEX)
        {
            // No EH try index.
            preHead->clearTryIndex();
        }
        else
        {
            preHead->setTryIndex(newTryIndex);
        }

        // What handler region to use? Use the same handler region as `top`.
        preHead->copyHndIndex(top);
    }
    else
    {
        // `top` is not the beginning of a try block. Just extend the EH region to the pre-header.
        // We don't need to call `fgExtendEHRegionBefore()` because all the special handling that function
        // does is to account for `top` being the first block of a `try` or handler region, which we know
        // is not true.
        preHead->copyEHRegion(top);
    }

    // TODO-CQ: set dominators for this block, to allow loop optimizations requiring them
    //     (e.g: hoisting expression in a loop with the same 'head' as this one)

    // Update the loop table
    loop.lpHead = preHead;
    loop.lpFlags |= LPFLG_HAS_PREHEAD;

    // The new block becomes the 'head' of the loop - update bbRefs and bbPreds.
    // All non-loop predecessors of 'entry' now jump to 'preHead'.
    preHead->bbRefs = 0;
    bool checkNestedLoops = false;

    for (BasicBlock* const predBlock : entry->PredBlocks())
    {
        // Is the predBlock in the loop?
        //
        // We want to use:
        //    const bool intraLoopPred = loop.lpContains(predBlock);
        // but we can't depend on the bbNum ordering.
        //
        // Previously, this code wouldn't redirect predecessors dominated by the entry. However, that can
        // lead to a case where non-loop predecessor is dominated by the loop entry, and that predecessor
        // continues to branch to the entry, not the new pre-header. This is normally ok for hoisting
        // because it will introduce an SSA PHI def within the loop, which will inhibit hoisting. However,
        // it complicates the definition of what a pre-header is.
        const bool intraLoopPred = optLoopContains(lnum, predBlock->bbNatLoopNum);
        if (intraLoopPred)
        {
            // An in-loop predecessor other than the loop bottom suggests nested
            // loops sharing this entry; check for that after the redirection.
            if (predBlock != loop.lpBottom)
            {
                checkNestedLoops = true;
            }
            continue;
        }

        // Redirect this non-loop predecessor from 'entry' to 'preHead',
        // keeping the pred lists up to date.
        switch (predBlock->bbJumpKind)
        {
            case BBJ_NONE:
                // This 'entry' predecessor that isn't dominated by 'entry' must be outside the loop,
                // meaning it must be fall-through to 'entry', and we must have a top-entry loop.
                noway_assert((entry == top) && (predBlock == head) && (predBlock->bbNext == preHead));
                fgRemoveRefPred(entry, predBlock);
                fgAddRefPred(preHead, predBlock);
                break;

            case BBJ_COND:
                if (predBlock->bbJumpDest == entry)
                {
                    predBlock->bbJumpDest = preHead;
                    noway_assert(predBlock->bbNext != preHead);
                }
                else
                {
                    noway_assert((entry == top) && (predBlock == head) && (predBlock->bbNext == preHead));
                }
                fgRemoveRefPred(entry, predBlock);
                fgAddRefPred(preHead, predBlock);
                break;

            case BBJ_ALWAYS:
            case BBJ_EHCATCHRET:
                noway_assert(predBlock->bbJumpDest == entry);
                predBlock->bbJumpDest = preHead;
                fgRemoveRefPred(entry, predBlock);
                fgAddRefPred(preHead, predBlock);
                break;

            case BBJ_SWITCH:
                // Rewrite every switch table target that pointed at 'entry'.
                unsigned jumpCnt;
                jumpCnt = predBlock->bbJumpSwt->bbsCount;
                BasicBlock** jumpTab;
                jumpTab = predBlock->bbJumpSwt->bbsDstTab;

                do
                {
                    assert(*jumpTab);
                    if ((*jumpTab) == entry)
                    {
                        (*jumpTab) = preHead;

                        fgRemoveRefPred(entry, predBlock);
                        fgAddRefPred(preHead, predBlock);
                    }
                } while (++jumpTab, --jumpCnt);

                UpdateSwitchTableTarget(predBlock, entry, preHead);
                break;

            default:
                noway_assert(!"Unexpected bbJumpKind");
                break;
        }
    }

    flowList* const edgeToPreHeader = fgGetPredForBlock(preHead, head);
    noway_assert(edgeToPreHeader != nullptr);
    edgeToPreHeader->setEdgeWeights(preHead->bbWeight, preHead->bbWeight, preHead);
    noway_assert(fgGetPredForBlock(entry, preHead) == nullptr);
    flowList* const edgeFromPreHeader = fgAddRefPred(entry, preHead);
    edgeFromPreHeader->setEdgeWeights(preHead->bbWeight, preHead->bbWeight, entry);

    /*
        If we found at least one back-edge in the flowgraph pointing to the entry of the loop
        (other than the back-edge of the loop we are considering) then we likely have nested
        do-while loops with the same entry block and inserting the preheader block changes the head
        of all the nested loops. Now we will update this piece of information in the loop table, and
        mark all nested loops as having a preheader (the preheader block can be shared among all nested
        do-while loops with the same entry block).
    */
    if (checkNestedLoops)
    {
        for (unsigned l = 0; l < optLoopCount; l++)
        {
            if (optLoopTable[l].lpHead == head)
            {
                // loop.lpHead was already changed from 'head' to 'preHead'
                noway_assert(l != lnum);

                // If it shares head, it must be a top-entry loop that shares top.
                noway_assert(optLoopTable[l].lpEntry == top);

                optUpdateLoopHead(l, optLoopTable[l].lpHead, preHead);
                optLoopTable[l].lpFlags |= LPFLG_HAS_PREHEAD;
#ifdef DEBUG
                if (verbose)
                {
                    printf("Same PreHeader (" FMT_BB ") can be used for loop " FMT_LP " (" FMT_BB " - " FMT_BB ")\n\n",
                           preHead->bbNum, l, top->bbNum, optLoopTable[l].lpBottom->bbNum);
                }
#endif
            }
        }
    }

    // We added a new block and altered the preds list; make sure the flow graph has been marked as being modified.
    assert(fgModified);

#ifdef DEBUG
    fgDebugCheckBBlist();
    fgVerifyHandlerTab();
    fgDebugCheckLoopTable();

    if (verbose)
    {
        JITDUMP("*************** After fgCreateLoopPreHeader for " FMT_LP "\n", lnum);
        fgDispBasicBlocks();
        fgDispHandlerTab();
        optPrintLoopTable();
    }
#endif
}

//------------------------------------------------------------------------------
// optBlockIsLoopEntry: Check if "blk" is the entry of any non-removed loop that
// contains it, walking from its innermost natural loop outward.
//
// Arguments:
//    blk   - The block to check
//    pLnum - [out] On success, set to the index of the loop whose entry is "blk"
//
// Return Value:
//    true if "blk" is a loop entry ("*pLnum" is then valid); false otherwise.
//
bool Compiler::optBlockIsLoopEntry(BasicBlock* blk, unsigned* pLnum)
{
    for (unsigned lnum = blk->bbNatLoopNum; lnum != BasicBlock::NOT_IN_LOOP; lnum = optLoopTable[lnum].lpParent)
    {
        // Removed loops cannot be entered.
        if (optLoopTable[lnum].lpFlags & LPFLG_REMOVED)
        {
            continue;
        }
        if (optLoopTable[lnum].lpEntry == blk)
        {
            *pLnum = lnum;
            return true;
        }
    }
    return false;
}

// Recompute the per-loop side-effect summaries (variable liveness sets and the
// "contains call" flag) for all loops, processing each outermost loop nest.
// Also rebuilds lvaFloatVars (and lvaLongVars on 32-bit targets).
void Compiler::optComputeLoopSideEffects()
{
    unsigned lnum;
    for (lnum = 0; lnum < optLoopCount; lnum++)
    {
        // Reset the summaries before recomputing them.
        VarSetOps::AssignNoCopy(this, optLoopTable[lnum].lpVarInOut, VarSetOps::MakeEmpty(this));
        VarSetOps::AssignNoCopy(this, optLoopTable[lnum].lpVarUseDef, VarSetOps::MakeEmpty(this));
        optLoopTable[lnum].lpFlags &= ~LPFLG_CONTAINS_CALL;
    }

    for (lnum = 0; lnum < optLoopCount; lnum++)
    {
        if (optLoopTable[lnum].lpFlags & LPFLG_REMOVED)
        {
            continue;
        }

        if (optLoopTable[lnum].lpParent == BasicBlock::NOT_IN_LOOP)
        { // Is outermost...
            optComputeLoopNestSideEffects(lnum);
        }
    }

    // Record which tracked locals are floating point (and, on 32-bit targets, long).
    VarSetOps::AssignNoCopy(this, lvaFloatVars, VarSetOps::MakeEmpty(this));
#ifndef TARGET_64BIT
    VarSetOps::AssignNoCopy(this, lvaLongVars, VarSetOps::MakeEmpty(this));
#endif

    for (unsigned i = 0; i < lvaCount; i++)
    {
        LclVarDsc* varDsc = lvaGetDesc(i);
        if (varDsc->lvTracked)
        {
            if (varTypeIsFloating(varDsc->lvType))
            {
                VarSetOps::AddElemD(this, lvaFloatVars, varDsc->lvVarIndex);
            }
#ifndef TARGET_64BIT
            else if (varTypeIsLong(varDsc->lvType))
            {
                VarSetOps::AddElemD(this, lvaLongVars, varDsc->lvVarIndex);
            }
#endif
        }
    }
}

// Compute the side effects of every block in the loop nest rooted at
// outermost loop "lnum".
void Compiler::optComputeLoopNestSideEffects(unsigned lnum)
{
    JITDUMP("optComputeLoopNestSideEffects for " FMT_LP "\n", lnum);
    assert(optLoopTable[lnum].lpParent == BasicBlock::NOT_IN_LOOP); // Requires: lnum is outermost.
    for (BasicBlock* const bbInLoop : optLoopTable[lnum].LoopBlocks())
    {
        if (!optComputeLoopSideEffectsOfBlock(bbInLoop))
        {
            // When optComputeLoopSideEffectsOfBlock returns false, we encountered
            // a block that was moved into the loop range (by fgReorderBlocks),
            // but not marked correctly as being inside the loop.
            // We conservatively mark this loop (and any outer loops)
            // as having memory havoc side effects.
            //
            // Record that all loops containing this block have memory havoc effects.
            //
            optRecordLoopNestsMemoryHavoc(lnum, fullMemoryKindSet);

            // All done, no need to keep visiting more blocks
            break;
        }
    }
}

// Record that loop "lnum" and all of its enclosing loops have the given
// memory havoc effects for each memory kind present in "memoryHavoc".
void Compiler::optRecordLoopNestsMemoryHavoc(unsigned lnum, MemoryKindSet memoryHavoc)
{
    // We should start out with 'lnum' set to a valid natural loop index
    assert(lnum != BasicBlock::NOT_IN_LOOP);

    while (lnum != BasicBlock::NOT_IN_LOOP)
    {
        for (MemoryKind memoryKind : allMemoryKinds())
        {
            if ((memoryHavoc & memoryKindSet(memoryKind)) != 0)
            {
                optLoopTable[lnum].lpLoopHasMemoryHavoc[memoryKind] = true;
            }
        }

        // Move lnum to the next outermost loop that we need to mark
        lnum = optLoopTable[lnum].lpParent;
    }
}

//------------------------------------------------------------------------------
// optComputeLoopSideEffectsOfBlock: Accumulate the side effects of "blk" into
// the summaries of its most-nested containing loop and all enclosing loops.
//
// Arguments:
//    blk - The block to process
//
// Return Value:
//    false if "blk" is unexpectedly not inside any loop (its bbNatLoopNum is
//    NOT_IN_LOOP); true otherwise.
//
bool Compiler::optComputeLoopSideEffectsOfBlock(BasicBlock* blk)
{
    unsigned mostNestedLoop = blk->bbNatLoopNum;
    JITDUMP("optComputeLoopSideEffectsOfBlock " FMT_BB ", mostNestedLoop %d\n", blk->bbNum, mostNestedLoop);
    if (mostNestedLoop == BasicBlock::NOT_IN_LOOP)
    {
        return false;
    }
    AddVariableLivenessAllContainingLoops(mostNestedLoop, blk);

    // MemoryKinds for which an in-loop call or store has arbitrary effects.
    MemoryKindSet memoryHavoc = emptyMemoryKindSet;

    // Now iterate over the remaining statements, and their trees.
    for (Statement* const stmt : blk->NonPhiStatements())
    {
        for (GenTree* const tree : stmt->TreeList())
        {
            genTreeOps oper = tree->OperGet();

            // Even after we set memoryHavoc we still may want to know if a loop contains calls
            if (memoryHavoc == fullMemoryKindSet)
            {
                if (oper == GT_CALL)
                {
                    // Record that this loop contains a call
                    AddContainsCallAllContainingLoops(mostNestedLoop);
                }

                // If we just set LPFLG_CONTAINS_CALL or it was previously set
                if (optLoopTable[mostNestedLoop].lpFlags & LPFLG_CONTAINS_CALL)
                {
                    // We can early exit after both memoryHavoc and LPFLG_CONTAINS_CALL are both set to true.
                    break;
                }

                // We are just looking for GT_CALL nodes after memoryHavoc was set.
                continue;
            }

            // otherwise memoryHavoc is not set for at least one heap ID
            assert(memoryHavoc != fullMemoryKindSet);

            // This body is a distillation of the memory side-effect code of value numbering.
            // We also do a very limited analysis of byref PtrTo values, to cover some cases
            // that the compiler creates.
            if (oper == GT_ASG)
            {
                GenTree* lhs = tree->AsOp()->gtOp1->gtEffectiveVal(/*commaOnly*/ true);

                if (lhs->OperGet() == GT_IND)
                {
                    GenTree* arg = lhs->AsOp()->gtOp1->gtEffectiveVal(/*commaOnly*/ true);
                    FieldSeqNode* fldSeqArrElem = nullptr;

                    if ((tree->gtFlags & GTF_IND_VOLATILE) != 0)
                    {
                        // Volatile stores get conservative treatment: havoc on both memory kinds.
                        memoryHavoc |= memoryKindSet(GcHeap, ByrefExposed);
                        continue;
                    }

                    ArrayInfo arrInfo;

                    if (arg->TypeGet() == TYP_BYREF && arg->OperGet() == GT_LCL_VAR)
                    {
                        // If it's a local byref for which we recorded a value number, use that...
                        GenTreeLclVar* argLcl = arg->AsLclVar();
                        if (lvaInSsa(argLcl->GetLclNum()) && argLcl->HasSsaName())
                        {
                            ValueNum argVN =
                                lvaTable[argLcl->GetLclNum()].GetPerSsaData(argLcl->GetSsaNum())->m_vnPair.GetLiberal();
                            VNFuncApp funcApp;
                            if (argVN != ValueNumStore::NoVN && vnStore->GetVNFunc(argVN, &funcApp) &&
                                funcApp.m_func == VNF_PtrToArrElem)
                            {
                                assert(vnStore->IsVNHandle(funcApp.m_args[0]));
                                CORINFO_CLASS_HANDLE elemType =
                                    CORINFO_CLASS_HANDLE(vnStore->ConstantValue<size_t>(funcApp.m_args[0]));
                                AddModifiedElemTypeAllContainingLoops(mostNestedLoop, elemType);
                                // Don't set memoryHavoc for GcHeap below. Do set memoryHavoc for ByrefExposed
                                // (conservatively assuming that a byref may alias the array element)
                                memoryHavoc |= memoryKindSet(ByrefExposed);
                                continue;
                            }
                        }
                        // Otherwise...
                        memoryHavoc |= memoryKindSet(GcHeap, ByrefExposed);
                    }
                    // Is the LHS an array index expression?
                    else if (lhs->ParseArrayElemForm(this, &arrInfo, &fldSeqArrElem))
                    {
                        // We actually ignore "fldSeq" -- any modification to an S[], at any
                        // field of "S", will lose all information about the array type.
                        CORINFO_CLASS_HANDLE elemTypeEq = EncodeElemType(arrInfo.m_elemType, arrInfo.m_elemStructType);
                        AddModifiedElemTypeAllContainingLoops(mostNestedLoop, elemTypeEq);
                        // Conservatively assume byrefs may alias this array element
                        memoryHavoc |= memoryKindSet(ByrefExposed);
                    }
                    else
                    {
                        GenTree* baseAddr = nullptr;
                        FieldSeqNode* fldSeq = nullptr;
                        if (arg->IsFieldAddr(this, &baseAddr, &fldSeq))
                        {
                            assert((fldSeq != nullptr) && (fldSeq != FieldSeqStore::NotAField()) &&
                                   !fldSeq->IsPseudoField());

                            FieldKindForVN fieldKind =
                                (baseAddr != nullptr) ? FieldKindForVN::WithBaseAddr : FieldKindForVN::SimpleStatic;
                            AddModifiedFieldAllContainingLoops(mostNestedLoop, fldSeq->GetFieldHandle(), fieldKind);
                            // Conservatively assume byrefs may alias this object.
                            memoryHavoc |= memoryKindSet(ByrefExposed);
                        }
                        else
                        {
                            memoryHavoc |= memoryKindSet(GcHeap, ByrefExposed);
                        }
                    }
                }
                else if (lhs->OperIsBlk())
                {
                    GenTreeLclVarCommon* lclVarTree;
                    bool isEntire;
                    if (!tree->DefinesLocal(this, &lclVarTree, &isEntire))
                    {
                        // For now, assume arbitrary side effects on GcHeap/ByrefExposed...
                        memoryHavoc |= memoryKindSet(GcHeap, ByrefExposed);
                    }
                    else if (lvaVarAddrExposed(lclVarTree->GetLclNum()))
                    {
                        memoryHavoc |= memoryKindSet(ByrefExposed);
                    }
                }
                else if (lhs->OperGet() == GT_CLS_VAR)
                {
                    AddModifiedFieldAllContainingLoops(mostNestedLoop, lhs->AsClsVar()->gtClsVarHnd,
                                                       FieldKindForVN::SimpleStatic);
                    // Conservatively assume byrefs may alias this static field
                    memoryHavoc |= memoryKindSet(ByrefExposed);
                }
                // Otherwise, must be local lhs form. I should assert that.
                else if (lhs->OperGet() == GT_LCL_VAR)
                {
                    GenTreeLclVar* lhsLcl = lhs->AsLclVar();
                    GenTree* rhs = tree->AsOp()->gtOp2;
                    ValueNum rhsVN = rhs->gtVNPair.GetLiberal();
                    // If we gave the RHS a value number, propagate it.
                    if (rhsVN != ValueNumStore::NoVN)
                    {
                        rhsVN = vnStore->VNNormalValue(rhsVN);
                        if (lvaInSsa(lhsLcl->GetLclNum()) && lhsLcl->HasSsaName())
                        {
                            lvaTable[lhsLcl->GetLclNum()]
                                .GetPerSsaData(lhsLcl->GetSsaNum())
                                ->m_vnPair.SetLiberal(rhsVN);
                        }
                    }
                    // If the local is address-exposed, count this as ByrefExposed havoc
                    if (lvaVarAddrExposed(lhsLcl->GetLclNum()))
                    {
                        memoryHavoc |= memoryKindSet(ByrefExposed);
                    }
                }
            }
            else // if (oper != GT_ASG)
            {
                switch (oper)
                {
                    case GT_COMMA:
                        tree->gtVNPair = tree->AsOp()->gtOp2->gtVNPair;
                        break;

                    case GT_ADDR:
                        // Is it an addr of an array index expression?
                        {
                            GenTree* addrArg = tree->AsOp()->gtOp1;
                            if (addrArg->OperGet() == GT_IND)
                            {
                                // Is the LHS an array index expression?
                                if (addrArg->gtFlags & GTF_IND_ARR_INDEX)
                                {
                                    ArrayInfo arrInfo;
                                    bool b = GetArrayInfoMap()->Lookup(addrArg, &arrInfo);
                                    assert(b);
                                    CORINFO_CLASS_HANDLE elemTypeEq =
                                        EncodeElemType(arrInfo.m_elemType, arrInfo.m_elemStructType);
                                    ValueNum elemTypeEqVN =
                                        vnStore->VNForHandle(ssize_t(elemTypeEq), GTF_ICON_CLASS_HDL);
                                    ValueNum ptrToArrElemVN =
                                        vnStore->VNForFunc(TYP_BYREF, VNF_PtrToArrElem, elemTypeEqVN,
                                                           // The rest are dummy arguments.
                                                           vnStore->VNForNull(), vnStore->VNForNull(),
                                                           vnStore->VNForNull());
                                    tree->gtVNPair.SetBoth(ptrToArrElemVN);
                                }
                            }
                        }
                        break;

#ifdef FEATURE_HW_INTRINSICS
                    case GT_HWINTRINSIC:
                        if (tree->AsHWIntrinsic()->OperIsMemoryStore())
                        {
                            memoryHavoc |= memoryKindSet(GcHeap, ByrefExposed);
                        }
                        break;
#endif // FEATURE_HW_INTRINSICS

                    case GT_LOCKADD:
                    case GT_XORR:
                    case GT_XAND:
                    case GT_XADD:
                    case GT_XCHG:
                    case GT_CMPXCHG:
                    case GT_MEMORYBARRIER:
                    case GT_STORE_DYN_BLK:
                    {
                        memoryHavoc |= memoryKindSet(GcHeap, ByrefExposed);
                    }
                    break;

                    case GT_CALL:
                    {
                        GenTreeCall* call = tree->AsCall();

                        // Record that this loop contains a call
                        AddContainsCallAllContainingLoops(mostNestedLoop);

                        if (call->gtCallType == CT_HELPER)
                        {
                            CorInfoHelpFunc helpFunc = eeGetHelperNum(call->gtCallMethHnd);
                            if (s_helperCallProperties.MutatesHeap(helpFunc))
                            {
                                memoryHavoc |= memoryKindSet(GcHeap, ByrefExposed);
                            }
                            else if (s_helperCallProperties.MayRunCctor(helpFunc))
                            {
                                // If the call is labeled as "Hoistable", then we've checked the
                                // class that would be constructed, and it is not precise-init, so
                                // the cctor will not be run by this call. Otherwise, it might be,
                                // and might have arbitrary side effects.
                                if ((tree->gtFlags & GTF_CALL_HOISTABLE) == 0)
                                {
                                    memoryHavoc |= memoryKindSet(GcHeap, ByrefExposed);
                                }
                            }
                        }
                        else
                        {
                            // Non-helper calls are assumed to have arbitrary memory effects.
                            memoryHavoc |= memoryKindSet(GcHeap, ByrefExposed);
                        }
                        break;
                    }

                    default:
                        // All other gtOper node kinds, leave 'memoryHavoc' unchanged (i.e. false)
                        assert(!tree->OperRequiresAsgFlag());
                        break;
                }
            }
        }
    }

    if (memoryHavoc != emptyMemoryKindSet)
    {
        // Record that all loops containing this block have this kind of memoryHavoc effects.
        optRecordLoopNestsMemoryHavoc(mostNestedLoop, memoryHavoc);
    }
    return true;
}

// Marks the containsCall information to "lnum" and any parent loops.
void Compiler::AddContainsCallAllContainingLoops(unsigned lnum)
{

#if FEATURE_LOOP_ALIGN
    // If this is the inner most loop, reset the LOOP_ALIGN flag
    // because a loop having call will not likely to benefit from
    // alignment
    if (optLoopTable[lnum].lpChild == BasicBlock::NOT_IN_LOOP)
    {
        BasicBlock* top = optLoopTable[lnum].lpTop;

        top->unmarkLoopAlign(this DEBUG_ARG("Loop with call"));
    }
#endif

    assert(0 <= lnum && lnum < optLoopCount);
    while (lnum != BasicBlock::NOT_IN_LOOP)
    {
        optLoopTable[lnum].lpFlags |= LPFLG_CONTAINS_CALL;
        lnum = optLoopTable[lnum].lpParent;
    }
}

// Adds the variable liveness information for 'blk' to 'this' LoopDsc
void Compiler::LoopDsc::AddVariableLiveness(Compiler* comp, BasicBlock* blk)
{
    VarSetOps::UnionD(comp, this->lpVarInOut, blk->bbLiveIn);
    VarSetOps::UnionD(comp, this->lpVarInOut, blk->bbLiveOut);

    VarSetOps::UnionD(comp, this->lpVarUseDef, blk->bbVarUse);
    VarSetOps::UnionD(comp, this->lpVarUseDef, blk->bbVarDef);
}

// Adds the variable liveness information for 'blk' to "lnum" and any parent loops.
void Compiler::AddVariableLivenessAllContainingLoops(unsigned lnum, BasicBlock* blk)
{
    assert(0 <= lnum && lnum < optLoopCount);
    while (lnum != BasicBlock::NOT_IN_LOOP)
    {
        optLoopTable[lnum].AddVariableLiveness(this, blk);
        lnum = optLoopTable[lnum].lpParent;
    }
}

// Adds "fldHnd" to the set of modified fields of "lnum" and any parent loops.
//------------------------------------------------------------------------
// AddModifiedFieldAllContainingLoops: Record that field "fldHnd" (with kind "fieldKind")
// is modified inside loop "lnum" and inside every loop containing it, by walking the
// lpParent chain up to the outermost loop.
//
void Compiler::AddModifiedFieldAllContainingLoops(unsigned lnum, CORINFO_FIELD_HANDLE fldHnd, FieldKindForVN fieldKind)
{
    assert(0 <= lnum && lnum < optLoopCount);
    while (lnum != BasicBlock::NOT_IN_LOOP)
    {
        optLoopTable[lnum].AddModifiedField(this, fldHnd, fieldKind);
        lnum = optLoopTable[lnum].lpParent;
    }
}

// Adds "elemType" to the set of modified array element types of "lnum" and any parent loops.
// Like AddModifiedFieldAllContainingLoops, walks the lpParent chain so enclosing loops also
// see the modification.
void Compiler::AddModifiedElemTypeAllContainingLoops(unsigned lnum, CORINFO_CLASS_HANDLE elemClsHnd)
{
    assert(0 <= lnum && lnum < optLoopCount);
    while (lnum != BasicBlock::NOT_IN_LOOP)
    {
        optLoopTable[lnum].AddModifiedElemType(this, elemClsHnd);
        lnum = optLoopTable[lnum].lpParent;
    }
}

//------------------------------------------------------------------------------
// optRemoveRangeCheck : Given an indexing node, mark it as not needing a range check.
//
// Arguments:
//    check  -  Range check tree, the raw CHECK node (ARRAY, SIMD or HWINTRINSIC).
//    comma  -  GT_COMMA to which the "check" belongs, "nullptr" if the check is a standalone one.
//    stmt   -  Statement the indexing nodes belong to.
//
// Return Value:
//    Rewritten "check" - no-op if it has no side effects or the tree that contains them.
//
// Assumptions:
//    This method is capable of removing checks of two kinds: COMMA-based and standalone top-level ones.
//    In case of a COMMA-based check, "check" must be a non-null first operand of a non-null COMMA.
//    In case of a standalone check, "comma" must be null and "check" - "stmt"'s root.
//
GenTree* Compiler::optRemoveRangeCheck(GenTreeBoundsChk* check, GenTree* comma, Statement* stmt)
{
#if !REARRANGE_ADDS
    noway_assert(!"can't remove range checks without REARRANGE_ADDS right now");
#endif

    noway_assert(stmt != nullptr);
    noway_assert((comma != nullptr && comma->OperIs(GT_COMMA) && comma->gtGetOp1() == check) ||
                 (check != nullptr && check->OperIs(GT_BOUNDS_CHECK) && comma == nullptr));
    noway_assert(check->OperIs(GT_BOUNDS_CHECK));

    // "tree" is the node that will survive in the statement: the COMMA if present,
    // otherwise the (soon to be no-op'd) check itself.
    GenTree* tree = comma != nullptr ? comma : check;

#ifdef DEBUG
    if (verbose)
    {
        printf("Before optRemoveRangeCheck:\n");
        gtDispTree(tree);
    }
#endif

    // Extract side effects from the check (assignments included via GTF_ASG).
    GenTree* sideEffList = nullptr;
    gtExtractSideEffList(check, &sideEffList, GTF_ASG);

    if (sideEffList != nullptr)
    {
        // We've got some side effects.
        if (tree->OperIs(GT_COMMA))
        {
            // Make the comma handle them.
            tree->AsOp()->gtOp1 = sideEffList;
        }
        else
        {
            // Make the statement execute them instead of the check.
            stmt->SetRootNode(sideEffList);
            tree = sideEffList;
        }
    }
    else
    {
        // No side effects: the check simply becomes a no-op in place.
        check->gtBashToNOP();
    }

    if (tree->OperIs(GT_COMMA))
    {
        // TODO-CQ: We should also remove the GT_COMMA, but in any case we can no longer CSE the GT_COMMA.
        tree->gtFlags |= GTF_DONT_CSE;
    }

    gtUpdateSideEffects(stmt, tree);

    // Recalculate the GetCostSz(), etc...
    gtSetStmtInfo(stmt);

    // Re-thread the nodes if necessary
    if (fgStmtListThreaded)
    {
        fgSetStmtSeq(stmt);
    }

#ifdef DEBUG
    if (verbose)
    {
        // gtUpdateSideEffects can update the side effects for ancestors in the tree, so display the whole statement
        // tree, not just the sub-tree.
        printf("After optRemoveRangeCheck for [%06u]:\n", dspTreeID(tree));
        gtDispTree(stmt->GetRootNode());
    }
#endif

    // Note: returns the original check node even when side effects replaced it in the
    // statement; callers use the wrappers below for the common cases.
    return check;
}

//------------------------------------------------------------------------------
// optRemoveStandaloneRangeCheck : A thin wrapper over optRemoveRangeCheck that removes standalone checks.
//
// Arguments:
//    check - The standalone top-level CHECK node.
//    stmt  - The statement "check" is a root node of.
//
// Return Value:
//    If "check" has no side effects, it is returned, bashed to a no-op.
//    If it has side effects, the tree that executes them is returned.
//
GenTree* Compiler::optRemoveStandaloneRangeCheck(GenTreeBoundsChk* check, Statement* stmt)
{
    assert(check != nullptr);
    assert(stmt != nullptr);
    assert(check == stmt->GetRootNode());

    return optRemoveRangeCheck(check, nullptr, stmt);
}

//------------------------------------------------------------------------------
// optRemoveCommaBasedRangeCheck : A thin wrapper over optRemoveRangeCheck that removes COMMA-based checks.
//
// Arguments:
//    comma - GT_COMMA of which the first operand is the CHECK to be removed.
//    stmt  - The statement "comma" belongs to.
//
void Compiler::optRemoveCommaBasedRangeCheck(GenTree* comma, Statement* stmt)
{
    assert(comma != nullptr && comma->OperIs(GT_COMMA));
    assert(stmt != nullptr);
    assert(comma->gtGetOp1()->OperIs(GT_BOUNDS_CHECK));

    optRemoveRangeCheck(comma->gtGetOp1()->AsBoundsChk(), comma, stmt);
}

/*****************************************************************************
 * Return the scale in an array reference, given a pointer to the
 * multiplication node.
 *
 * Arguments:
 *    mul     - GT_MUL or GT_LSH node whose second operand is an integral constant.
 *    pIndex  - [out, optional] receives the index sub-tree with the scale stripped.
 *    bRngChk - (DEBUG-only) when true, asserts the index is not a GT_COMMA.
 *
 * Returns the constant scale factor (LSH amounts are converted to 1 << shift).
 */
ssize_t Compiler::optGetArrayRefScaleAndIndex(GenTree* mul, GenTree** pIndex DEBUGARG(bool bRngChk))
{
    assert(mul);
    assert(mul->gtOper == GT_MUL || mul->gtOper == GT_LSH);
    assert(mul->AsOp()->gtOp2->IsCnsIntOrI());

    ssize_t scale = mul->AsOp()->gtOp2->AsIntConCommon()->IconValue();

    if (mul->gtOper == GT_LSH)
    {
        scale = ((ssize_t)1) << scale;
    }

    GenTree* index = mul->AsOp()->gtOp1;

    if (index->gtOper == GT_MUL && index->AsOp()->gtOp2->IsCnsIntOrI())
    {
        // case of two cascading multiplications for constant int (e.g.  * 20 morphed to * 5 * 4):
        // When index->gtOper is GT_MUL and index->AsOp()->gtOp2->gtOper is GT_CNS_INT (i.e. * 5),
        //     we can bump up the scale from 4 to 5*4, and then change index to index->AsOp()->gtOp1.
        // Otherwise, we cannot optimize it. We will simply keep the original scale and index.
        scale *= index->AsOp()->gtOp2->AsIntConCommon()->IconValue();
        index = index->AsOp()->gtOp1;
    }

    assert(!bRngChk || index->gtOper != GT_COMMA);

    if (pIndex)
    {
        *pIndex = index;
    }

    return scale;
}

//-----------------------------------------------------------------------------
// OptTestInfo:     Member of OptBoolsDsc struct used to test if a GT_JTRUE or GT_RETURN node
//                  is a boolean comparison
//
struct OptTestInfo
{
    GenTree* testTree; // The root node of basic block with GT_JTRUE or GT_RETURN type to check boolean condition on
    GenTree* compTree; // The compare node (i.e. GT_EQ or GT_NE node) of the testTree
    bool     isBool;   // If the compTree is boolean expression
};

//-----------------------------------------------------------------------------
// OptBoolsDsc:     Descriptor used for Boolean Optimization
//
// Holds the two (or three) candidate blocks plus the per-attempt state computed
// while deciding whether and how to fold their conditions. One instance is created
// per candidate block pair in Compiler::optOptimizeBools.
//
class OptBoolsDsc
{
public:
    OptBoolsDsc(BasicBlock* b1, BasicBlock* b2, Compiler* comp)
    {
        m_b1   = b1;
        m_b2   = b2;
        m_b3   = nullptr;
        m_comp = comp;
    }

private:
    BasicBlock* m_b1; // The first basic block with the BBJ_COND conditional jump type
    BasicBlock* m_b2; // The next basic block of m_b1. Either BBJ_COND or BBJ_RETURN type
    BasicBlock* m_b3; // m_b1->bbJumpDest. Null if m_b2 is not a return block.

    Compiler* m_comp; // The pointer to the Compiler instance

    OptTestInfo m_testInfo1; // The first test info
    OptTestInfo m_testInfo2; // The second test info
    GenTree*    m_t3;        // The root node of the first statement of m_b3

    GenTree* m_c1; // The first operand of m_testInfo1.compTree
    GenTree* m_c2; // The first operand of m_testInfo2.compTree

    bool m_sameTarget; // if m_b1 and m_b2 jumps to the same destination

    genTreeOps m_foldOp;   // The fold operator (e.g., GT_AND or GT_OR)
    var_types  m_foldType; // The type of the folded tree
    genTreeOps m_cmpOp;    // The comparison operator (e.g., GT_EQ or GT_NE)

public:
    bool optOptimizeBoolsCondBlock();
    bool optOptimizeBoolsReturnBlock(BasicBlock* b3);
#ifdef DEBUG
    void optOptimizeBoolsGcStress();
#endif

private:
    Statement* optOptimizeBoolsChkBlkCond();
    GenTree* optIsBoolComp(OptTestInfo* pOptTest);
    bool optOptimizeBoolsChkTypeCostCond();
    void optOptimizeBoolsUpdateTrees();
};

//-----------------------------------------------------------------------------
// optOptimizeBoolsCondBlock:  Optimize boolean when bbJumpKind of both m_b1 and m_b2 are BBJ_COND
//
// Returns:
//    true if boolean optimization is done and m_b1 and m_b2 are folded into m_b1, else false.
//
// Notes:
//    m_b1 and m_b2 are set on entry.
//
//    Case 1: if b1.bbJumpDest == b2.bbJumpDest, it transforms
//        B1 : brtrue(t1, Bx)
//        B2 : brtrue(t2, Bx)
//        B3 :
//    to
//        B1 : brtrue(t1|t2, BX)
//        B3 :
//
//        For example, (x == 0 && y == 0 && z == 0) generates
//            B1: GT_JTRUE (BBJ_COND), jump to B4
//            B2: GT_JTRUE (BBJ_COND), jump to B4
//            B3: GT_RETURN (BBJ_RETURN)
//            B4: GT_RETURN (BBJ_RETURN)
//        and B1 and B2 are folded into B1:
//            B1: GT_JTRUE (BBJ_COND), jump to B4
//            B3: GT_RETURN (BBJ_RETURN)
//            B4: GT_RETURN (BBJ_RETURN)
//
//    Case 2: if B1.bbJumpDest == B2->bbNext, it transforms
//        B1 : brtrue(t1, B3)
//        B2 : brtrue(t2, Bx)
//        B3 :
//    to
//        B1 : brtrue((!t1) && t2, Bx)
//        B3 :
//
bool OptBoolsDsc::optOptimizeBoolsCondBlock()
{
    assert(m_b1 != nullptr && m_b2 != nullptr && m_b3 == nullptr);

    // Check if m_b1 and m_b2 jump to the same target and get back pointers to m_testInfo1 and t2 tree nodes

    m_t3 = nullptr;

    // Check if m_b1 and m_b2 have the same bbJumpDest

    if (m_b1->bbJumpDest == m_b2->bbJumpDest)
    {
        // Given the following sequence of blocks :
        //        B1: brtrue(t1, BX)
        //        B2: brtrue(t2, BX)
        //        B3:
        // we will try to fold it to :
        //        B1: brtrue(t1|t2, BX)
        //        B3:

        m_sameTarget = true;
    }
    else if (m_b1->bbJumpDest == m_b2->bbNext)
    {
        // Given the following sequence of blocks :
        //        B1: brtrue(t1, B3)
        //        B2: brtrue(t2, BX)
        //        B3:
        // we will try to fold it to :
        //        B1: brtrue((!t1)&&t2, BX)
        //        B3:

        m_sameTarget = false;
    }
    else
    {
        return false;
    }

    Statement* const s1 = optOptimizeBoolsChkBlkCond();
    if (s1 == nullptr)
    {
        return false;
    }

    // Find the branch conditions of m_b1 and m_b2

    m_c1 = optIsBoolComp(&m_testInfo1);
    if (m_c1 == nullptr)
    {
        return false;
    }

    m_c2 = optIsBoolComp(&m_testInfo2);
    if (m_c2 == nullptr)
    {
        return false;
    }

    // Find the type and cost conditions of m_testInfo1 and m_testInfo2

    if (!optOptimizeBoolsChkTypeCostCond())
    {
        return false;
    }

    // Get the fold operator and the comparison operator

    genTreeOps foldOp;
    genTreeOps cmpOp;
    var_types  foldType = m_c1->TypeGet();
    if (varTypeIsGC(foldType))
    {
        // GC-typed comparands are folded as native ints; bit-identity is all that matters here.
        foldType = TYP_I_IMPL;
    }

    assert(m_testInfo1.compTree->gtOper == GT_EQ || m_testInfo1.compTree->gtOper == GT_NE);

    if (m_sameTarget)
    {
        // Both conditions must be the same

        if (m_testInfo1.compTree->gtOper != m_testInfo2.compTree->gtOper)
        {
            return false;
        }

        if (m_testInfo1.compTree->gtOper == GT_EQ)
        {
            // t1:c1==0 t2:c2==0 ==> Branch to BX if either value is 0
            // So we will branch to BX if (c1&c2)==0

            foldOp = GT_AND;
            cmpOp  = GT_EQ;
        }
        else
        {
            // t1:c1!=0 t2:c2!=0 ==> Branch to BX if either value is non-0
            // So we will branch to BX if (c1|c2)!=0

            foldOp = GT_OR;
            cmpOp  = GT_NE;
        }
    }
    else
    {
        // The m_b1 condition must be the reverse of the m_b2 condition because the only operators
        // that we will see here are GT_EQ and GT_NE. So, if they are not the same, we have one of each.

        if (m_testInfo1.compTree->gtOper == m_testInfo2.compTree->gtOper)
        {
            return false;
        }

        if (m_testInfo1.compTree->gtOper == GT_EQ)
        {
            // t1:c1==0 t2:c2!=0 ==> Branch to BX if both values are non-0
            // So we will branch to BX if (c1&c2)!=0

            foldOp = GT_AND;
            cmpOp  = GT_NE;
        }
        else
        {
            // t1:c1!=0 t2:c2==0 ==> Branch to BX if both values are 0
            // So we will branch to BX if (c1|c2)==0

            foldOp = GT_OR;
            cmpOp  = GT_EQ;
        }
    }

    // Anding requires both values to be 0 or 1

    if ((foldOp == GT_AND) && (!m_testInfo1.isBool || !m_testInfo2.isBool))
    {
        return false;
    }

    //
    // Now update the trees
    //

    m_foldOp   = foldOp;
    m_foldType = foldType;
    m_cmpOp    = cmpOp;

    optOptimizeBoolsUpdateTrees();

#ifdef DEBUG
    if (m_comp->verbose)
    {
        printf("Folded %sboolean conditions of " FMT_BB " and " FMT_BB " to :\n", m_c2->OperIsLeaf() ? "" : "non-leaf ",
               m_b1->bbNum, m_b2->bbNum);
        m_comp->gtDispStmt(s1);
        printf("\n");
    }
#endif

    // Return true to continue the bool optimization for the rest of the BB chain
    return true;
}

//-----------------------------------------------------------------------------
// optOptimizeBoolsChkBlkCond: Checks block conditions if it can be boolean optimized
//
// Return:
//      If all conditions pass, returns the last statement of m_b1, else return nullptr.
//
// Notes:
//      This method checks if the second (and third block for cond/return/return case) contains only one statement,
//      and checks if tree operators are of the right type, e.g, GT_JTRUE, GT_RETURN.
//
//      On entry, m_b1, m_b2 are set and m_b3 is set for cond/return/return case.
//      If it passes all the conditions, m_testInfo1.testTree, m_testInfo2.testTree and m_t3 are set
//      to the root nodes of m_b1, m_b2 and m_b3 each.
//      SameTarget is also updated to true if m_b1 and m_b2 jump to the same destination.
//
Statement* OptBoolsDsc::optOptimizeBoolsChkBlkCond()
{
    assert(m_b1 != nullptr && m_b2 != nullptr);

    // m_b3 being set selects the cond/return/return shape.
    bool optReturnBlock = false;
    if (m_b3 != nullptr)
    {
        optReturnBlock = true;
    }

    // Find the block conditions of m_b1 and m_b2

    if (m_b2->countOfInEdges() > 1 || (optReturnBlock && m_b3->countOfInEdges() > 1))
    {
        return nullptr;
    }

    // Find the condition for the first block

    Statement* s1 = m_b1->lastStmt();

    GenTree* testTree1 = s1->GetRootNode();
    assert(testTree1->gtOper == GT_JTRUE);

    // The second and the third block must contain a single statement

    Statement* s2 = m_b2->firstStmt();
    if (s2->GetPrevStmt() != s2)
    {
        return nullptr;
    }

    GenTree* testTree2 = s2->GetRootNode();

    if (!optReturnBlock)
    {
        assert(testTree2->gtOper == GT_JTRUE);
    }
    else
    {
        if (testTree2->gtOper != GT_RETURN)
        {
            return nullptr;
        }

        Statement* s3 = m_b3->firstStmt();
        if (s3->GetPrevStmt() != s3)
        {
            return nullptr;
        }

        GenTree* testTree3 = s3->GetRootNode();
        if (testTree3->gtOper != GT_RETURN)
        {
            return nullptr;
        }

        if (!varTypeIsIntegral(testTree2->TypeGet()) || !varTypeIsIntegral(testTree3->TypeGet()))
        {
            return nullptr;
        }

        // The third block is Return with "CNS_INT int 0/1"
        if (testTree3->AsOp()->gtOp1->gtOper != GT_CNS_INT)
        {
            return nullptr;
        }

        if (testTree3->AsOp()->gtOp1->gtType != TYP_INT)
        {
            return nullptr;
        }

        m_t3 = testTree3;
    }

    m_testInfo1.testTree = testTree1;
    m_testInfo2.testTree = testTree2;

    return s1;
}

//-----------------------------------------------------------------------------
// optOptimizeBoolsChkTypeCostCond: Checks if type conditions meet the folding condition, and
//                                  if cost to fold is not too expensive
//
// Return:
//      True if it meets type conditions and cost conditions. Else false.
//
bool OptBoolsDsc::optOptimizeBoolsChkTypeCostCond()
{
    assert(m_testInfo1.compTree->OperIs(GT_EQ, GT_NE) && m_testInfo1.compTree->AsOp()->gtOp1 == m_c1);
    assert(m_testInfo2.compTree->OperIs(GT_EQ, GT_NE) && m_testInfo2.compTree->AsOp()->gtOp1 == m_c2);

    //
    // Leave out floats where the bit-representation is more complicated
    // - there are two representations for 0.
    //
    if (varTypeIsFloating(m_c1->TypeGet()) || varTypeIsFloating(m_c2->TypeGet()))
    {
        return false;
    }

    // Make sure the types involved are of the same sizes
    if (genTypeSize(m_c1->TypeGet()) != genTypeSize(m_c2->TypeGet()))
    {
        return false;
    }
    if (genTypeSize(m_testInfo1.compTree->TypeGet()) != genTypeSize(m_testInfo2.compTree->TypeGet()))
    {
        return false;
    }
#ifdef TARGET_ARMARCH
    // Skip the small operand which we cannot encode.
    if (varTypeIsSmall(m_c1->TypeGet()))
        return false;
#endif
    // The second condition must not contain side effects
    // (it will be evaluated unconditionally after folding).
    if (m_c2->gtFlags & GTF_GLOB_EFFECT)
    {
        return false;
    }

    // The second condition must not be too expensive
    m_comp->gtPrepareCost(m_c2);

    if (m_c2->GetCostEx() > 12)
    {
        return false;
    }

    return true;
}

//-----------------------------------------------------------------------------
// optOptimizeBoolsUpdateTrees: Fold the trees based on fold type and comparison type,
//                              update the edges, unlink removed blocks and update loop table
//
void OptBoolsDsc::optOptimizeBoolsUpdateTrees()
{
    assert(m_b1 != nullptr && m_b2 != nullptr);

    bool optReturnBlock = false;
    if (m_b3 != nullptr)
    {
        optReturnBlock = true;
    }

    // NOTE(review): the enum fields are compared against NULL here; this works only because
    // GT_NONE/TYP_UNDEF are zero-valued - presumably the intent is "fields were initialized".
    // Verify against the enum definitions.
    assert(m_foldOp != NULL && m_foldType != NULL && m_c1 != nullptr && m_c2 != nullptr);

    GenTree* cmpOp1 = m_comp->gtNewOperNode(m_foldOp, m_foldType, m_c1, m_c2);
    if (m_testInfo1.isBool && m_testInfo2.isBool)
    {
        // When we 'OR'/'AND' two booleans, the result is boolean as well
        cmpOp1->gtFlags |= GTF_BOOLEAN;
    }

    GenTree* t1Comp = m_testInfo1.compTree;
    t1Comp->SetOper(m_cmpOp);
    t1Comp->AsOp()->gtOp1         = cmpOp1;
    t1Comp->AsOp()->gtOp2->gtType = m_foldType; // Could have been varTypeIsGC()
    if (optReturnBlock)
    {
        // Update tree when m_b1 is BBJ_COND and m_b2 and m_b3 are GT_RETURN (BBJ_RETURN)
        t1Comp->AsOp()->gtOp2->AsIntCon()->gtIconVal = 0;
        m_testInfo1.testTree->gtOper                 = GT_RETURN;
        m_testInfo1.testTree->gtType                 = m_testInfo2.testTree->gtType;

        // Update the return count of flow graph
        assert(m_comp->fgReturnCount >= 2);
        --m_comp->fgReturnCount;
    }

#if FEATURE_SET_FLAGS
    // For comparisons against zero we will have the GTF_SET_FLAGS set
    // and this can cause an assert to fire in fgMoveOpsLeft(GenTree* tree)
    // during the CSE phase.
    //
    // So make sure to clear any GTF_SET_FLAGS bit on these operations
    // as they are no longer feeding directly into a comparisons against zero

    // Make sure that the GTF_SET_FLAGS bit is cleared.
    // Fix 388436 ARM JitStress WP7
    m_c1->gtFlags &= ~GTF_SET_FLAGS;
    m_c2->gtFlags &= ~GTF_SET_FLAGS;

    // The new top level node that we just created does feed directly into
    // a comparison against zero, so set the GTF_SET_FLAGS bit so that
    // we generate an instruction that sets the flags, which allows us
    // to omit the cmp with zero instruction.

    // Request that the codegen for cmpOp1 sets the condition flags
    // when it generates the code for cmpOp1.
    //
    cmpOp1->gtRequestSetFlags();
#endif

    if (!optReturnBlock)
    {
        // Update edges if m_b1: BBJ_COND and m_b2: BBJ_COND

        flowList* edge1 = m_comp->fgGetPredForBlock(m_b1->bbJumpDest, m_b1);
        flowList* edge2;

        if (m_sameTarget)
        {
            edge2 = m_comp->fgGetPredForBlock(m_b2->bbJumpDest, m_b2);
        }
        else
        {
            edge2 = m_comp->fgGetPredForBlock(m_b2->bbNext, m_b2);

            m_comp->fgRemoveRefPred(m_b1->bbJumpDest, m_b1);

            m_b1->bbJumpDest = m_b2->bbJumpDest;

            m_comp->fgAddRefPred(m_b2->bbJumpDest, m_b1);
        }

        assert(edge1 != nullptr);
        assert(edge2 != nullptr);

        // Combine the two edges' weight ranges; fall back to the widest possible
        // range if the sum overflowed (sum compares below either addend).
        weight_t edgeSumMin = edge1->edgeWeightMin() + edge2->edgeWeightMin();
        weight_t edgeSumMax = edge1->edgeWeightMax() + edge2->edgeWeightMax();
        if ((edgeSumMax >= edge1->edgeWeightMax()) && (edgeSumMax >= edge2->edgeWeightMax()))
        {
            edge1->setEdgeWeights(edgeSumMin, edgeSumMax, m_b1->bbJumpDest);
        }
        else
        {
            edge1->setEdgeWeights(BB_ZERO_WEIGHT, BB_MAX_WEIGHT, m_b1->bbJumpDest);
        }
    }

    /* Modify the target of the conditional jump and update bbRefs and bbPreds */

    if (optReturnBlock)
    {
        m_b1->bbJumpDest = nullptr;
        m_b1->bbJumpKind = BBJ_RETURN;
#ifdef DEBUG
        m_b1->bbJumpSwt = m_b2->bbJumpSwt;
#endif
        assert(m_b2->bbJumpKind == BBJ_RETURN);
        assert(m_b1->bbNext == m_b2);
        assert(m_b3 != nullptr);
    }
    else
    {
        assert(m_b1->bbJumpKind == BBJ_COND);
        assert(m_b2->bbJumpKind == BBJ_COND);
        assert(m_b1->bbJumpDest == m_b2->bbJumpDest);
        assert(m_b1->bbNext == m_b2);
        assert(m_b2->bbNext != nullptr);
    }

    if (!optReturnBlock)
    {
        // Update bbRefs and bbPreds
        //
        // Replace pred 'm_b2' for 'm_b2->bbNext' with 'm_b1'
        // Remove pred 'm_b2' for 'm_b2->bbJumpDest'
        m_comp->fgReplacePred(m_b2->bbNext, m_b2, m_b1);
        m_comp->fgRemoveRefPred(m_b2->bbJumpDest, m_b2);
    }

    // Get rid of the second block

    m_comp->fgUnlinkBlock(m_b2);
    m_b2->bbFlags |= BBF_REMOVED;
    // If m_b2 was the last block of a try or handler, update the EH table.
    m_comp->ehUpdateForDeletedBlock(m_b2);

    if (optReturnBlock)
    {
        // Get rid of the third block
        m_comp->fgUnlinkBlock(m_b3);
        m_b3->bbFlags |= BBF_REMOVED;
        // If m_b3 was the last block of a try or handler, update the EH table.
        m_comp->ehUpdateForDeletedBlock(m_b3);
    }

    // Update loop table
    m_comp->fgUpdateLoopsAfterCompacting(m_b1, m_b2);
    if (optReturnBlock)
    {
        m_comp->fgUpdateLoopsAfterCompacting(m_b1, m_b3);
    }
}

//-----------------------------------------------------------------------------
// optOptimizeBoolsReturnBlock: Optimize boolean when m_b1 is BBJ_COND and m_b2 and m_b3 are BBJ_RETURN
//
// Arguments:
//      b3:    Pointer to basic block b3
//
// Returns:
//      true if boolean optimization is done and m_b1, m_b2 and m_b3 are folded into m_b1, else false.
//
// Notes:
//      m_b1, m_b2 and m_b3 of OptBoolsDsc are set on entry.
//
//      if B1.bbJumpDest == b3, it transforms
//          B1 : brtrue(t1, B3)
//          B2 : ret(t2)
//          B3 : ret(0)
//      to
//          B1 : ret((!t1) && t2)
//
//          For example, (x==0 && y==0) generates:
//              B1: GT_JTRUE (BBJ_COND), jumps to B3
//              B2: GT_RETURN (BBJ_RETURN)
//              B3: GT_RETURN (BBJ_RETURN),
//          and it is folded into
//              B1: GT_RETURN (BBJ_RETURN)
//
bool OptBoolsDsc::optOptimizeBoolsReturnBlock(BasicBlock* b3)
{
    assert(m_b1 != nullptr && m_b2 != nullptr);

    // m_b3 is set for cond/return/return case
    m_b3 = b3;

    m_sameTarget        = false;
    Statement* const s1 = optOptimizeBoolsChkBlkCond();
    if (s1 == nullptr)
    {
        return false;
    }

    // Find the branch conditions of m_b1 and m_b2

    m_c1 = optIsBoolComp(&m_testInfo1);
    if (m_c1 == nullptr)
    {
        return false;
    }

    m_c2 = optIsBoolComp(&m_testInfo2);
    if (m_c2 == nullptr)
    {
        return false;
    }

    // Find the type and cost conditions of m_testInfo1 and m_testInfo2

    if (!optOptimizeBoolsChkTypeCostCond())
    {
        return false;
    }

    // Get the fold operator (m_foldOp, e.g., GT_OR/GT_AND) and
    // the comparison operator (m_cmpOp, e.g., GT_EQ/GT_NE)

    var_types foldType = m_c1->TypeGet();
    if (varTypeIsGC(foldType))
    {
        // GC-typed comparands are folded as native ints.
        foldType = TYP_I_IMPL;
    }
    m_foldType = foldType;

    m_foldOp = GT_NONE;
    m_cmpOp  = GT_NONE;

    genTreeOps foldOp;
    genTreeOps cmpOp;

    // The constants compared/returned; optOptimizeBoolsChkBlkCond and optIsBoolComp
    // have already guaranteed these operands are GT_CNS_INT.
    ssize_t it1val = m_testInfo1.compTree->AsOp()->gtOp2->AsIntCon()->gtIconVal;
    ssize_t it2val = m_testInfo2.compTree->AsOp()->gtOp2->AsIntCon()->gtIconVal;
    ssize_t it3val = m_t3->AsOp()->gtOp1->AsIntCon()->gtIconVal;

    if ((m_testInfo1.compTree->gtOper == GT_NE && m_testInfo2.compTree->gtOper == GT_EQ) &&
        (it1val == 0 && it2val == 0 && it3val == 0))
    {
        // Case: x == 0 && y == 0
        //      t1:c1!=0 t2:c2==0 t3:c3==0
        //      ==> true if (c1|c2)==0
        foldOp = GT_OR;
        cmpOp  = GT_EQ;
    }
    else if ((m_testInfo1.compTree->gtOper == GT_EQ && m_testInfo2.compTree->gtOper == GT_NE) &&
             (it1val == 0 && it2val == 0 && it3val == 0))
    {
        // Case: x == 1 && y ==1
        //      t1:c1!=1 t2:c2==1 t3:c3==0 is reversed from optIsBoolComp() to: t1:c1==0 t2:c2!=0 t3:c3==0
        //      ==> true if (c1&c2)!=0
        foldOp = GT_AND;
        cmpOp  = GT_NE;
    }
    else if ((m_testInfo1.compTree->gtOper == GT_EQ && m_testInfo2.compTree->gtOper == GT_EQ) &&
             (it1val == 0 && it2val == 0 && it3val == 1))
    {
        // Case: x == 0 || y == 0
        //      t1:c1==0 t2:c2==0 t3:c3==1
        //      ==> true if (c1&c2)==0
        foldOp = GT_AND;
        cmpOp  = GT_EQ;
    }
    else if ((m_testInfo1.compTree->gtOper == GT_NE && m_testInfo2.compTree->gtOper == GT_NE) &&
             (it1val == 0 && it2val == 0 && it3val == 1))
    {
        // Case: x == 1 || y == 1
        //      t1:c1==1 t2:c2==1 t3:c3==1 is reversed from optIsBoolComp() to: t1:c1!=0 t2:c2!=0 t3:c3==1
        //      ==> true if (c1|c2)!=0
        foldOp = GT_OR;
        cmpOp  = GT_NE;
    }
    else
    {
        // Require NOT operation for operand(s). Do Not fold.
        return false;
    }

    if ((foldOp == GT_AND || cmpOp == GT_NE) && (!m_testInfo1.isBool || !m_testInfo2.isBool))
    {
        // x == 1 && y == 1: Skip cases where x or y is greater than 1, e.g., x=3, y=1
        // x == 0 || y == 0: Skip cases where x and y have opposite bits set, e.g., x=2, y=1
        // x == 1 || y == 1: Skip cases where either x or y is greater than 1, e.g., x=2, y=0
        return false;
    }

    m_foldOp = foldOp;
    m_cmpOp  = cmpOp;

    // Now update the trees

    optOptimizeBoolsUpdateTrees();

#ifdef DEBUG
    if (m_comp->verbose)
    {
        printf("Folded %sboolean conditions of " FMT_BB ", " FMT_BB " and " FMT_BB " to :\n",
               m_c2->OperIsLeaf() ? "" : "non-leaf ", m_b1->bbNum, m_b2->bbNum, m_b3->bbNum);
        m_comp->gtDispStmt(s1);
        printf("\n");
    }
#endif

    // Return true to continue the bool optimization for the rest of the BB chain
    return true;
}

//-----------------------------------------------------------------------------
//  optOptimizeBoolsGcStress: Replace x==null with (x|x)==0 if x is a GC-type.
//                            This will stress code-gen and the emitter to make sure they support such trees.
// #ifdef DEBUG void OptBoolsDsc::optOptimizeBoolsGcStress() { if (!m_comp->compStressCompile(m_comp->STRESS_OPT_BOOLS_GC, 20)) { return; } assert(m_b1->bbJumpKind == BBJ_COND); GenTree* cond = m_b1->lastStmt()->GetRootNode(); assert(cond->gtOper == GT_JTRUE); OptTestInfo test; test.testTree = cond; GenTree* comparand = optIsBoolComp(&test); if (comparand == nullptr || !varTypeIsGC(comparand->TypeGet())) { return; } GenTree* relop = test.compTree; bool isBool = test.isBool; if (comparand->gtFlags & (GTF_ASG | GTF_CALL | GTF_ORDER_SIDEEFF)) { return; } GenTree* comparandClone = m_comp->gtCloneExpr(comparand); noway_assert(relop->AsOp()->gtOp1 == comparand); genTreeOps oper = m_comp->compStressCompile(m_comp->STRESS_OPT_BOOLS_GC, 50) ? GT_OR : GT_AND; relop->AsOp()->gtOp1 = m_comp->gtNewOperNode(oper, TYP_I_IMPL, comparand, comparandClone); // Comparand type is already checked, and we have const int, there is no harm // morphing it into a TYP_I_IMPL. noway_assert(relop->AsOp()->gtOp2->gtOper == GT_CNS_INT); relop->AsOp()->gtOp2->gtType = TYP_I_IMPL; } #endif //----------------------------------------------------------------------------- // optIsBoolComp: Function used by folding of boolean conditionals // // Arguments: // pOptTest The test info for the test tree // // Return: // On success, return the first operand (gtOp1) of compTree, else return nullptr. // // Notes: // On entry, testTree is set. // On success, compTree is set to the compare node (i.e. GT_EQ or GT_NE) of the testTree. // isBool is set to true if the comparand (i.e., operand 1 of compTree is boolean. Otherwise, false. // // Given a GT_JTRUE or GT_RETURN node, this method checks if it is a boolean comparison // of the form "if (boolVal ==/!= 0/1)".This is translated into // a GT_EQ/GT_NE node with "opr1" being a boolean lclVar and "opr2" the const 0/1. // // When isBool == true, if the comparison was against a 1 (i.e true) // then we morph the tree by reversing the GT_EQ/GT_NE and change the 1 to 0. 
//
GenTree* OptBoolsDsc::optIsBoolComp(OptTestInfo* pOptTest)
{
    pOptTest->isBool = false;

    assert(pOptTest->testTree->gtOper == GT_JTRUE || pOptTest->testTree->gtOper == GT_RETURN);
    GenTree* cond = pOptTest->testTree->AsOp()->gtOp1;

    // The condition must be "!= 0" or "== 0"

    if ((cond->gtOper != GT_EQ) && (cond->gtOper != GT_NE))
    {
        return nullptr;
    }

    // Return the compare node to the caller

    pOptTest->compTree = cond;

    // Get hold of the comparands

    GenTree* opr1 = cond->AsOp()->gtOp1;
    GenTree* opr2 = cond->AsOp()->gtOp2;

    if (opr2->gtOper != GT_CNS_INT)
    {
        return nullptr;
    }

    if (!opr2->IsIntegralConst(0) && !opr2->IsIntegralConst(1))
    {
        return nullptr;
    }

    ssize_t ival2 = opr2->AsIntCon()->gtIconVal;

    // Is the value a boolean?
    // We can either have a boolean expression (marked GTF_BOOLEAN) or
    // a local variable that is marked as being boolean (lvIsBoolean)

    if (opr1->gtFlags & GTF_BOOLEAN)
    {
        pOptTest->isBool = true;
    }
    else if ((opr1->gtOper == GT_CNS_INT) && (opr1->IsIntegralConst(0) || opr1->IsIntegralConst(1)))
    {
        pOptTest->isBool = true;
    }
    else if (opr1->gtOper == GT_LCL_VAR)
    {
        // is it a boolean local variable?

        unsigned lclNum = opr1->AsLclVarCommon()->GetLclNum();
        noway_assert(lclNum < m_comp->lvaCount);

        if (m_comp->lvaTable[lclNum].lvIsBoolean)
        {
            pOptTest->isBool = true;
        }
    }

    // Was our comparison against the constant 1 (i.e. true)
    if (ival2 == 1)
    {
        // If this is a boolean expression tree we can reverse the relop
        // and change the true to false. Note: this mutates the tree in place.
        if (pOptTest->isBool)
        {
            m_comp->gtReverseCond(cond);
            opr2->AsIntCon()->gtIconVal = 0;
        }
        else
        {
            return nullptr;
        }
    }

    return opr1;
}

//-----------------------------------------------------------------------------
// optOptimizeBools:    Folds boolean conditionals for GT_JTRUE/GT_RETURN nodes
//
// Notes:
//      If the operand of GT_JTRUE/GT_RETURN node is GT_EQ/GT_NE of the form
//      "if (boolVal ==/!=  0/1)", the GT_EQ/GT_NE nodes are translated into a
//      GT_EQ/GT_NE node with
//          "op1" being a boolean GT_OR/GT_AND lclVar and
//          "op2" the const 0/1.
//      For example, the folded tree for the below boolean optimization is shown below:
//      Case 1:     (x == 0 && y ==0) => (x | y) == 0
//          *  RETURN   int
//          \--*  EQ        int
//             +--*  OR         int
//             |  +--*  LCL_VAR     int     V00 arg0
//             |  \--*  LCL_VAR     int     V01 arg1
//             \--*  CNS_INT    int     0
//
//      Case 2:     (x == null && y == null) ==> (x | y) == 0
//          *  RETURN    int
//          \-- * EQ        int
//              + -- * OR        long
//              |    +-- * LCL_VAR   ref    V00 arg0
//              |    \-- * LCL_VAR   ref    V01 arg1
//              \-- * CNS_INT   long   0
//
//      Case 3:     (x == 0 && y == 0 && z == 0) ==> ((x | y) | z) == 0
//          *  RETURN    int
//          \-- * EQ        int
//              + -- * OR        int
//              |    +-- * OR        int
//              |    |   +-- * LCL_VAR   int    V00 arg0
//              |    |   \-- * LCL_VAR   int    V01 arg1
//              |    \-- * LCL_VAR   int    V02 arg2
//              \-- * CNS_INT   int    0
//
//      Case 4:     (x == 0 && y == 0 && z == 0 && w == 0) ==> (((x | y) | z) | w) == 0
//          *  RETURN    int
//          \-- *  EQ        int
//              +  *  OR        int
//              |  +--*  OR        int
//              |  |  +--*  OR        int
//              |  |  |  +--*  LCL_VAR   int    V00 arg0
//              |  |  |  \--*  LCL_VAR   int    V01 arg1
//              |  |  \--*  LCL_VAR   int    V02 arg2
//              |  \--*  LCL_VAR   int    V03 arg3
//              \--*  CNS_INT   int    0
//
//      Patterns that are not optimized include (x == 1 && y == 1), (x == 1 || y == 1),
//      (x == 0 || y == 0) because currently their comptree is not marked as boolean expression.
//      When m_foldOp == GT_AND or m_cmpOp == GT_NE, both compTrees must be boolean expression
//      in order to skip below cases when compTree is not boolean expression:
//          - x == 1 && y == 1 ==> (x&y)!=0: Skip cases where x or y is greater than 1, e.g., x=3, y=1
//          - x == 1 || y == 1 ==> (x|y)!=0: Skip cases where either x or y is greater than 1, e.g., x=2, y=0
//          - x == 0 || y == 0 ==> (x&y)==0: Skip cases where x and y have opposite bits set, e.g., x=2, y=1
//
void Compiler::optOptimizeBools()
{
#ifdef DEBUG
    if (verbose)
    {
        printf("*************** In optOptimizeBools()\n");
        if (verboseTrees)
        {
            printf("Blocks/Trees before phase\n");
            fgDispBasicBlocks(true);
        }
    }
#endif
    bool change;

    // Iterate to a fixed point: each fold may expose a new foldable pair.
    do
    {
        change = false;

        for (BasicBlock* const b1 : Blocks())
        {
            // We're only interested in conditional jumps here

            if (b1->bbJumpKind != BBJ_COND)
            {
                continue;
            }

            // If there is no next block, we're done

            BasicBlock* b2 = b1->bbNext;
            if (b2 == nullptr)
            {
                break;
            }

            // The next block must not be marked as BBF_DONT_REMOVE
            if (b2->bbFlags & BBF_DONT_REMOVE)
            {
                continue;
            }

            OptBoolsDsc optBoolsDsc(b1, b2, this);

            // The next block needs to be a condition or return block.
            if (b2->bbJumpKind == BBJ_COND)
            {
                if ((b1->bbJumpDest != b2->bbJumpDest) && (b1->bbJumpDest != b2->bbNext))
                {
                    continue;
                }

                // When it is conditional jumps

                if (optBoolsDsc.optOptimizeBoolsCondBlock())
                {
                    change = true;
                }
            }
            else if (b2->bbJumpKind == BBJ_RETURN)
            {
                // Set b3 to b1 jump destination
                BasicBlock* b3 = b1->bbJumpDest;

                // b3 must not be marked as BBF_DONT_REMOVE

                if (b3->bbFlags & BBF_DONT_REMOVE)
                {
                    continue;
                }

                // b3 must be RETURN type

                if (b3->bbJumpKind != BBJ_RETURN)
                {
                    continue;
                }

                if (optBoolsDsc.optOptimizeBoolsReturnBlock(b3))
                {
                    change = true;
                }
            }
            else
            {
#ifdef DEBUG
                optBoolsDsc.optOptimizeBoolsGcStress();
#endif
            }
        }
    } while (change);

#ifdef DEBUG
    fgDebugCheckBBlist();
#endif
}

// Maps a local variable number to a reference/def count for that local.
typedef JitHashTable<unsigned, JitSmallPrimitiveKeyFuncs<unsigned>, unsigned> LclVarRefCounts;

//------------------------------------------------------------------------------------------
// optRemoveRedundantZeroInits: Remove redundant zero initializations.
//
// Notes:
//    This phase iterates over basic blocks starting with the first basic block until there is no unique
//    basic block successor or until it detects a loop. It keeps track of local nodes it encounters.
//    When it gets to an assignment to a local variable or a local field, it checks whether the assignment
//    is the first reference to the local (or to the parent of the local field), and, if so,
//    it may do one of two optimizations:
//      1. If the following conditions are true:
//            the local is untracked,
//            the rhs of the assignment is 0,
//            the local is guaranteed to be fully initialized in the prolog,
//         then the explicit zero initialization is removed.
//      2.
//         If the following conditions are true:
//            the assignment is to a local (and not a field),
//            the local is not lvLiveInOutOfHndlr or no exceptions can be thrown between the prolog and the assignment,
//            either the local has no gc pointers or there are no gc-safe points between the prolog and the assignment,
//         then the local is marked with lvHasExplicitInit which tells the codegen not to insert zero initialization
//         for this local in the prolog.
void Compiler::optRemoveRedundantZeroInits()
{
#ifdef DEBUG
    if (verbose)
    {
        printf("*************** In optRemoveRedundantZeroInits()\n");
    }
#endif // DEBUG

    CompAllocator   allocator(getAllocator(CMK_ZeroInit));
    LclVarRefCounts refCounts(allocator);
    BitVecTraits    bitVecTraits(lvaCount, this);
    BitVec          zeroInitLocals = BitVecOps::MakeEmpty(&bitVecTraits);
    bool            hasGCSafePoint = false;
    bool            canThrow       = false;

    assert(fgStmtListThreaded);

    // Walk the unique-successor chain from the first block; BBF_MARKED doubles as the
    // visited set so a cycle (loop) terminates the walk.
    for (BasicBlock* block = fgFirstBB; (block != nullptr) && ((block->bbFlags & BBF_MARKED) == 0);
         block             = block->GetUniqueSucc())
    {
        block->bbFlags |= BBF_MARKED;
        CompAllocator   allocator(getAllocator(CMK_ZeroInit));
        LclVarRefCounts defsInBlock(allocator);
        bool            removedTrackedDefs = false;
        for (Statement* stmt = block->FirstNonPhiDef(); stmt != nullptr;)
        {
            // Capture the next statement up front; the current one may be removed below.
            Statement* next = stmt->GetNextStmt();
            for (GenTree* const tree : stmt->TreeList())
            {
                if (((tree->gtFlags & GTF_CALL) != 0))
                {
                    hasGCSafePoint = true;
                }

                if ((tree->gtFlags & GTF_EXCEPT) != 0)
                {
                    canThrow = true;
                }

                switch (tree->gtOper)
                {
                    case GT_LCL_VAR:
                    case GT_LCL_FLD:
                    case GT_LCL_VAR_ADDR:
                    case GT_LCL_FLD_ADDR:
                    {
                        unsigned  lclNum    = tree->AsLclVarCommon()->GetLclNum();
                        unsigned* pRefCount = refCounts.LookupPointer(lclNum);
                        if (pRefCount != nullptr)
                        {
                            *pRefCount = (*pRefCount) + 1;
                        }
                        else
                        {
                            refCounts.Set(lclNum, 1);
                        }

                        if ((tree->gtFlags & GTF_VAR_DEF) == 0)
                        {
                            break;
                        }

                        // We need to count the number of tracked var defs in the block
                        // so that we can update block->bbVarDef if we remove any tracked var defs.

                        LclVarDsc* const lclDsc = lvaGetDesc(lclNum);
                        if (lclDsc->lvTracked)
                        {
                            unsigned* pDefsCount = defsInBlock.LookupPointer(lclNum);
                            if (pDefsCount != nullptr)
                            {
                                *pDefsCount = (*pDefsCount) + 1;
                            }
                            else
                            {
                                defsInBlock.Set(lclNum, 1);
                            }
                        }
                        else if (varTypeIsStruct(lclDsc) && ((tree->gtFlags & GTF_VAR_USEASG) == 0) &&
                                 lvaGetPromotionType(lclDsc) != PROMOTION_TYPE_NONE)
                        {
                            // A full def of a promoted struct also defines each of its tracked fields.
                            for (unsigned i = lclDsc->lvFieldLclStart; i < lclDsc->lvFieldLclStart + lclDsc->lvFieldCnt;
                                 ++i)
                            {
                                if (lvaGetDesc(i)->lvTracked)
                                {
                                    unsigned* pDefsCount = defsInBlock.LookupPointer(i);
                                    if (pDefsCount != nullptr)
                                    {
                                        *pDefsCount = (*pDefsCount) + 1;
                                    }
                                    else
                                    {
                                        defsInBlock.Set(i, 1);
                                    }
                                }
                            }
                        }
                        break;
                    }
                    case GT_ASG:
                    {
                        GenTreeOp* treeOp = tree->AsOp();

                        GenTreeLclVarCommon* lclVar;
                        bool                 isEntire;

                        if (!tree->DefinesLocal(this, &lclVar, &isEntire))
                        {
                            break;
                        }

                        const unsigned   lclNum    = lclVar->GetLclNum();
                        LclVarDsc* const lclDsc    = lvaGetDesc(lclNum);
                        unsigned*        pRefCount = refCounts.LookupPointer(lclNum);

                        // pRefCount can't be null because the local node on the lhs of the assignment
                        // must have already been seen.
                        assert(pRefCount != nullptr);
                        if (*pRefCount != 1)
                        {
                            break;
                        }

                        unsigned parentRefCount = 0;
                        if (lclDsc->lvIsStructField && refCounts.Lookup(lclDsc->lvParentLcl, &parentRefCount) &&
                            (parentRefCount != 0))
                        {
                            break;
                        }

                        unsigned fieldRefCount = 0;
                        if (lclDsc->lvPromoted)
                        {
                            for (unsigned i = lclDsc->lvFieldLclStart;
                                 (fieldRefCount == 0) && (i < lclDsc->lvFieldLclStart + lclDsc->lvFieldCnt); ++i)
                            {
                                refCounts.Lookup(i, &fieldRefCount);
                            }
                        }

                        if (fieldRefCount != 0)
                        {
                            break;
                        }

                        // The local hasn't been referenced before this assignment.
                        bool removedExplicitZeroInit = false;

                        if (treeOp->gtGetOp2()->IsIntegralConst(0))
                        {
                            bool bbInALoop  = (block->bbFlags & BBF_BACKWARD_JUMP) != 0;
                            bool bbIsReturn = block->bbJumpKind == BBJ_RETURN;

                            if (!bbInALoop || bbIsReturn)
                            {
                                if (BitVecOps::IsMember(&bitVecTraits, zeroInitLocals, lclNum) ||
                                    (lclDsc->lvIsStructField &&
                                     BitVecOps::IsMember(&bitVecTraits, zeroInitLocals, lclDsc->lvParentLcl)) ||
                                    ((!lclDsc->lvTracked || !isEntire) &&
                                     !fgVarNeedsExplicitZeroInit(lclNum, bbInALoop, bbIsReturn)))
                                {
                                    // We are guaranteed to have a zero initialization in the prolog or a
                                    // dominating explicit zero initialization and the local hasn't been redefined
                                    // between the prolog and this explicit zero initialization so the assignment
                                    // can be safely removed.
                                    if (tree == stmt->GetRootNode())
                                    {
                                        fgRemoveStmt(block, stmt);
                                        removedExplicitZeroInit      = true;
                                        lclDsc->lvSuppressedZeroInit = 1;

                                        if (lclDsc->lvTracked)
                                        {
                                            removedTrackedDefs   = true;
                                            unsigned* pDefsCount = defsInBlock.LookupPointer(lclNum);
                                            *pDefsCount          = (*pDefsCount) - 1;
                                        }
                                    }
                                }

                                if (isEntire)
                                {
                                    BitVecOps::AddElemD(&bitVecTraits, zeroInitLocals, lclNum);
                                }
                                *pRefCount = 0;
                            }
                        }

                        if (!removedExplicitZeroInit && isEntire && (!canThrow || !lclDsc->lvLiveInOutOfHndlr))
                        {
                            // If compMethodRequiresPInvokeFrame() returns true, lower may later
                            // insert a call to CORINFO_HELP_INIT_PINVOKE_FRAME which is a gc-safe point.
                            if (!lclDsc->HasGCPtr() ||
                                (!GetInterruptible() && !hasGCSafePoint && !compMethodRequiresPInvokeFrame()))
                            {
                                // The local hasn't been used and won't be reported to the gc between
                                // the prolog and this explicit initialization. Therefore, it doesn't
                                // require zero initialization in the prolog.
                                lclDsc->lvHasExplicitInit = 1;
                                // NOTE(review): FMT_LP looks like the loop-number format but lclNum is a
                                // local var number - verify this shouldn't use the local ("V%02u") format.
                                JITDUMP("Marking " FMT_LP " as having an explicit init\n", lclNum);
                            }
                        }
                        break;
                    }
                    default:
                        break;
                }
            }
            stmt = next;
        }

        if (removedTrackedDefs)
        {
            // Drop locals whose only def in this block was removed from bbVarDef.
            LclVarRefCounts::KeyIterator iter(defsInBlock.Begin());
            LclVarRefCounts::KeyIterator end(defsInBlock.End());
            for (; !iter.Equal(end); iter++)
            {
                unsigned int lclNum = iter.Get();
                if (defsInBlock[lclNum] == 0)
                {
                    VarSetOps::RemoveElemD(this, block->bbVarDef, lvaGetDesc(lclNum)->lvVarIndex);
                }
            }
        }
    }

    // Second pass: clear the BBF_MARKED visited flags set above.
    for (BasicBlock* block = fgFirstBB; (block != nullptr) && ((block->bbFlags & BBF_MARKED) != 0);
         block             = block->GetUniqueSucc())
    {
        block->bbFlags &= ~BBF_MARKED;
    }
}

#ifdef DEBUG

//------------------------------------------------------------------------
// optAnyChildNotRemoved: Recursively check the child loops of a loop to see if any of them
// are still live (that is, not marked as LPFLG_REMOVED). This check is done when we are
// removing a parent, just to notify that there is something odd about leaving a live child.
//
// Arguments:
//      loopNum - the loop number to check
//
bool Compiler::optAnyChildNotRemoved(unsigned loopNum)
{
    assert(loopNum < optLoopCount);

    // Now recursively mark the children.
    for (BasicBlock::loopNumber l = optLoopTable[loopNum].lpChild; //
         l != BasicBlock::NOT_IN_LOOP;                             //
         l = optLoopTable[l].lpSibling)
    {
        if ((optLoopTable[l].lpFlags & LPFLG_REMOVED) == 0)
        {
            return true;
        }

        if (optAnyChildNotRemoved(l))
        {
            return true;
        }
    }

    // All children were removed
    return false;
}

#endif // DEBUG

//------------------------------------------------------------------------
// optMarkLoopRemoved: Mark the specified loop as removed (some optimization, such as unrolling, has made the
// loop no longer exist). Note that only the given loop is marked as being removed; if it has any children,
// they are not touched (but a warning message is output to the JitDump).
// // Arguments: // loopNum - the loop number to remove // void Compiler::optMarkLoopRemoved(unsigned loopNum) { JITDUMP("Marking loop " FMT_LP " removed\n", loopNum); assert(loopNum < optLoopCount); LoopDsc& loop = optLoopTable[loopNum]; loop.lpFlags |= LPFLG_REMOVED; #ifdef DEBUG if (optAnyChildNotRemoved(loopNum)) { JITDUMP("Removed loop " FMT_LP " has one or more live children\n", loopNum); } // Note: we can't call `fgDebugCheckLoopTable()` here because if there are live children, it will assert. // Assume the caller is going to fix up the table and `bbNatLoopNum` block annotations before the next time // `fgDebugCheckLoopTable()` is called. #endif // DEBUG }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Optimizer XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif /*****************************************************************************/ void Compiler::optInit() { optLoopsMarked = false; fgHasLoops = false; loopAlignCandidates = 0; /* Initialize the # of tracked loops to 0 */ optLoopCount = 0; optLoopTable = nullptr; optCurLoopEpoch = 0; #ifdef DEBUG loopsAligned = 0; #endif /* Keep track of the number of calls and indirect calls made by this method */ optCallCount = 0; optIndirectCallCount = 0; optNativeCallCount = 0; optAssertionCount = 0; optAssertionDep = nullptr; optCSEstart = BAD_VAR_NUM; optCSEcount = 0; } DataFlow::DataFlow(Compiler* pCompiler) : m_pCompiler(pCompiler) { } //------------------------------------------------------------------------ // optSetBlockWeights: adjust block weights, as follows: // 1. A block that is not reachable from the entry block is marked "run rarely". // 2. If we're not using profile weights, then any block with a non-zero weight // that doesn't dominate all the return blocks has its weight dropped in half // (but only if the first block *does* dominate all the returns). // // Notes: // Depends on dominators, and fgReturnBlocks being set. 
//
PhaseStatus Compiler::optSetBlockWeights()
{
    noway_assert(opts.OptimizationEnabled());
    assert(fgDomsComputed);
    assert(fgReturnBlocksComputed);

#ifdef DEBUG
    bool changed = false;
#endif

    bool       firstBBDominatesAllReturns = true;
    const bool usingProfileWeights        = fgIsUsingProfileWeights();

    for (BasicBlock* const block : Blocks())
    {
        /* Blocks that can't be reached via the first block are rarely executed */
        if (!fgReachable(fgFirstBB, block))
        {
            block->bbSetRunRarely();
        }

        if (!usingProfileWeights && firstBBDominatesAllReturns)
        {
            // If the weight is already zero (and thus rarely run), there's no point scaling it.
            if (block->bbWeight != BB_ZERO_WEIGHT)
            {
                // If the block dominates all return blocks, leave the weight alone. Otherwise,
                // scale the weight by 0.5 as a heuristic that some other path gets some of the dynamic flow.
                // Note that `optScaleLoopBlocks` has a similar heuristic for loop blocks that don't dominate
                // their loop back edge.

                bool blockDominatesAllReturns = true; // Assume that we will dominate

                for (BasicBlockList* retBlocks = fgReturnBlocks; retBlocks != nullptr; retBlocks = retBlocks->next)
                {
                    if (!fgDominate(block, retBlocks->block))
                    {
                        blockDominatesAllReturns = false;
                        break;
                    }
                }

                if (block == fgFirstBB)
                {
                    firstBBDominatesAllReturns = blockDominatesAllReturns;

                    // Don't scale the weight of the first block, since it is guaranteed to execute.
                    // If the first block does not dominate all the returns, we won't scale any of the function's
                    // block weights.
                }
                else
                {
                    // If we are not using profile weight then we lower the weight
                    // of blocks that do not dominate a return block
                    //
                    if (!blockDominatesAllReturns)
                    {
                        INDEBUG(changed = true);

                        // TODO-Cleanup: we should use:
                        //    block->scaleBBWeight(0.5);
                        // since we are inheriting "from ourselves", but that leads to asm diffs due to minutely
                        // different floating-point value in the calculation, and some code that compares weights
                        // for equality.
                        block->inheritWeightPercentage(block, 50);
                    }
                }
            }
        }
    }

#if DEBUG
    if (changed && verbose)
    {
        printf("\nAfter optSetBlockWeights:\n");
        fgDispBasicBlocks();
        printf("\n");
    }

    /* Check that the flowgraph data (bbNum, bbRefs, bbPreds) is up-to-date */
    fgDebugCheckBBlist();
#endif

    return PhaseStatus::MODIFIED_EVERYTHING;
}

//------------------------------------------------------------------------
// optScaleLoopBlocks: Scale the weight of loop blocks from 'begBlk' to 'endBlk'.
//
// Arguments:
//      begBlk - first block of range. Must be marked as a loop head (BBF_LOOP_HEAD).
//      endBlk - last block of range (inclusive). Must be reachable from `begBlk`.
//
// Operation:
//      Calculate the 'loop weight'. This is the amount to scale the weight of each block in the loop.
//      Our heuristic is that loops are weighted eight times more than straight-line code
//      (scale factor is BB_LOOP_WEIGHT_SCALE). If the loops are all properly formed this gives us these weights:
//
//          1 -- non-loop basic block
//          8 -- single loop nesting
//         64 -- double loop nesting
//        512 -- triple loop nesting
//
void Compiler::optScaleLoopBlocks(BasicBlock* begBlk, BasicBlock* endBlk)
{
    noway_assert(begBlk->bbNum <= endBlk->bbNum);
    noway_assert(begBlk->isLoopHead());
    noway_assert(fgReachable(begBlk, endBlk));
    noway_assert(!opts.MinOpts());

#ifdef DEBUG
    if (verbose)
    {
        printf("\nMarking a loop from " FMT_BB " to " FMT_BB, begBlk->bbNum, endBlk->bbNum);
    }
#endif

    // Build list of back edges for block begBlk.
    flowList* backedgeList = nullptr;

    for (BasicBlock* const predBlock : begBlk->PredBlocks())
    {
        // Is this a back edge? (predecessor with an equal-or-higher block number)
        if (predBlock->bbNum >= begBlk->bbNum)
        {
            backedgeList = new (this, CMK_FlowList) flowList(predBlock, backedgeList);

#if MEASURE_BLOCK_SIZE
            genFlowNodeCnt += 1;
            genFlowNodeSize += sizeof(flowList);
#endif // MEASURE_BLOCK_SIZE
        }
    }

    // At least one backedge must have been found (the one from endBlk).
    noway_assert(backedgeList);

    // Debug-only helper: dump a block's weight plus an explanatory suffix.
    auto reportBlockWeight = [&](BasicBlock* blk, const char* message) {
#ifdef DEBUG
        if (verbose)
        {
            printf("\n    " FMT_BB "(wt=" FMT_WT ")%s", blk->bbNum, blk->getBBWeight(this), message);
        }
#endif // DEBUG
    };

    for (BasicBlock* const curBlk : BasicBlockRangeList(begBlk, endBlk))
    {
        // Don't change the block weight if it came from profile data.
        if (curBlk->hasProfileWeight())
        {
            reportBlockWeight(curBlk, "; unchanged: has profile weight");
            continue;
        }

        // Don't change the block weight if it's known to be rarely run.
        if (curBlk->isRunRarely())
        {
            reportBlockWeight(curBlk, "; unchanged: run rarely");
            continue;
        }

        // For curBlk to be part of a loop that starts at begBlk, curBlk must be reachable from begBlk and
        // (since this is a loop) begBlk must likewise be reachable from curBlk.

        if (fgReachable(curBlk, begBlk) && fgReachable(begBlk, curBlk))
        {
            // If `curBlk` reaches any of the back edge blocks we set `reachable`.
            // If `curBlk` dominates any of the back edge blocks we set `dominates`.
            bool reachable = false;
            bool dominates = false;

            for (flowList* tmp = backedgeList; tmp != nullptr; tmp = tmp->flNext)
            {
                BasicBlock* backedge = tmp->getBlock();
                reachable |= fgReachable(curBlk, backedge);
                dominates |= fgDominate(curBlk, backedge);

                if (dominates && reachable)
                {
                    // No need to keep looking; we've already found all the info we need.
                    break;
                }
            }

            if (reachable)
            {
                // If the block has BB_ZERO_WEIGHT, then it should be marked as rarely run, and skipped, above.
                noway_assert(curBlk->bbWeight > BB_ZERO_WEIGHT);

                weight_t scale = BB_LOOP_WEIGHT_SCALE;

                if (!dominates)
                {
                    // If `curBlk` reaches but doesn't dominate any back edge to `endBlk` then there must be at least
                    // some other path to `endBlk`, so don't give `curBlk` all the execution weight.
                    scale = scale / 2;
                }

                curBlk->scaleBBWeight(scale);

                reportBlockWeight(curBlk, "");
            }
            else
            {
                reportBlockWeight(curBlk, "; unchanged: back edge unreachable");
            }
        }
        else
        {
            reportBlockWeight(curBlk, "; unchanged: block not in loop");
        }
    }
}

//------------------------------------------------------------------------
// optUnmarkLoopBlocks: Unmark the blocks between 'begBlk' and 'endBlk' as part of a loop.
//
// Arguments:
//      begBlk - first block of range. Must be marked as a loop head (BBF_LOOP_HEAD).
//      endBlk - last block of range (inclusive). Must be reachable from `begBlk`.
//
// Operation:
//      A set of blocks that were previously marked as a loop are now to be unmarked, since we have decided that
//      for some reason this loop no longer exists. Basically we are just resetting the blocks bbWeight to their
//      previous values.
//
void Compiler::optUnmarkLoopBlocks(BasicBlock* begBlk, BasicBlock* endBlk)
{
    noway_assert(begBlk->bbNum <= endBlk->bbNum);
    noway_assert(begBlk->isLoopHead());
    noway_assert(!opts.MinOpts());

    unsigned backEdgeCount = 0;

    for (BasicBlock* const predBlock : begBlk->PredBlocks())
    {
        // Is this a backward edge? (from predBlock to begBlk)
        if (begBlk->bbNum > predBlock->bbNum)
        {
            continue;
        }

        // We only consider back-edges that are BBJ_COND or BBJ_ALWAYS for loops.
        if (!predBlock->KindIs(BBJ_COND, BBJ_ALWAYS))
        {
            continue;
        }

        backEdgeCount++;
    }

    // Only unmark the loop blocks if we have exactly one loop back edge.
    if (backEdgeCount != 1)
    {
#ifdef DEBUG
        if (verbose)
        {
            if (backEdgeCount > 0)
            {
                printf("\nNot removing loop at " FMT_BB ", due to an additional back edge", begBlk->bbNum);
            }
            else if (backEdgeCount == 0)
            {
                printf("\nNot removing loop at " FMT_BB ", due to no back edge", begBlk->bbNum);
            }
        }
#endif
        return;
    }
    noway_assert(fgReachable(begBlk, endBlk));

#ifdef DEBUG
    if (verbose)
    {
        printf("\nUnmarking a loop from " FMT_BB " to " FMT_BB, begBlk->bbNum, endBlk->bbNum);
    }
#endif

    for (BasicBlock* const curBlk : BasicBlockRangeList(begBlk, endBlk))
    {
        // Stop if we go past the last block in the loop, as it may have been deleted.
        if (curBlk->bbNum > endBlk->bbNum)
        {
            break;
        }

        // Don't change the block weight if it's known to be rarely run.
        if (curBlk->isRunRarely())
        {
            continue;
        }

        // Don't change the block weight if it came from profile data.
        if (curBlk->hasProfileWeight())
        {
            continue;
        }

        // Don't unmark blocks that are maximum weight.
        if (curBlk->isMaxBBWeight())
        {
            continue;
        }

        // For curBlk to be part of a loop that starts at begBlk, curBlk must be reachable from begBlk and
        // (since this is a loop) begBlk must likewise be reachable from curBlk.
        //
        if (fgReachable(curBlk, begBlk) && fgReachable(begBlk, curBlk))
        {
            // Invert the scaling applied by optScaleLoopBlocks: divide by the loop
            // weight, doubling back (i.e. halving the divisor) for non-dominating blocks.
            weight_t scale = 1.0 / BB_LOOP_WEIGHT_SCALE;

            if (!fgDominate(curBlk, endBlk))
            {
                scale *= 2;
            }

            curBlk->scaleBBWeight(scale);

            JITDUMP("\n    " FMT_BB "(wt=" FMT_WT ")", curBlk->bbNum, curBlk->getBBWeight(this));
        }
    }

    JITDUMP("\n");

    begBlk->unmarkLoopAlign(this DEBUG_ARG("Removed loop"));
}

/*****************************************************************************************************
 *
 *  Function called to update the loop table and bbWeight before removing a block
 */
void Compiler::optUpdateLoopsBeforeRemoveBlock(BasicBlock* block, bool skipUnmarkLoop)
{
    if (!optLoopsMarked)
    {
        return;
    }

    noway_assert(!opts.MinOpts());

    bool removeLoop = false;

    // If an unreachable block is a loop entry or bottom then the loop is unreachable.
    // Special case: the block was the head of a loop - or pointing to a loop entry.

    for (unsigned loopNum = 0; loopNum < optLoopCount; loopNum++)
    {
        LoopDsc& loop = optLoopTable[loopNum];

        // Some loops may have been already removed by loop unrolling or conditional folding.
        if (loop.lpFlags & LPFLG_REMOVED)
        {
            continue;
        }

        // Avoid printing to the JitDump unless we're actually going to change something.
        // If we call reportBefore, then we're going to change the loop table, and we should print the
        // `reportAfter` info as well. Only print the `reportBefore` info once, if multiple changes to
        // the table are made.
        INDEBUG(bool reportedBefore = false);

        auto reportBefore = [&]() {
#ifdef DEBUG
            if (verbose && !reportedBefore)
            {
                printf("optUpdateLoopsBeforeRemoveBlock " FMT_BB " Before: ", block->bbNum);
                optPrintLoopInfo(loopNum);
                printf("\n");
                reportedBefore = true;
            }
#endif // DEBUG
        };

        auto reportAfter = [&]() {
#ifdef DEBUG
            if (verbose && reportedBefore)
            {
                printf("optUpdateLoopsBeforeRemoveBlock " FMT_BB " After: ", block->bbNum);
                optPrintLoopInfo(loopNum);
                printf("\n");
            }
#endif // DEBUG
        };

        if (block == loop.lpEntry || block == loop.lpBottom)
        {
            reportBefore();
            optMarkLoopRemoved(loopNum);
            reportAfter();
            continue;
        }

        // If the loop is still in the table any block in the loop must be reachable.

        noway_assert((loop.lpEntry != block) && (loop.lpBottom != block));

        if (loop.lpExit == block)
        {
            reportBefore();
            assert(loop.lpExitCnt == 1);
            --loop.lpExitCnt;
            loop.lpExit = nullptr;
        }

        // If `block` flows to the loop entry then the whole loop will become unreachable if it is the
        // only non-loop predecessor.

        switch (block->bbJumpKind)
        {
            case BBJ_NONE:
                if (block->bbNext == loop.lpEntry)
                {
                    removeLoop = true;
                }
                break;

            case BBJ_COND:
                if ((block->bbNext == loop.lpEntry) || (block->bbJumpDest == loop.lpEntry))
                {
                    removeLoop = true;
                }
                break;

            case BBJ_ALWAYS:
                if (block->bbJumpDest == loop.lpEntry)
                {
                    removeLoop = true;
                }
                break;

            case BBJ_SWITCH:
                for (BasicBlock* const bTarget : block->SwitchTargets())
                {
                    if (bTarget == loop.lpEntry)
                    {
                        removeLoop = true;
                        break;
                    }
                }
                break;

            default:
                break;
        }

        if (removeLoop)
        {
            // Check if the entry has other predecessors outside the loop.
            // TODO: Replace this when predecessors are available.

            for (BasicBlock* const auxBlock : Blocks())
            {
                // Ignore blocks in the loop.
                if (loop.lpContains(auxBlock))
                {
                    continue;
                }

                switch (auxBlock->bbJumpKind)
                {
                    case BBJ_NONE:
                        if (auxBlock->bbNext == loop.lpEntry)
                        {
                            removeLoop = false;
                        }
                        break;

                    case BBJ_COND:
                        if ((auxBlock->bbNext == loop.lpEntry) || (auxBlock->bbJumpDest == loop.lpEntry))
                        {
                            removeLoop = false;
                        }
                        break;

                    case BBJ_ALWAYS:
                        if (auxBlock->bbJumpDest == loop.lpEntry)
                        {
                            removeLoop = false;
                        }
                        break;

                    case BBJ_SWITCH:
                        for (BasicBlock* const bTarget : auxBlock->SwitchTargets())
                        {
                            if (bTarget == loop.lpEntry)
                            {
                                removeLoop = false;
                                break;
                            }
                        }
                        break;

                    default:
                        break;
                }
            }

            if (removeLoop)
            {
                reportBefore();
                optMarkLoopRemoved(loopNum);
            }
        }
        else if (loop.lpHead == block)
        {
            reportBefore();
            /* The loop has a new head - Just update the loop table */
            loop.lpHead = block->bbPrev;
        }

        reportAfter();
    }

    if ((skipUnmarkLoop == false) &&                  //
        block->KindIs(BBJ_ALWAYS, BBJ_COND) &&        //
        block->bbJumpDest->isLoopHead() &&            //
        (block->bbJumpDest->bbNum <= block->bbNum) && //
        fgDomsComputed &&                             //
        (fgCurBBEpochSize == fgDomBBcount + 1) &&     //
        fgReachable(block->bbJumpDest, block))
    {
        optUnmarkLoopBlocks(block->bbJumpDest, block);
    }
}

//------------------------------------------------------------------------
// optClearLoopIterInfo: Clear the info related to LPFLG_ITER loops in the loop table.
// The various fields related to iterators is known to be valid for loop cloning and unrolling,
// but becomes invalid afterwards. Clear the info that might be used incorrectly afterwards
// in JitDump or by subsequent phases.
//
void Compiler::optClearLoopIterInfo()
{
    for (unsigned lnum = 0; lnum < optLoopCount; lnum++)
    {
        LoopDsc& loop = optLoopTable[lnum];
        loop.lpFlags &= ~(LPFLG_ITER | LPFLG_VAR_INIT | LPFLG_CONST_INIT | LPFLG_SIMD_LIMIT | LPFLG_VAR_LIMIT |
                          LPFLG_CONST_LIMIT | LPFLG_ARRLEN_LIMIT);

        loop.lpIterTree  = nullptr;
        loop.lpInitBlock = nullptr;
        loop.lpConstInit = -1; // union with loop.lpVarInit
        loop.lpTestTree  = nullptr;
    }
}

#ifdef DEBUG

/*****************************************************************************
 *
 *  Print loop info in an uniform way.
 */

void Compiler::optPrintLoopInfo(const LoopDsc* loop, bool printVerbose /* = false */)
{
    // 'loop' must point into this compiler's loop table; recover its index for printing.
    assert(optLoopTable != nullptr);
    assert((&optLoopTable[0] <= loop) && (loop < &optLoopTable[optLoopCount]));

    unsigned lnum = (unsigned)(loop - optLoopTable);
    assert(lnum < optLoopCount);
    assert(&optLoopTable[lnum] == loop);

    if (loop->lpFlags & LPFLG_REMOVED)
    {
        // If a loop has been removed, it might be dangerous to print its fields (e.g., loop unrolling
        // nulls out the lpHead field).
        printf(FMT_LP " REMOVED", lnum);
        return;
    }

    printf(FMT_LP ", from " FMT_BB " to " FMT_BB " (Head=" FMT_BB ", Entry=" FMT_BB, lnum, loop->lpTop->bbNum,
           loop->lpBottom->bbNum, loop->lpHead->bbNum, loop->lpEntry->bbNum);

    if (loop->lpExitCnt == 1)
    {
        printf(", Exit=" FMT_BB, loop->lpExit->bbNum);
    }
    else
    {
        printf(", ExitCnt=%d", loop->lpExitCnt);
    }

    if (loop->lpParent != BasicBlock::NOT_IN_LOOP)
    {
        printf(", parent=" FMT_LP, loop->lpParent);
    }
    printf(")");

    if (printVerbose)
    {
        if (loop->lpChild != BasicBlock::NOT_IN_LOOP)
        {
            printf(", child loop = " FMT_LP, loop->lpChild);
        }
        if (loop->lpSibling != BasicBlock::NOT_IN_LOOP)
        {
            printf(", sibling loop = " FMT_LP, loop->lpSibling);
        }

        // If an iterator loop print the iterator and the initialization.
        if (loop->lpFlags & LPFLG_ITER)
        {
            printf(" [over V%02u", loop->lpIterVar());
            printf(" (");
            printf(GenTree::OpName(loop->lpIterOper()));
            printf(" %d)", loop->lpIterConst());

            if (loop->lpFlags & LPFLG_CONST_INIT)
            {
                printf(" from %d", loop->lpConstInit);
            }
            if (loop->lpFlags & LPFLG_VAR_INIT)
            {
                printf(" from V%02u", loop->lpVarInit);
            }

            if (loop->lpFlags & (LPFLG_CONST_INIT | LPFLG_VAR_INIT))
            {
                if (loop->lpInitBlock != loop->lpHead)
                {
                    printf(" (in " FMT_BB ")", loop->lpInitBlock->bbNum);
                }
            }

            // If a simple test condition print operator and the limits */
            printf(" %s", GenTree::OpName(loop->lpTestOper()));

            if (loop->lpFlags & LPFLG_CONST_LIMIT)
            {
                printf(" %d", loop->lpConstLimit());
                if (loop->lpFlags & LPFLG_SIMD_LIMIT)
                {
                    printf(" (simd)");
                }
            }
            if (loop->lpFlags & LPFLG_VAR_LIMIT)
            {
                printf(" V%02u", loop->lpVarLimit());
            }
            if (loop->lpFlags & LPFLG_ARRLEN_LIMIT)
            {
                ArrIndex* index = new (getAllocator(CMK_DebugOnly)) ArrIndex(getAllocator(CMK_DebugOnly));
                if (loop->lpArrLenLimit(this, index))
                {
                    printf(" ");
                    index->Print();
                    printf(".Length");
                }
                else
                {
                    printf(" ???.Length");
                }
            }

            printf("]");
        }

        // Print the flags

        if (loop->lpFlags & LPFLG_CONTAINS_CALL)
        {
            printf(" call");
        }
        if (loop->lpFlags & LPFLG_HAS_PREHEAD)
        {
            printf(" prehead");
        }
        if (loop->lpFlags & LPFLG_DONT_UNROLL)
        {
            printf(" !unroll");
        }
        if (loop->lpFlags & LPFLG_ASGVARS_YES)
        {
            printf(" avyes");
        }
        if (loop->lpFlags & LPFLG_ASGVARS_INC)
        {
            printf(" avinc");
        }
    }
}

// Convenience overload: print loop info given a loop table index.
void Compiler::optPrintLoopInfo(unsigned lnum, bool printVerbose /* = false */)
{
    assert(lnum < optLoopCount);

    const LoopDsc& loop = optLoopTable[lnum];
    optPrintLoopInfo(&loop, printVerbose);
}

//------------------------------------------------------------------------
// optPrintLoopTable: Print the loop table
//
void Compiler::optPrintLoopTable()
{
    printf("\n*************** Natural loop table\n");

    if (optLoopCount == 0)
    {
        printf("No loops\n");
    }
    else
    {
        for (unsigned loopInd = 0; loopInd < optLoopCount; loopInd++)
        {
            optPrintLoopInfo(loopInd, /* verbose */ true);
            printf("\n");
        }
    }

    printf("\n");
}

#endif // DEBUG

//------------------------------------------------------------------------
// optPopulateInitInfo: Populate loop init info in the loop table.
//
// Arguments:
//     loopInd   -   loop index
//     initBlock -   block in which the initialization lives.
//     init      -   the tree that is supposed to initialize the loop iterator.
//     iterVar   -   loop iteration variable.
//
// Return Value:
//     "false" if the loop table could not be populated with the loop iterVar init info.
//
// Operation:
//     The 'init' tree is checked if its lhs is a local and rhs is either
//     a const or a local.
//
bool Compiler::optPopulateInitInfo(unsigned loopInd, BasicBlock* initBlock, GenTree* init, unsigned iterVar)
{
    // Operator should be =
    if (init->gtOper != GT_ASG)
    {
        return false;
    }

    GenTree* lhs = init->AsOp()->gtOp1;
    GenTree* rhs = init->AsOp()->gtOp2;
    // LHS has to be local and should equal iterVar.
    if (lhs->gtOper != GT_LCL_VAR || lhs->AsLclVarCommon()->GetLclNum() != iterVar)
    {
        return false;
    }

    // RHS can be constant or local var.
    // TODO-CQ: CLONE: Add arr length for descending loops.
    if (rhs->gtOper == GT_CNS_INT && rhs->TypeGet() == TYP_INT)
    {
        optLoopTable[loopInd].lpFlags |= LPFLG_CONST_INIT;
        optLoopTable[loopInd].lpConstInit = (int)rhs->AsIntCon()->gtIconVal;
        optLoopTable[loopInd].lpInitBlock = initBlock;
    }
    else if (rhs->gtOper == GT_LCL_VAR)
    {
        optLoopTable[loopInd].lpFlags |= LPFLG_VAR_INIT;
        optLoopTable[loopInd].lpVarInit   = rhs->AsLclVarCommon()->GetLclNum();
        optLoopTable[loopInd].lpInitBlock = initBlock;
    }
    else
    {
        return false;
    }

    return true;
}

//----------------------------------------------------------------------------------
// optCheckIterInLoopTest: Check if iter var is used in loop test.
//
// Arguments:
//      test          "jtrue" tree or an asg of the loop iter termination condition
//      from/to       blocks (beg, end) which are part of the loop.
//      iterVar       loop iteration variable.
//      loopInd       loop index.
//
// Operation:
//      The test tree is parsed to check if "iterVar" matches the lhs of the condition
//      and the rhs limit is extracted from the "test" tree. The limit information is
//      added to the loop table.
//
// Return Value:
//      "false" if the loop table could not be populated with the loop test info or
//      if the test condition doesn't involve iterVar.
//
bool Compiler::optCheckIterInLoopTest(
    unsigned loopInd, GenTree* test, BasicBlock* from, BasicBlock* to, unsigned iterVar)
{
    // Obtain the relop from the "test" tree.
    GenTree* relop;
    if (test->gtOper == GT_JTRUE)
    {
        relop = test->gtGetOp1();
    }
    else
    {
        assert(test->gtOper == GT_ASG);
        relop = test->gtGetOp2();
    }

    noway_assert(relop->OperIsCompare());

    GenTree* opr1 = relop->AsOp()->gtOp1;
    GenTree* opr2 = relop->AsOp()->gtOp2;

    GenTree* iterOp;
    GenTree* limitOp;

    // Make sure op1 or op2 is the iterVar.
    if (opr1->gtOper == GT_LCL_VAR && opr1->AsLclVarCommon()->GetLclNum() == iterVar)
    {
        iterOp  = opr1;
        limitOp = opr2;
    }
    else if (opr2->gtOper == GT_LCL_VAR && opr2->AsLclVarCommon()->GetLclNum() == iterVar)
    {
        iterOp  = opr2;
        limitOp = opr1;
    }
    else
    {
        return false;
    }

    if (iterOp->gtType != TYP_INT)
    {
        return false;
    }

    // Mark the iterator node.
    iterOp->gtFlags |= GTF_VAR_ITERATOR;

    // Check what type of limit we have - constant, variable or arr-len.
    if (limitOp->gtOper == GT_CNS_INT)
    {
        optLoopTable[loopInd].lpFlags |= LPFLG_CONST_LIMIT;
        if ((limitOp->gtFlags & GTF_ICON_SIMD_COUNT) != 0)
        {
            optLoopTable[loopInd].lpFlags |= LPFLG_SIMD_LIMIT;
        }
    }
    else if (limitOp->gtOper == GT_LCL_VAR &&
             !optIsVarAssigned(from, to, nullptr, limitOp->AsLclVarCommon()->GetLclNum()))
    {
        // A local limit only qualifies if it is loop-invariant (not assigned within the loop).
        optLoopTable[loopInd].lpFlags |= LPFLG_VAR_LIMIT;
    }
    else if (limitOp->gtOper == GT_ARR_LENGTH)
    {
        optLoopTable[loopInd].lpFlags |= LPFLG_ARRLEN_LIMIT;
    }
    else
    {
        return false;
    }
    // Save the type of the comparison between the iterator and the limit.
    optLoopTable[loopInd].lpTestTree = relop;
    return true;
}

//----------------------------------------------------------------------------------
// optIsLoopIncrTree: Check if loop is a tree of form v += 1 or v = v + 1
//
// Arguments:
//      incr        The incr tree to be checked. Whether incr tree is
//                  oper-equal(+=, -=...) type nodes or v=v+1 type ASG nodes.
//
// Operation:
//      The test tree is parsed to check if "iterVar" matches the lhs of the condition
//      and the rhs limit is extracted from the "test" tree. The limit information is
//      added to the loop table.
//
// Return Value:
//      iterVar local num if the iterVar is found, otherwise BAD_VAR_NUM.
//
unsigned Compiler::optIsLoopIncrTree(GenTree* incr)
{
    GenTree*   incrVal;
    genTreeOps updateOper;
    unsigned   iterVar = incr->IsLclVarUpdateTree(&incrVal, &updateOper);
    if (iterVar != BAD_VAR_NUM)
    {
        // We have v = v op y type asg node.
        switch (updateOper)
        {
            case GT_ADD:
            case GT_SUB:
            case GT_MUL:
            case GT_RSH:
            case GT_LSH:
                break;

            default:
                return BAD_VAR_NUM;
        }

        // Increment should be by a const int.
        // TODO-CQ: CLONE: allow variable increments.
        if ((incrVal->gtOper != GT_CNS_INT) || (incrVal->TypeGet() != TYP_INT))
        {
            return BAD_VAR_NUM;
        }
    }

    return iterVar;
}

//----------------------------------------------------------------------------------
// optComputeIterInfo: Check tree is loop increment of a lcl that is loop-invariant.
//
// Arguments:
//      from, to    - are blocks (beg, end) which are part of the loop.
//      incr        - tree that increments the loop iterator. v+=1 or v=v+1.
//      pIterVar    - see return value.
//
// Return Value:
//      Returns true if iterVar "v" can be returned in "pIterVar", otherwise returns
//      false.
//
// Operation:
//      Check if the "incr" tree is a "v=v+1 or v+=1" type tree and make sure it is not
//      assigned in the loop.
//
bool Compiler::optComputeIterInfo(GenTree* incr, BasicBlock* from, BasicBlock* to, unsigned* pIterVar)
{
    unsigned iterVar = optIsLoopIncrTree(incr);
    if (iterVar == BAD_VAR_NUM)
    {
        return false;
    }
    // The iterator may only be updated by the "incr" tree itself; any other
    // assignment within [from, to] disqualifies it.
    if (optIsVarAssigned(from, to, incr, iterVar))
    {
        JITDUMP("iterVar is assigned in loop\n");
        return false;
    }

    *pIterVar = iterVar;
    return true;
}

//----------------------------------------------------------------------------------
// optIsLoopTestEvalIntoTemp:
//      Pattern match if the test tree is computed into a tmp
//      and the "tmp" is used as jump condition for loop termination.
//
// Arguments:
//      testStmt    - is the JTRUE statement that is of the form: jmpTrue (Vtmp != 0)
//                    where Vtmp contains the actual loop test result.
//      newTestStmt - contains the statement that is the actual test stmt involving
//                    the loop iterator.
//
// Return Value:
//      Returns true if a new test tree can be obtained.
//
// Operation:
//      Scan if the current stmt is a jtrue with (Vtmp != 0) as condition
//      Then returns the rhs for def of Vtmp as the "test" node.
//
// Note:
//      This method just retrieves what it thinks is the "test" node,
//      the callers are expected to verify that "iterVar" is used in the test.
//
bool Compiler::optIsLoopTestEvalIntoTemp(Statement* testStmt, Statement** newTestStmt)
{
    GenTree* test = testStmt->GetRootNode();

    if (test->gtOper != GT_JTRUE)
    {
        return false;
    }

    GenTree* relop = test->gtGetOp1();
    noway_assert(relop->OperIsCompare());

    GenTree* opr1 = relop->AsOp()->gtOp1;
    GenTree* opr2 = relop->AsOp()->gtOp2;

    // Make sure we have jtrue (vtmp != 0)
    if ((relop->OperGet() == GT_NE) && (opr1->OperGet() == GT_LCL_VAR) && (opr2->OperGet() == GT_CNS_INT) &&
        opr2->IsIntegralConst(0))
    {
        // Get the previous statement to get the def (rhs) of Vtmp to see
        // if the "test" is evaluated into Vtmp.
        Statement* prevStmt = testStmt->GetPrevStmt();
        if (prevStmt == nullptr)
        {
            return false;
        }

        GenTree* tree = prevStmt->GetRootNode();
        if (tree->OperGet() == GT_ASG)
        {
            GenTree* lhs = tree->AsOp()->gtOp1;
            GenTree* rhs = tree->AsOp()->gtOp2;

            // Return as the new test node.
            if (lhs->gtOper == GT_LCL_VAR && lhs->AsLclVarCommon()->GetLclNum() == opr1->AsLclVarCommon()->GetLclNum())
            {
                if (rhs->OperIsCompare())
                {
                    *newTestStmt = prevStmt;
                    return true;
                }
            }
        }
    }
    return false;
}

//----------------------------------------------------------------------------------
// optExtractInitTestIncr:
//      Extract the "init", "test" and "incr" nodes of the loop.
//
// Arguments:
//      head    - Loop head block
//      bottom  - Loop bottom block
//      top     - Loop top block
//      ppInit  - The init stmt of the loop if found.
//      ppTest  - The test stmt of the loop if found.
//      ppIncr  - The incr stmt of the loop if found.
//
// Return Value:
//      The results are put in "ppInit", "ppTest" and "ppIncr" if the method
//      returns true. Returns false if the information can't be extracted.
//
// Operation:
//      Check if the "test" stmt is last stmt in the loop "bottom". If found good,
//      "test" stmt is found. Try to find the "incr" stmt. Check previous stmt of
//      "test" to get the "incr" stmt. If it is not found it could be a loop of the
//      below form.
//
//                     +-------<-----------------<-----------+
//                     |                                     |
//                     v                                     |
//      BBinit(head) -> BBcond(top) -> BBLoopBody(bottom) ---^
//
//      Check if the "incr" tree is present in the loop "top" node as the last stmt.
//      Also check if the "test" tree is assigned to a tmp node and the tmp is used
//      in the jtrue condition.
//
// Note:
//      This method just retrieves what it thinks is the "test" node,
//      the callers are expected to verify that "iterVar" is used in the test.
//
bool Compiler::optExtractInitTestIncr(
    BasicBlock* head, BasicBlock* bottom, BasicBlock* top, GenTree** ppInit, GenTree** ppTest, GenTree** ppIncr)
{
    assert(ppInit != nullptr);
    assert(ppTest != nullptr);
    assert(ppIncr != nullptr);

    // Check if last two statements in the loop body are the increment of the iterator
    // and the loop termination test.
    noway_assert(bottom->bbStmtList != nullptr);
    Statement* testStmt = bottom->lastStmt();
    noway_assert(testStmt != nullptr && testStmt->GetNextStmt() == nullptr);

    // If the JTRUE merely tests a temp, redirect "testStmt" to the statement
    // that computes the temp (the actual comparison).
    Statement* newTestStmt;
    if (optIsLoopTestEvalIntoTemp(testStmt, &newTestStmt))
    {
        testStmt = newTestStmt;
    }

    // Check if we have the incr stmt before the test stmt, if we don't,
    // check if incr is part of the loop "top".
    Statement* incrStmt = testStmt->GetPrevStmt();
    if (incrStmt == nullptr || optIsLoopIncrTree(incrStmt->GetRootNode()) == BAD_VAR_NUM)
    {
        if (top == nullptr || top->bbStmtList == nullptr || top->bbStmtList->GetPrevStmt() == nullptr)
        {
            return false;
        }

        // If the prev stmt to loop test is not incr, then check if we have loop test evaluated into a tmp.
        Statement* toplastStmt = top->lastStmt();
        if (optIsLoopIncrTree(toplastStmt->GetRootNode()) != BAD_VAR_NUM)
        {
            incrStmt = toplastStmt;
        }
        else
        {
            return false;
        }
    }

    assert(testStmt != incrStmt);

    // Find the last statement in the loop pre-header which we expect to be the initialization of
    // the loop iterator.
    Statement* phdrStmt = head->firstStmt();
    if (phdrStmt == nullptr)
    {
        return false;
    }

    Statement* initStmt = phdrStmt->GetPrevStmt();
    noway_assert(initStmt != nullptr && (initStmt->GetNextStmt() == nullptr));

    // If it is a duplicated loop condition, skip it.
    if (initStmt->GetRootNode()->OperIs(GT_JTRUE))
    {
        bool doGetPrev = true;
#ifdef DEBUG
        if (opts.optRepeat)
        {
            // Previous optimization passes may have inserted compiler-generated
            // statements other than duplicated loop conditions.
            doGetPrev = (initStmt->GetPrevStmt() != nullptr);
        }
        else
        {
            // Must be a duplicated loop condition.
            noway_assert(initStmt->GetRootNode()->gtOper == GT_JTRUE);
        }
#endif // DEBUG
        if (doGetPrev)
        {
            initStmt = initStmt->GetPrevStmt();
        }
        noway_assert(initStmt != nullptr);
    }

    *ppInit = initStmt->GetRootNode();
    *ppTest = testStmt->GetRootNode();
    *ppIncr = incrStmt->GetRootNode();

    return true;
}

/*****************************************************************************
 *
 *  Record the loop in the loop table. Return true if successful, false if
 *  out of entries in loop table.
 */
bool Compiler::optRecordLoop(
    BasicBlock* head, BasicBlock* top, BasicBlock* entry, BasicBlock* bottom, BasicBlock* exit, unsigned char exitCnt)
{
    if (exitCnt == 1)
    {
        noway_assert(exit != nullptr);
    }

    // Record this loop in the table, if there's room.

    assert(optLoopCount <= BasicBlock::MAX_LOOP_NUM);
    if (optLoopCount == BasicBlock::MAX_LOOP_NUM)
    {
#if COUNT_LOOPS
        loopOverflowThisMethod = true;
#endif
        return false;
    }

    // Assumed preconditions on the loop we're adding.
    assert(top->bbNum <= entry->bbNum);
    assert(entry->bbNum <= bottom->bbNum);
    assert(head->bbNum < top->bbNum || head->bbNum > bottom->bbNum);

    unsigned char loopInd = optLoopCount;

    if (optLoopTable == nullptr)
    {
        assert(loopInd == 0);
        optLoopTable = getAllocator(CMK_LoopOpt).allocate<LoopDsc>(BasicBlock::MAX_LOOP_NUM);

        NewLoopEpoch();
    }
    else
    {
        // If the new loop contains any existing ones, add it in the right place.
        // Scan downward so that "loopInd" ends up at the innermost (lowest-index)
        // already-recorded loop that the new loop contains.
        for (unsigned char prevPlus1 = optLoopCount; prevPlus1 > 0; prevPlus1--)
        {
            unsigned char prev = prevPlus1 - 1;
            if (optLoopTable[prev].lpContainedBy(top, bottom))
            {
                loopInd = prev;
            }
        }

        // Move up any loops if necessary.
        for (unsigned j = optLoopCount; j > loopInd; j--)
        {
            optLoopTable[j] = optLoopTable[j - 1];
        }
    }

#ifdef DEBUG
    for (unsigned i = loopInd + 1; i < optLoopCount; i++)
    {
        // The loop is well-formed.
        assert(optLoopTable[i].lpWellFormed());
        // Check for disjoint.
        if (optLoopTable[i].lpDisjoint(top, bottom))
        {
            continue;
        }
        // Otherwise, assert complete containment (of optLoopTable[i] in new loop).
        assert(optLoopTable[i].lpContainedBy(top, bottom));
    }
#endif // DEBUG

    optLoopTable[loopInd].lpHead    = head;
    optLoopTable[loopInd].lpTop     = top;
    optLoopTable[loopInd].lpBottom  = bottom;
    optLoopTable[loopInd].lpEntry   = entry;
    optLoopTable[loopInd].lpExit    = exit;
    optLoopTable[loopInd].lpExitCnt = exitCnt;

    optLoopTable[loopInd].lpParent  = BasicBlock::NOT_IN_LOOP;
    optLoopTable[loopInd].lpChild   = BasicBlock::NOT_IN_LOOP;
    optLoopTable[loopInd].lpSibling = BasicBlock::NOT_IN_LOOP;

    optLoopTable[loopInd].lpAsgVars = AllVarSetOps::UninitVal();

    optLoopTable[loopInd].lpFlags = LPFLG_EMPTY;

    // We haven't yet recorded any side effects.
    for (MemoryKind memoryKind : allMemoryKinds())
    {
        optLoopTable[loopInd].lpLoopHasMemoryHavoc[memoryKind] = false;
    }
    optLoopTable[loopInd].lpFieldsModified         = nullptr;
    optLoopTable[loopInd].lpArrayElemTypesModified = nullptr;

    //
    // Try to find loops that have an iterator (i.e. for-like loops) "for (init; test; incr){ ... }"
    // We have the following restrictions:
    //     1. The loop condition must be a simple one i.e. only one JTRUE node
    //     2. There must be a loop iterator (a local var) that is
    //        incremented (decremented or lsh, rsh, mul) with a constant value
    //     3. The iterator is incremented exactly once
    //     4. The loop condition must use the iterator.
    //
    if (bottom->bbJumpKind == BBJ_COND)
    {
        GenTree* init;
        GenTree* test;
        GenTree* incr;
        if (!optExtractInitTestIncr(head, bottom, top, &init, &test, &incr))
        {
            goto DONE_LOOP;
        }

        unsigned iterVar = BAD_VAR_NUM;
        if (!optComputeIterInfo(incr, head->bbNext, bottom, &iterVar))
        {
            goto DONE_LOOP;
        }

        // Make sure the "iterVar" initialization is never skipped,
        // i.e. every pred of ENTRY other than HEAD is in the loop.
        for (BasicBlock* const predBlock : entry->PredBlocks())
        {
            if ((predBlock != head) && !optLoopTable[loopInd].lpContains(predBlock))
            {
                goto DONE_LOOP;
            }
        }

        if (!optPopulateInitInfo(loopInd, head, init, iterVar))
        {
            goto DONE_LOOP;
        }

        // Check that the iterator is used in the loop condition.
        if (!optCheckIterInLoopTest(loopInd, test, head->bbNext, bottom, iterVar))
        {
            goto DONE_LOOP;
        }

        // We know the loop has an iterator at this point ->flag it as LPFLG_ITER
        // Record the iterator, the pointer to the test node
        // and the initial value of the iterator (constant or local var)
        optLoopTable[loopInd].lpFlags |= LPFLG_ITER;

        // Record iterator.
        optLoopTable[loopInd].lpIterTree = incr;

#if COUNT_LOOPS
        // Save the initial value of the iterator - can be lclVar or constant
        // Flag the loop accordingly.

        iterLoopCount++;
#endif

#if COUNT_LOOPS
        simpleTestLoopCount++;
#endif

#if COUNT_LOOPS
        // Check if a constant iteration loop.
        if ((optLoopTable[loopInd].lpFlags & LPFLG_CONST_INIT) && (optLoopTable[loopInd].lpFlags & LPFLG_CONST_LIMIT))
        {
            // This is a constant loop.
            constIterLoopCount++;
        }
#endif

#ifdef DEBUG
        if (verbose && 0)
        {
            printf("\nConstant loop initializer:\n");
            gtDispTree(init);

            printf("\nConstant loop body:\n");

            BasicBlock* block = head;
            do
            {
                block = block->bbNext;
                for (Statement* const stmt : block->Statements())
                {
                    if (stmt->GetRootNode() == incr)
                    {
                        break;
                    }
                    printf("\n");
                    gtDispTree(stmt->GetRootNode());
                }
            } while (block != bottom);
        }
#endif // DEBUG
    }

DONE_LOOP:

    bool loopInsertedAtEnd = (loopInd == optLoopCount);
    optLoopCount++;

#ifdef DEBUG
    if (verbose)
    {
        printf("Recorded loop %s", loopInsertedAtEnd ? "" : "(extended) ");
        optPrintLoopInfo(loopInd, /* verbose */ true);
        printf("\n");
    }
#endif // DEBUG

    return true;
}

#ifdef DEBUG
// optCheckPreds: DEBUG-only consistency check that every recorded predecessor
// edge corresponds to an actual flow edge in the basic block list.
void Compiler::optCheckPreds()
{
    for (BasicBlock* const block : Blocks())
    {
        for (BasicBlock* const predBlock : block->PredBlocks())
        {
            // make sure this pred is part of the BB list
            BasicBlock* bb;
            for (bb = fgFirstBB; bb; bb = bb->bbNext)
            {
                if (bb == predBlock)
                {
                    break;
                }
            }
            noway_assert(bb);
            switch (bb->bbJumpKind)
            {
                case BBJ_COND:
                    if (bb->bbJumpDest == block)
                    {
                        break;
                    }
                    FALLTHROUGH;
                case BBJ_NONE:
                    noway_assert(bb->bbNext == block);
                    break;
                case BBJ_EHFILTERRET:
                case BBJ_ALWAYS:
                case BBJ_EHCATCHRET:
                    noway_assert(bb->bbJumpDest == block);
                    break;
                default:
                    break;
            }
        }
    }
}
#endif // DEBUG

namespace
{
//------------------------------------------------------------------------
// LoopSearch: Class that handles scanning a range of blocks to detect a loop,
// moving blocks to make the loop body contiguous, and recording the loop.
//
// We will use the following terminology:
//   HEAD    - the basic block that flows into the loop ENTRY block (Currently MUST be lexically before entry).
//             Not part of the looping of the loop.
//   TOP     - the target of the backward edge from BOTTOM, and the lexically first basic block (in bbNext order)
//             within this loop.
//   BOTTOM  - the lexically last block in the loop (i.e. the block from which we jump to the top)
//   EXIT    - the predecessor of loop's unique exit edge, if it has a unique exit edge; else nullptr
//   ENTRY   - the entry in the loop (not necessarly the TOP), but there must be only one entry
//
// We (currently) require the body of a loop to be a contiguous (in bbNext order) sequence of basic blocks.
// When the loop is identified, blocks will be moved out to make it a compact contiguous region if possible,
// and in cases where compaction is not possible, we'll subsequently treat all blocks in the lexical range
// between TOP and BOTTOM as part of the loop even if they aren't part of the SCC.
// Regarding nesting: Since a given block can only have one back-edge (we only detect loops with back-edges
// from BBJ_COND or BBJ_ALWAYS blocks), no two loops will share the same BOTTOM. Two loops may share the
// same TOP/ENTRY as reported by LoopSearch, and optCanonicalizeLoopNest will subsequently re-write
// the CFG so that no two loops share the same TOP/ENTRY anymore.
//
//        |
//        v
//      head
//        |
//        |      top   <--+
//        |       |       |
//        |      ...      |
//        |       |       |
//        |       v       |
//        +---> entry     |
//                |       |
//               ...      |
//                |       |
//                v       |
//         +-- exit/tail  |
//         |      |       |
//         |     ...      |
//         |      |       |
//         |      v       |
//         |    bottom ---+
//         |
//         +------+
//                |
//                v
//
class LoopSearch
{

    // Keeping track of which blocks are in the loop requires two block sets since we may add blocks
    // as we go but the BlockSet type's max ID doesn't increase to accommodate them. Define a helper
    // struct to make the ensuing code more readable.
    struct LoopBlockSet
    {
    private:
        // Keep track of blocks with bbNum <= oldBlockMaxNum in a regular BlockSet, since
        // it can hold all of them.
        BlockSet oldBlocksInLoop; // Blocks with bbNum <= oldBlockMaxNum

        // Keep track of blocks with bbNum > oldBlockMaxNum in a separate BlockSet, but
        // indexing them by (blockNum - oldBlockMaxNum); since we won't generate more than
        // one new block per old block, this must be sufficient to track any new blocks.
        BlockSet newBlocksInLoop; // Blocks with bbNum > oldBlockMaxNum

        Compiler*    comp;
        unsigned int oldBlockMaxNum;

    public:
        LoopBlockSet(Compiler* comp)
            : oldBlocksInLoop(BlockSetOps::UninitVal())
            , newBlocksInLoop(BlockSetOps::UninitVal())
            , comp(comp)
            , oldBlockMaxNum(comp->fgBBNumMax)
        {
        }

        // Reset: Empty both sets (lazily allocating their backing storage on
        // first use) and seed them with the given block number.
        void Reset(unsigned int seedBlockNum)
        {
            if (BlockSetOps::MayBeUninit(oldBlocksInLoop))
            {
                // Either the block sets are uninitialized (and long), so we need to initialize
                // them (and allocate their backing storage), or they are short and empty, so
                // assigning MakeEmpty to them is as cheap as ClearD.
                oldBlocksInLoop = BlockSetOps::MakeEmpty(comp);
                newBlocksInLoop = BlockSetOps::MakeEmpty(comp);
            }
            else
            {
                // We know the backing storage is already allocated, so just clear it.
                BlockSetOps::ClearD(comp, oldBlocksInLoop);
                BlockSetOps::ClearD(comp, newBlocksInLoop);
            }
            assert(seedBlockNum <= oldBlockMaxNum);
            BlockSetOps::AddElemD(comp, oldBlocksInLoop, seedBlockNum);
        }

        // CanRepresent: True if `blockNum` fits in one of the two underlying sets.
        bool CanRepresent(unsigned int blockNum)
        {
            // We can represent old blocks up to oldBlockMaxNum, and
            // new blocks up to 2 * oldBlockMaxNum.
            return (blockNum <= 2 * oldBlockMaxNum);
        }

        // IsMember: Membership test, dispatching on old vs. new block numbering.
        bool IsMember(unsigned int blockNum)
        {
            if (blockNum > oldBlockMaxNum)
            {
                // New blocks are stored rebased by oldBlockMaxNum.
                return BlockSetOps::IsMember(comp, newBlocksInLoop, blockNum - oldBlockMaxNum);
            }
            else
            {
                return BlockSetOps::IsMember(comp, oldBlocksInLoop, blockNum);
            }
        }

        // Insert: Unconditionally add `blockNum` to the appropriate set.
        void Insert(unsigned int blockNum)
        {
            if (blockNum > oldBlockMaxNum)
            {
                BlockSetOps::AddElemD(comp, newBlocksInLoop, blockNum - oldBlockMaxNum);
            }
            else
            {
                BlockSetOps::AddElemD(comp, oldBlocksInLoop, blockNum);
            }
        }

        // TestAndInsert: Add `blockNum` if not already present.
        // Note the return convention: returns true if the block was ALREADY a
        // member (nothing inserted), false if it was newly inserted.
        bool TestAndInsert(unsigned int blockNum)
        {
            if (blockNum > oldBlockMaxNum)
            {
                unsigned int shiftedNum = blockNum - oldBlockMaxNum;
                if (!BlockSetOps::IsMember(comp, newBlocksInLoop, shiftedNum))
                {
                    BlockSetOps::AddElemD(comp, newBlocksInLoop, shiftedNum);
                    return false;
                }
            }
            else
            {
                if (!BlockSetOps::IsMember(comp, oldBlocksInLoop, blockNum))
                {
                    BlockSetOps::AddElemD(comp, oldBlocksInLoop, blockNum);
                    return false;
                }
            }
            return true;
        }
    };

    LoopBlockSet loopBlocks; // Set of blocks identified as part of the loop
    Compiler*    comp;

    // See LoopSearch class comment header for a diagram relating these fields:
    BasicBlock* head;   // Predecessor of unique entry edge
    BasicBlock* top;    // Successor of back-edge from BOTTOM
    BasicBlock* bottom; // Predecessor of back-edge to TOP, also lexically last in-loop block
    BasicBlock* entry;  // Successor of unique entry edge

    BasicBlock*   lastExit;       // Most recently discovered exit block
    unsigned char exitCount;      // Number of discovered exit edges
    unsigned int  oldBlockMaxNum; // Used to identify new blocks created during compaction
    BlockSet      bottomBlocks;   // BOTTOM blocks of already-recorded loops
#ifdef DEBUG
    bool forgotExit = false; // Flags a rare case where lastExit gets nulled out, for assertions
#endif
    bool changedFlowGraph = false; // Signals that loop compaction has modified the flow graph

public:
    LoopSearch(Compiler* comp)
        : loopBlocks(comp), comp(comp), oldBlockMaxNum(comp->fgBBNumMax), bottomBlocks(BlockSetOps::MakeEmpty(comp))
    {
        // Make sure we've renumbered such that the bitsets can hold all the bits
        assert(comp->fgBBNumMax <= comp->fgCurBBEpochSize);
    }

    //------------------------------------------------------------------------
    // RecordLoop: Notify the Compiler that a loop has been found.
    //
    // Return Value:
    //    true  - Loop successfully recorded.
    //    false - Compiler has run out of loop descriptors; loop not recorded.
    //
    bool RecordLoop()
    {
        // At this point we have a compact loop - record it in the loop table.

        // If we found only one exit, record it in the table too
        // (otherwise an exit = nullptr in the loop table means multiple exits).
        BasicBlock* onlyExit = (exitCount == 1 ? lastExit : nullptr);
        if (comp->optRecordLoop(head, top, entry, bottom, onlyExit, exitCount))
        {
            // Record the BOTTOM block for future reference before returning.
            assert(bottom->bbNum <= oldBlockMaxNum);
            BlockSetOps::AddElemD(comp, bottomBlocks, bottom->bbNum);
            return true;
        }

        // Unable to record this loop because the loop descriptor table overflowed.
        return false;
    }

    //------------------------------------------------------------------------
    // ChangedFlowGraph: Determine whether loop compaction has modified the flow graph.
    //
    // Return Value:
    //    true  - The flow graph has been modified; fgUpdateChangedFlowGraph should
    //            be called (which is the caller's responsibility).
    //    false - The flow graph has not been modified by this LoopSearch.
    //
    bool ChangedFlowGraph()
    {
        return changedFlowGraph;
    }

    //------------------------------------------------------------------------
    // FindLoop: Search for a loop with the given HEAD block and back-edge.
    //
    // Arguments:
    //    head   - Block to be the HEAD of any loop identified
    //    top    - Block to be the TOP of any loop identified
    //    bottom - Block to be the BOTTOM of any loop identified
    //
    // Return Value:
    //    true  - Found a valid loop.
    //    false - Did not find a valid loop.
    //
    // Notes:
    //    May modify flow graph to make loop compact before returning.
    //    Will set instance fields to track loop's extent and exits if a valid
    //    loop is found, and potentially trash them otherwise.
    //
    bool FindLoop(BasicBlock* head, BasicBlock* top, BasicBlock* bottom)
    {
        // Is this a loop candidate? - We look for "back edges", i.e. an edge from BOTTOM
        // to TOP (note that this is an abuse of notation since this is not necessarily a back edge
        // as the definition says, but merely an indication that we have a loop there).
        // Thus, we have to be very careful and after entry discovery check that it is indeed
        // the only place we enter the loop (especially for non-reducible flow graphs).

        if (top->bbNum > bottom->bbNum) // is this a backward edge? (from BOTTOM to TOP)
        {
            // Edge from BOTTOM to TOP is not a backward edge
            return false;
        }

        if (bottom->bbNum > oldBlockMaxNum)
        {
            // Not a true back-edge; bottom is a block added to reconnect fall-through during
            // loop processing, so its block number does not reflect its position.
            return false;
        }

        if (bottom->KindIs(BBJ_EHFINALLYRET, BBJ_EHFILTERRET, BBJ_EHCATCHRET, BBJ_CALLFINALLY, BBJ_SWITCH))
        {
            // BBJ_EHFINALLYRET, BBJ_EHFILTERRET, BBJ_EHCATCHRET, and BBJ_CALLFINALLY can never form a loop.
            // BBJ_SWITCH that has a backward jump appears only for labeled break.
            return false;
        }

        // The presence of a "back edge" is an indication that a loop might be present here.
        //
        // Definition: A loop is:
        //   1. A collection of STRONGLY CONNECTED nodes i.e. there is a path from any
        //      node in the loop to any other node in the loop (wholly within the loop)
        //   2. The loop has a unique ENTRY, i.e. there is only one way to reach a node
        //      in the loop from outside the loop, and that is through the ENTRY

        // Let's find the loop ENTRY
        BasicBlock* entry = FindEntry(head, top, bottom);

        if (entry == nullptr)
        {
            // For now, we only recognize loops where HEAD has some successor ENTRY in the loop.
            return false;
        }

        // Passed the basic checks; initialize instance state for this back-edge.
        this->head      = head;
        this->top       = top;
        this->entry     = entry;
        this->bottom    = bottom;
        this->lastExit  = nullptr;
        this->exitCount = 0;

        if (!HasSingleEntryCycle())
        {
            // There isn't actually a loop between TOP and BOTTOM
            return false;
        }

        if (!loopBlocks.IsMember(top->bbNum))
        {
            // The "back-edge" we identified isn't actually part of the flow cycle containing ENTRY
            return false;
        }

        // Disqualify loops where the first block of the loop is less nested in EH than
        // the bottom block. That is, we don't want to handle loops where the back edge
        // goes from within an EH region to a first block that is outside that same EH
        // region. Note that we *do* handle loops where the first block is the *first*
        // block of a more nested EH region (since it is legal to branch to the first
        // block of an immediately more nested EH region). So, for example, disqualify
        // this:
        //
        // BB02
        // ...
        // try {
        // ...
        // BB10 BBJ_COND => BB02
        // ...
        // }
        //
        // Here, BB10 is more nested than BB02.

        if (bottom->hasTryIndex() && !comp->bbInTryRegions(bottom->getTryIndex(), top))
        {
            JITDUMP("Loop 'top' " FMT_BB " is in an outer EH region compared to loop 'bottom' " FMT_BB ". Rejecting "
                    "loop.\n",
                    top->bbNum, bottom->bbNum);
            return false;
        }

#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
        // Disqualify loops where the first block of the loop is a finally target.
        // The main problem is when multiple loops share a 'top' block that is a finally
        // target and we canonicalize the loops by adding a new loop head. In that case, we
        // need to update the blocks so the finally target bit is moved to the newly created
        // block, and removed from the old 'top' block. This is 'hard', so it's easier to disallow
        // the loop than to update the flow graph to support this case.

        if ((top->bbFlags & BBF_FINALLY_TARGET) != 0)
        {
            JITDUMP("Loop 'top' " FMT_BB " is a finally target. Rejecting loop.\n", top->bbNum);
            return false;
        }
#endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)

        // Compact the loop (sweep through it and move out any blocks that aren't part of the
        // flow cycle), and find the exits.
        if (!MakeCompactAndFindExits())
        {
            // Unable to preserve well-formed loop during compaction.
            return false;
        }

        // We have a valid loop.
        return true;
    }

    //------------------------------------------------------------------------
    // GetExitCount: Return the exit count computed for the loop
    //
    unsigned char GetExitCount() const
    {
        return exitCount;
    }

private:
    //------------------------------------------------------------------------
    // FindEntry: See if given HEAD flows to valid ENTRY between given TOP and BOTTOM
    //
    // Arguments:
    //    head   - Block to be the HEAD of any loop identified
    //    top    - Block to be the TOP of any loop identified
    //    bottom - Block to be the BOTTOM of any loop identified
    //
    // Return Value:
    //    Block to be the ENTRY of any loop identified, or nullptr if no
    //    such entry meeting our criteria can be found.
    //
    // Notes:
    //    Returns main entry if one is found, does not check for side-entries.
    //
    BasicBlock* FindEntry(BasicBlock* head, BasicBlock* top, BasicBlock* bottom)
    {
        if (head->bbJumpKind == BBJ_ALWAYS)
        {
            if (head->bbJumpDest->bbNum <= bottom->bbNum && head->bbJumpDest->bbNum >= top->bbNum)
            {
                // OK - we enter somewhere within the loop.

                // Cannot enter at the top - should have being caught by redundant jumps
                assert((head->bbJumpDest != top) || (head->bbFlags & BBF_KEEP_BBJ_ALWAYS));

                return head->bbJumpDest;
            }
            else
            {
                // special case - don't consider now
                // assert (!"Loop entered in weird way!");
                return nullptr;
            }
        }
        // Can we fall through into the loop?
        else if (head->KindIs(BBJ_NONE, BBJ_COND))
        {
            // The ENTRY is at the TOP (a do-while loop)
            return top;
        }
        else
        {
            return nullptr; // HEAD does not flow into the loop; bail for now
        }
    }

    //------------------------------------------------------------------------
    // HasSingleEntryCycle: Perform a reverse flow walk from ENTRY, visiting
    //    only blocks between TOP and BOTTOM, to determine if such a cycle
    //    exists and if it has a single entry.
    //
    // Return Value:
    //    true  - Found a single-entry cycle.
    //    false - Did not find a single-entry cycle.
    //
    // Notes:
    //    Will mark (in `loopBlocks`) all blocks found to participate in the cycle.
    //
    bool HasSingleEntryCycle()
    {
        // Now do a backwards flow walk from entry to see if we have a single-entry loop
        bool foundCycle = false;

        // Seed the loop block set and worklist with the entry block.
        loopBlocks.Reset(entry->bbNum);
        jitstd::list<BasicBlock*> worklist(comp->getAllocator(CMK_LoopOpt));
        worklist.push_back(entry);

        while (!worklist.empty())
        {
            BasicBlock* block = worklist.back();
            worklist.pop_back();

            // Make sure ENTRY dominates all blocks in the loop.
            if (block->bbNum > oldBlockMaxNum)
            {
                // This is a new block we added to connect fall-through, so the
                // recorded dominator information doesn't cover it. Just continue,
                // and when we process its unique predecessor we'll abort if ENTRY
                // doesn't dominate that.
            }
            else if (!comp->fgDominate(entry, block))
            {
                return false;
            }

            // Add preds to the worklist, checking for side-entries.
            for (BasicBlock* const predBlock : block->PredBlocks())
            {
                unsigned int testNum = PositionNum(predBlock);

                if ((testNum < top->bbNum) || (testNum > bottom->bbNum))
                {
                    // Pred is out of loop range
                    if (block == entry)
                    {
                        if (predBlock == head)
                        {
                            // This is the single entry we expect.
                            continue;
                        }
                        // ENTRY has some pred other than head outside the loop. If ENTRY does not
                        // dominate this pred, we'll consider this a side-entry and skip this loop;
                        // otherwise the loop is still valid and this may be a (flow-wise) back-edge
                        // of an outer loop. For the dominance test, if `predBlock` is a new block, use
                        // its unique predecessor since the dominator tree has info for that.
                        BasicBlock* effectivePred =
                            (predBlock->bbNum > oldBlockMaxNum ? predBlock->bbPrev : predBlock);
                        if (comp->fgDominate(entry, effectivePred))
                        {
                            // Outer loop back-edge
                            continue;
                        }
                    }

                    // There are multiple entries to this loop, don't consider it.
                    return false;
                }

                bool isFirstVisit;
                if (predBlock == entry)
                {
                    // We have indeed found a cycle in the flow graph.
                    isFirstVisit = !foundCycle;
                    foundCycle   = true;
                    assert(loopBlocks.IsMember(predBlock->bbNum));
                }
                else if (loopBlocks.TestAndInsert(predBlock->bbNum))
                {
                    // Already visited this pred
                    isFirstVisit = false;
                }
                else
                {
                    // Add this predBlock to the worklist
                    worklist.push_back(predBlock);
                    isFirstVisit = true;
                }

                if (isFirstVisit && (predBlock->bbNext != nullptr) &&
                    (PositionNum(predBlock->bbNext) == predBlock->bbNum))
                {
                    // We've created a new block immediately after `predBlock` to
                    // reconnect what was fall-through. Mark it as in-loop also;
                    // it needs to stay with `prev` and if it exits the loop we'd
                    // just need to re-create it if we tried to move it out.
                    loopBlocks.Insert(predBlock->bbNext->bbNum);
                }
            }
        }

        return foundCycle;
    }

    //------------------------------------------------------------------------
    // PositionNum: Get the number identifying a block's position per the
    //    lexical ordering that existed before searching for (and compacting)
    //    loops.
    //
    // Arguments:
    //    block - Block whose position is desired.
    //
    // Return Value:
    //    A number indicating that block's position relative to others.
    //
    // Notes:
    //    When the given block is a new one created during loop compaction,
    //    the number of its unique predecessor is returned.
    //
    unsigned int PositionNum(BasicBlock* block)
    {
        if (block->bbNum > oldBlockMaxNum)
        {
            // This must be a block we inserted to connect fall-through after moving blocks.
            // To determine if it's in the loop or not, use the number of its unique predecessor
            // block.
            assert(block->bbPreds->getBlock() == block->bbPrev);
            assert(block->bbPreds->flNext == nullptr);
            return block->bbPrev->bbNum;
        }
        return block->bbNum;
    }

    //------------------------------------------------------------------------
    // MakeCompactAndFindExits: Compact the loop (sweep through it and move out
    //   any blocks that aren't part of the flow cycle), and find the exits (set
    //   lastExit and exitCount).
    //
    // Return Value:
    //    true  - Loop successfully compacted (or `loopBlocks` expanded to
    //            include all blocks in the lexical range), exits enumerated.
    //    false - Loop cannot be made compact and remain well-formed.
    //
    bool MakeCompactAndFindExits()
    {
        // Compaction (if it needs to happen) will require an insertion point.
        BasicBlock* moveAfter = nullptr;

        for (BasicBlock* previous = top->bbPrev; previous != bottom;)
        {
            BasicBlock* block = previous->bbNext;

            if (loopBlocks.IsMember(block->bbNum))
            {
                // This block is a member of the loop. Check to see if it may exit the loop.
                CheckForExit(block);

                // Done processing this block; move on to the next.
                previous = block;
                continue;
            }

            // This blocks is lexically between TOP and BOTTOM, but it does not
            // participate in the flow cycle. Check for a run of consecutive
            // such blocks.
            BasicBlock* lastNonLoopBlock = block;
            BasicBlock* nextLoopBlock    = block->bbNext;
            while (!loopBlocks.IsMember(nextLoopBlock->bbNum))
            {
                lastNonLoopBlock = nextLoopBlock;
                nextLoopBlock    = nextLoopBlock->bbNext;
                // This loop must terminate because we know BOTTOM is in loopBlocks.
            }

            // Choose an insertion point for non-loop blocks if we haven't yet done so.
            if (moveAfter == nullptr)
            {
                moveAfter = FindInsertionPoint();
            }

            if (!BasicBlock::sameEHRegion(previous, nextLoopBlock) || !BasicBlock::sameEHRegion(previous, moveAfter))
            {
                // EH regions would be ill-formed if we moved these blocks out.
                // See if we can consider them loop blocks without introducing
                // a side-entry.
                if (CanTreatAsLoopBlocks(block, lastNonLoopBlock))
                {
                    // The call to `canTreatAsLoop` marked these blocks as part of the loop;
                    // iterate without updating `previous` so that we'll analyze them as part
                    // of the loop.
                    continue;
                }
                else
                {
                    // We can't move these out of the loop or leave them in, so just give
                    // up on this loop.
                    return false;
                }
            }

            // Now physically move the blocks.
            BasicBlock* moveBefore = moveAfter->bbNext;

            comp->fgUnlinkRange(block, lastNonLoopBlock);
            comp->fgMoveBlocksAfter(block, lastNonLoopBlock, moveAfter);
            comp->ehUpdateLastBlocks(moveAfter, lastNonLoopBlock);

            // Apply any adjustments needed for fallthrough at the boundaries of the moved region.
            FixupFallThrough(moveAfter, moveBefore, block);
            FixupFallThrough(lastNonLoopBlock, nextLoopBlock, moveBefore);
            // Also apply any adjustments needed where the blocks were snipped out of the loop.
            BasicBlock* newBlock = FixupFallThrough(previous, block, nextLoopBlock);
            if (newBlock != nullptr)
            {
                // This new block is in the loop and is a loop exit.
                loopBlocks.Insert(newBlock->bbNum);
                lastExit = newBlock;
                ++exitCount;
            }

            // Update moveAfter for the next insertion.
            moveAfter = lastNonLoopBlock;

            // Note that we've changed the flow graph, and continue without updating
            // `previous` so that we'll process nextLoopBlock.
            changedFlowGraph = true;
        }

        if ((exitCount == 1) && (lastExit == nullptr))
        {
            // If we happen to have a loop with two exits, one of which goes to an
            // infinite loop that's lexically nested inside it, where the inner loop
            // can't be moved out, we can end up in this situation (because
            // CanTreatAsLoopBlocks will have decremented the count expecting to find
            // another exit later). Bump the exit count to 2, since downstream code
            // will not be prepared for null lastExit with exitCount of 1.
            assert(forgotExit);
            exitCount = 2;
        }

        // Loop compaction was successful
        return true;
    }

    //------------------------------------------------------------------------
    // FindInsertionPoint: Find an appropriate spot to which blocks that are
    //    lexically between TOP and BOTTOM but not part of the flow cycle
    //    can be moved.
    //
    // Return Value:
    //    Block after which to insert moved blocks.
    //
    BasicBlock* FindInsertionPoint()
    {
        // Find an insertion point for blocks we're going to move. Move them down
        // out of the loop, and if possible find a spot that won't break up fall-through.
        BasicBlock* moveAfter = bottom;
        while (moveAfter->bbFallsThrough())
        {
            // Keep looking for a better insertion point if we can.
            BasicBlock* newMoveAfter = TryAdvanceInsertionPoint(moveAfter);
            if (newMoveAfter == nullptr)
            {
                // Ran out of candidate insertion points, so just split up the fall-through.
                return moveAfter;
            }
            moveAfter = newMoveAfter;
        }
        return moveAfter;
    }

    //------------------------------------------------------------------------
    // TryAdvanceInsertionPoint: Find the next legal insertion point after
    //    the given one, if one exists.
    //
    // Arguments:
    //    oldMoveAfter - Prior insertion point; find the next after this.
    //
    // Return Value:
    //    The next block after `oldMoveAfter` that is a legal insertion point
    //    (i.e. blocks being swept out of the loop can be moved immediately
    //    after it), if one exists, else nullptr.
    //
    BasicBlock* TryAdvanceInsertionPoint(BasicBlock* oldMoveAfter)
    {
        BasicBlock* newMoveAfter = oldMoveAfter->bbNext;

        if (!BasicBlock::sameEHRegion(oldMoveAfter, newMoveAfter))
        {
            // Don't cross an EH region boundary.
            return nullptr;
        }

        if (newMoveAfter->KindIs(BBJ_ALWAYS, BBJ_COND))
        {
            unsigned int destNum = newMoveAfter->bbJumpDest->bbNum;
            if ((destNum >= top->bbNum) && (destNum <= bottom->bbNum) && !loopBlocks.IsMember(destNum))
            {
                // Reversing this branch out of block `newMoveAfter` could confuse this algorithm
                // (in particular, the edge would still be numerically backwards but no longer be
                // lexically backwards, so a lexical forward walk from TOP would not find BOTTOM),
                // so don't do that.
                // We're checking for BBJ_ALWAYS and BBJ_COND only here -- we don't need to
                // check for BBJ_SWITCH because we'd never consider it a loop back-edge.
                return nullptr;
            }
        }

        // Similarly check to see if advancing to `newMoveAfter` would reverse the lexical order
        // of an edge from the run of blocks being moved to `newMoveAfter` -- doing so would
        // introduce a new lexical back-edge, which could (maybe?) confuse the loop search
        // algorithm, and isn't desirable layout anyway.
        for (BasicBlock* const predBlock : newMoveAfter->PredBlocks())
        {
            unsigned int predNum = predBlock->bbNum;
            if ((predNum >= top->bbNum) && (predNum <= bottom->bbNum) && !loopBlocks.IsMember(predNum))
            {
                // Don't make this forward edge a backwards edge.
                return nullptr;
            }
        }

        if (IsRecordedBottom(newMoveAfter))
        {
            // This is the BOTTOM of another loop; don't move any blocks past it, to avoid moving them
            // out of that loop (we should have already done so when processing that loop if it were legal).
            return nullptr;
        }

        // Advancing the insertion point is ok, except that we can't split up any CallFinally/BBJ_ALWAYS
        // pair, so if we've got such a pair recurse to see if we can move past the whole thing.
        return (newMoveAfter->isBBCallAlwaysPair() ? TryAdvanceInsertionPoint(newMoveAfter) : newMoveAfter);
    }

    //------------------------------------------------------------------------
    // isOuterBottom: Determine if the given block is the BOTTOM of a previously
    //    recorded loop.
    //
    // Arguments:
    //    block - Block to check for BOTTOM-ness.
    //
    // Return Value:
    //    true  - The blocks was recorded as `bottom` of some earlier-processed loop.
    //    false - No loops yet recorded have this block as their `bottom`.
    //
    bool IsRecordedBottom(BasicBlock* block)
    {
        if (block->bbNum > oldBlockMaxNum)
        {
            // This is a new block, which can't be an outer bottom block because we only allow old blocks
            // as BOTTOM.
            return false;
        }
        return BlockSetOps::IsMember(comp, bottomBlocks, block->bbNum);
    }

    //------------------------------------------------------------------------
    // CanTreatAsLoopBlocks: If the given range of blocks can be treated as
    //    loop blocks, add them to loopBlockSet and return true. Otherwise,
    //    return false.
    //
    // Arguments:
    //    firstNonLoopBlock - First block in the run to be subsumed.
    //    lastNonLoopBlock  - Last block in the run to be subsumed.
    //
    // Return Value:
    //    true  - The blocks from `fistNonLoopBlock` to `lastNonLoopBlock` were
    //            successfully added to `loopBlocks`.
    //    false - Treating the blocks from `fistNonLoopBlock` to `lastNonLoopBlock`
    //            would not be legal (it would induce a side-entry).
    //
    // Notes:
    //    `loopBlocks` may be modified even if `false` is returned.
    //    `exitCount` and `lastExit` may be modified if this process identifies
    //    in-loop edges that were previously counted as exits.
    //
    bool CanTreatAsLoopBlocks(BasicBlock* firstNonLoopBlock, BasicBlock* lastNonLoopBlock)
    {
        for (BasicBlock* const testBlock : comp->Blocks(firstNonLoopBlock, lastNonLoopBlock))
        {
            for (BasicBlock* const testPred : testBlock->PredBlocks())
            {
                unsigned int predPosNum         = PositionNum(testPred);
                unsigned int firstNonLoopPosNum = PositionNum(firstNonLoopBlock);
                unsigned int lastNonLoopPosNum  = PositionNum(lastNonLoopBlock);

                if (loopBlocks.IsMember(predPosNum) ||
                    ((predPosNum >= firstNonLoopPosNum) && (predPosNum <= lastNonLoopPosNum)))
                {
                    // This pred is in the loop (or what will be the loop if we determine this
                    // run of exit blocks doesn't include a side-entry).

                    if (predPosNum < firstNonLoopPosNum)
                    {
                        // We've already counted this block as an exit, so decrement the count.
                        --exitCount;
                        if (lastExit == testPred)
                        {
                            // Erase this now-bogus `lastExit` entry.
                            lastExit = nullptr;
                            INDEBUG(forgotExit = true);
                        }
                    }
                }
                else
                {
                    // This pred is not in the loop, so this constitutes a side-entry.
                    return false;
                }
            }

            // Either we're going to abort the loop on a subsequent testBlock, or this
            // testBlock is part of the loop.
            loopBlocks.Insert(testBlock->bbNum);
        }

        // All blocks were ok to leave in the loop.
        return true;
    }

    //------------------------------------------------------------------------
    // FixupFallThrough: Re-establish any broken control flow connectivity
    //    and eliminate any "goto-next"s that were created by changing the
    //    given block's lexical follower.
    //
    // Arguments:
    //    block - Block whose `bbNext` has changed.
    //    oldNext - Previous value of `block->bbNext`.
    //    newNext - New value of `block->bbNext`.
    //
    // Return Value:
    //    If a new block is created to reconnect flow, the new block is
    //    returned; otherwise, nullptr.
    //
    BasicBlock* FixupFallThrough(BasicBlock* block, BasicBlock* oldNext, BasicBlock* newNext)
    {
        // If we create a new block, that will be our return value.
        BasicBlock* newBlock = nullptr;

        if (block->bbFallsThrough())
        {
            // Need to reconnect the flow from `block` to `oldNext`.

            if ((block->bbJumpKind == BBJ_COND) && (block->bbJumpDest == newNext))
            {
                // Block jumps to the new lexical successor and used to fall through to `oldNext`:
                // reverse the jump condition so the fall-through becomes the jump and vice versa.
                GenTree* test = block->lastNode();
                noway_assert(test->OperIsConditionalJump());

                if (test->OperGet() == GT_JTRUE)
                {
                    GenTree* cond = comp->gtReverseCond(test->AsOp()->gtOp1);
                    assert(cond == test->AsOp()->gtOp1); // Ensure `gtReverseCond` did not create a new node.
                    test->AsOp()->gtOp1 = cond;
                }
                else
                {
                    comp->gtReverseCond(test);
                }

                // Redirect the Conditional JUMP to go to `oldNext`
                block->bbJumpDest = oldNext;
            }
            else
            {
                // Insert an unconditional jump to `oldNext` just after `block`.
                newBlock = comp->fgConnectFallThrough(block, oldNext);
                noway_assert((newBlock == nullptr) || loopBlocks.CanRepresent(newBlock->bbNum));
            }
        }
        else if ((block->bbJumpKind == BBJ_ALWAYS) && (block->bbJumpDest == newNext))
        {
            // We've made `block`'s jump target its bbNext, so remove the jump.
            if (!comp->fgOptimizeBranchToNext(block, newNext, block->bbPrev))
            {
                // If optimizing away the goto-next failed for some reason, mark it KEEP_BBJ_ALWAYS to
                // prevent assertions from complaining about it.
                block->bbFlags |= BBF_KEEP_BBJ_ALWAYS;
            }
        }

        // Make sure we don't leave around a goto-next unless it's marked KEEP_BBJ_ALWAYS.
        assert(!block->KindIs(BBJ_COND, BBJ_ALWAYS) || (block->bbJumpDest != newNext) ||
               ((block->bbFlags & BBF_KEEP_BBJ_ALWAYS) != 0));
        return newBlock;
    }

    //------------------------------------------------------------------------
    // CheckForExit: Check if the given block has any successor edges that are
    //    loop exits, and update `lastExit` and `exitCount` if so.
    //
    // Arguments:
    //    block - Block whose successor edges are to be checked.
    //
    // Notes:
    //    If one block has multiple exiting successor edges, those are counted
    //    as multiple exits in `exitCount`.
    //
    void CheckForExit(BasicBlock* block)
    {
        BasicBlock* exitPoint;

        switch (block->bbJumpKind)
        {
            case BBJ_COND:
            case BBJ_CALLFINALLY:
            case BBJ_ALWAYS:
            case BBJ_EHCATCHRET:
                assert(block->bbJumpDest);
                exitPoint = block->bbJumpDest;

                if (!loopBlocks.IsMember(exitPoint->bbNum))
                {
                    // Exit from a block other than BOTTOM
                    lastExit = block;
                    exitCount++;
                }
                break;

            case BBJ_NONE:
                break;

            case BBJ_EHFINALLYRET:
            case BBJ_EHFILTERRET:
                // The "try" associated with this "finally" must be in the same loop, so the
                // finally block will return control inside the loop.
                break;

            case BBJ_THROW:
            case BBJ_RETURN:
                // Those are exits from the loop
                lastExit = block;
                exitCount++;
                break;

            case BBJ_SWITCH:
                // Each switch target that leaves the loop counts as a separate exit.
                for (BasicBlock* const exitPoint : block->SwitchTargets())
                {
                    if (!loopBlocks.IsMember(exitPoint->bbNum))
                    {
                        lastExit = block;
                        exitCount++;
                    }
                }
                break;

            default:
                noway_assert(!"Unexpected bbJumpKind");
                break;
        }

        if (block->bbFallsThrough() && !loopBlocks.IsMember(block->bbNext->bbNum))
        {
            // Found a fall-through exit.
            lastExit = block;
            exitCount++;
        }
    }
};
} // end (anonymous) namespace

//------------------------------------------------------------------------
// optFindNaturalLoops: Find the natural loops, using dominators. Note that the test for
// a loop is slightly different from the standard one, because we have not done a depth
// first reordering of the basic blocks.
//
// See LoopSearch class comment header for a description of the loops found.
//
// We will find and record a maximum of BasicBlock::MAX_LOOP_NUM loops (currently 64).
//
void Compiler::optFindNaturalLoops()
{
#ifdef DEBUG
    if (verbose)
    {
        printf("*************** In optFindNaturalLoops()\n");
    }
#endif // DEBUG

    noway_assert(fgDomsComputed);
    assert(fgHasLoops);

#if COUNT_LOOPS
    hasMethodLoops         = false;
    loopsThisMethod        = 0;
    loopOverflowThisMethod = false;
#endif

    LoopSearch search(this);

    // Consider every (head, top) pair of lexically adjacent blocks; a back-edge
    // into `top` from one of its predecessors identifies a candidate loop.
    for (BasicBlock* head = fgFirstBB; head->bbNext != nullptr; head = head->bbNext)
    {
        BasicBlock* top = head->bbNext;

        // Blocks that are rarely run have a zero bbWeight and should never be optimized here.
        if (top->bbWeight == BB_ZERO_WEIGHT)
        {
            continue;
        }

        for (BasicBlock* const predBlock : top->PredBlocks())
        {
            if (search.FindLoop(head, top, predBlock))
            {
                // Found a loop; record it and see if we've hit the limit.
                bool recordedLoop = search.RecordLoop();

                (void)recordedLoop; // avoid unused variable warnings in COUNT_LOOPS and !DEBUG

#if COUNT_LOOPS
                if (!hasMethodLoops)
                {
                    // Mark the method as containing natural loops
                    totalLoopMethods++;
                    hasMethodLoops = true;
                }

                // Increment total number of loops found
                totalLoopCount++;
                loopsThisMethod++;

                // Keep track of the number of exits
                loopExitCountTable.record(static_cast<unsigned>(search.GetExitCount()));

                // Note that we continue to look for loops even if
                // (optLoopCount == BasicBlock::MAX_LOOP_NUM), in contrast to the !COUNT_LOOPS code below.
                // This gives us a better count and stats. Hopefully it doesn't affect actual codegen.
                CLANG_FORMAT_COMMENT_ANCHOR;

#else  // COUNT_LOOPS
                assert(recordedLoop);
                if (optLoopCount == BasicBlock::MAX_LOOP_NUM)
                {
                    // We won't be able to record any more loops, so stop looking.
                    goto NO_MORE_LOOPS;
                }
#endif // COUNT_LOOPS

                // Continue searching preds of `top` to see if any other are
                // back-edges (this can happen for nested loops).  The iteration
                // is safe because the compaction we do only modifies predecessor
                // lists of blocks that gain or lose fall-through from their
                // `bbPrev`, but since the motion is from within the loop to below
                // it, we know we're not altering the relationship between `top`
                // and its `bbPrev`.
            }
        }
    }

#if !COUNT_LOOPS
NO_MORE_LOOPS:
#endif // !COUNT_LOOPS

#if COUNT_LOOPS
    loopCountTable.record(loopsThisMethod);
    if (maxLoopsPerMethod < loopsThisMethod)
    {
        maxLoopsPerMethod = loopsThisMethod;
    }
    if (loopOverflowThisMethod)
    {
        totalLoopOverflows++;
    }
#endif // COUNT_LOOPS

    bool mod = search.ChangedFlowGraph();

    if (mod)
    {
        // Need to renumber blocks now since loop canonicalization
        // depends on it; can defer the rest of fgUpdateChangedFlowGraph()
        // until after canonicalizing loops.  Dominator information is
        // recorded in terms of block numbers, so flag it invalid.
        fgDomsComputed = false;
        fgRenumberBlocks();
    }

    // Now the loop indices are stable. We can figure out parent/child relationships
    // (using table indices to name loops), and label blocks.
    for (unsigned char loopInd = 1; loopInd < optLoopCount; loopInd++)
    {
        // Search backwards from loopInd for the nearest enclosing loop; parents
        // always precede children in the loop table.
        for (unsigned char possibleParent = loopInd; possibleParent > 0;)
        {
            possibleParent--;
            if (optLoopTable[possibleParent].lpContains(optLoopTable[loopInd]))
            {
                optLoopTable[loopInd].lpParent       = possibleParent;
                optLoopTable[loopInd].lpSibling      = optLoopTable[possibleParent].lpChild;
                optLoopTable[possibleParent].lpChild = loopInd;
                break;
            }
        }
    }

    // Now label the blocks with the innermost loop to which they belong. Since parents
    // precede children in the table, doing the labeling for each loop in order will achieve
    // this -- the innermost loop labeling will be done last. (Inner loop blocks will be
    // labeled multiple times before being correct at the end.)
    for (unsigned char loopInd = 0; loopInd < optLoopCount; loopInd++)
    {
        for (BasicBlock* const blk : optLoopTable[loopInd].LoopBlocks())
        {
            blk->bbNatLoopNum = loopInd;
        }
    }

    // Make sure that loops are canonical: that every loop has a unique "top", by creating an empty "nop"
    // one, if necessary, for loops containing others that share a "top."
    for (unsigned char loopInd = 0; loopInd < optLoopCount; loopInd++)
    {
        // Traverse the outermost loops as entries into the loop nest; so skip non-outermost.
        if (optLoopTable[loopInd].lpParent != BasicBlock::NOT_IN_LOOP)
        {
            continue;
        }

        // Otherwise...
        if (optCanonicalizeLoopNest(loopInd))
        {
            mod = true;
        }
    }
    if (mod)
    {
        constexpr bool computePreds = true;
        fgUpdateChangedFlowGraph(computePreds);
    }

    if (false /* pre-header stress */)
    {
        // Stress mode: aggressively create loop pre-header for every loop.
        for (unsigned loopInd = 0; loopInd < optLoopCount; loopInd++)
        {
            fgCreateLoopPreHeader(loopInd);
        }

        if (fgModified)
        {
            // The predecessors were maintained in fgCreateLoopPreHeader; don't rebuild them.
            constexpr bool computePreds = false;
            constexpr bool computeDoms  = true;
            fgUpdateChangedFlowGraph(computePreds, computeDoms);
        }
    }

#ifdef DEBUG
    if (verbose && (optLoopCount > 0))
    {
        optPrintLoopTable();
    }
#endif // DEBUG
}

//------------------------------------------------------------------------
// optIdentifyLoopsForAlignment: Determine which loops should be considered for alignment.
//
// All innermost loops whose block weight meets a threshold are candidates for alignment.
// The `first` block of the loop is marked with the BBF_LOOP_ALIGN flag to indicate this
// (the loop table itself is not changed).
//
// Depends on the loop table, and on block weights being set.
//
void Compiler::optIdentifyLoopsForAlignment()
{
#if FEATURE_LOOP_ALIGN
    if (codeGen->ShouldAlignLoops())
    {
        for (BasicBlock::loopNumber loopInd = 0; loopInd < optLoopCount; loopInd++)
        {
            // An innerloop candidate that might need alignment
            if (optLoopTable[loopInd].lpChild == BasicBlock::NOT_IN_LOOP)
            {
                BasicBlock* top       = optLoopTable[loopInd].lpTop;
                weight_t    topWeight = top->getBBWeight(this);
                if (topWeight >= (opts.compJitAlignLoopMinBlockWeight * BB_UNITY_WEIGHT))
                {
                    // Sometimes with JitOptRepeat > 1, we might end up finding the loops twice. In such
                    // cases, make sure to count them just once.
                    if (!top->isLoopAlign())
                    {
                        loopAlignCandidates++;
                        top->bbFlags |= BBF_LOOP_ALIGN;
                        JITDUMP(FMT_LP " that starts at " FMT_BB " needs alignment, weight=" FMT_WT ".\n", loopInd,
                                top->bbNum, top->getBBWeight(this));
                    }
                }
                else
                {
                    JITDUMP("Skip alignment for " FMT_LP " that starts at " FMT_BB " weight=" FMT_WT ".\n", loopInd,
                            top->bbNum, topWeight);
                }
            }
        }
    }
#endif
}

//------------------------------------------------------------------------
// optRedirectBlock: Replace the branch successors of a block based on a block map.
//
// Updates the successors of `blk`: if `blk2` is a branch successor of `blk`, and there is a mapping
// for `blk2->blk3` in `redirectMap`, change `blk` so that `blk3` is this branch successor.
//
// Note that fall-through successors are not modified, including predecessor lists.
//
// Arguments:
//     blk          - block to redirect
//     redirectMap  - block->block map specifying how the `blk` target will be redirected.
//     updatePreds  - if `true`, update the predecessor lists to match.
//
void Compiler::optRedirectBlock(BasicBlock* blk, BlockToBlockMap* redirectMap, const bool updatePreds)
{
    BasicBlock* newJumpDest = nullptr;

    switch (blk->bbJumpKind)
    {
        case BBJ_NONE:
        case BBJ_THROW:
        case BBJ_RETURN:
        case BBJ_EHFILTERRET:
        case BBJ_EHFINALLYRET:
        case BBJ_EHCATCHRET:
            // These have no jump destination to update.
            break;

        case BBJ_ALWAYS:
        case BBJ_LEAVE:
        case BBJ_CALLFINALLY:
        case BBJ_COND:
            // All of these have a single jump destination to update.
            if (redirectMap->Lookup(blk->bbJumpDest, &newJumpDest))
            {
                if (updatePreds)
                {
                    fgRemoveRefPred(blk->bbJumpDest, blk);
                    fgAddRefPred(newJumpDest, blk);
                }
                blk->bbJumpDest = newJumpDest;
            }
            break;

        case BBJ_SWITCH:
        {
            // Redirect any switch targets found in the map.
            bool redirected = false;
            for (unsigned i = 0; i < blk->bbJumpSwt->bbsCount; i++)
            {
                BasicBlock* switchDest = blk->bbJumpSwt->bbsDstTab[i];
                if (redirectMap->Lookup(switchDest, &newJumpDest))
                {
                    if (updatePreds)
                    {
                        fgRemoveRefPred(switchDest, blk);
                        fgAddRefPred(newJumpDest, blk);
                    }
                    blk->bbJumpSwt->bbsDstTab[i] = newJumpDest;
                    redirected                   = true;
                }
            }
            // If any redirections happened, invalidate the switch table map for the switch.
            if (redirected)
            {
                // Don't create a new map just to try to remove an entry.
                BlockToSwitchDescMap* switchMap = GetSwitchDescMap(/* createIfNull */ false);
                if (switchMap != nullptr)
                {
                    switchMap->Remove(blk);
                }
            }
        }
        break;

        default:
            unreached();
    }
}

// TODO-Cleanup: This should be a static member of the BasicBlock class.
void Compiler::optCopyBlkDest(BasicBlock* from, BasicBlock* to)
{
    assert(from->bbJumpKind == to->bbJumpKind); // Precondition.

    // copy the jump destination(s) from "from" to "to".
    switch (to->bbJumpKind)
    {
        case BBJ_ALWAYS:
        case BBJ_LEAVE:
        case BBJ_CALLFINALLY:
        case BBJ_COND:
            // All of these have a single jump destination to update.
            to->bbJumpDest = from->bbJumpDest;
            break;

        case BBJ_SWITCH:
            // Deep-copy the switch descriptor so the two blocks don't share it.
            to->bbJumpSwt = new (this, CMK_BasicBlock) BBswtDesc(this, from->bbJumpSwt);
            break;

        default:
            break;
    }
}

// Returns true if 'block' is an entry block for any loop in 'optLoopTable'
bool Compiler::optIsLoopEntry(BasicBlock* block) const
{
    for (unsigned char loopInd = 0; loopInd < optLoopCount; loopInd++)
    {
        // Skip loops that have been removed from the table.
        if ((optLoopTable[loopInd].lpFlags & LPFLG_REMOVED) != 0)
        {
            continue;
        }

        if (optLoopTable[loopInd].lpEntry == block)
        {
            return true;
        }
    }
    return false;
}

// Canonicalize the loop nest rooted at parent loop 'loopInd'.
// Returns 'true' if the flow graph is modified.
bool Compiler::optCanonicalizeLoopNest(unsigned char loopInd)
{
    bool modified = false;

    // Is the top of the current loop in any nested loop?
    if (optLoopTable[loopInd].lpTop->bbNatLoopNum != loopInd)
    {
        if (optCanonicalizeLoop(loopInd))
        {
            modified = true;
        }
    }

    // Recurse into all child loops.
    for (unsigned char child = optLoopTable[loopInd].lpChild; //
         child != BasicBlock::NOT_IN_LOOP;                    //
         child = optLoopTable[child].lpSibling)
    {
        if (optCanonicalizeLoopNest(child))
        {
            modified = true;
        }
    }

    return modified;
}

// Canonicalize loop 'loopInd' by giving it a unique "top" block (one not shared
// with any nested loop). Returns 'true' if the flow graph was modified.
bool Compiler::optCanonicalizeLoop(unsigned char loopInd)
{
    // Is the top uniquely part of the current loop?
    BasicBlock* t = optLoopTable[loopInd].lpTop;

    if (t->bbNatLoopNum == loopInd)
    {
        return false;
    }

    JITDUMP("in optCanonicalizeLoop: " FMT_LP " has top " FMT_BB " (bottom " FMT_BB
            ") with natural loop number " FMT_LP ": need to canonicalize\n",
            loopInd, t->bbNum, optLoopTable[loopInd].lpBottom->bbNum, t->bbNatLoopNum);

    // Otherwise, the top of this loop is also part of a nested loop.
    //
    // Insert a new unique top for this loop. We must be careful to put this new
    // block in the correct EH region. Note that t->bbPrev might be in a different
    // EH region. For example:
    //
    // try {
    //      ...
    //      BB07
    // }
    // BB08 // "first"
    //
    // In this case, first->bbPrev is BB07, which is in a different 'try' region.
    // On the other hand, the first block of multiple loops might be the first
    // block of a 'try' region that is completely contained in the multiple loops.
    // for example:
    //
    // BB08 try { }
    // ...
    // BB10 BBJ_ALWAYS => BB08
    // ...
    // BB12 BBJ_ALWAYS => BB08
    //
    // Here, we have two loops, both with BB08 as the "first" block. Block BB08
    // is a single-block "try" region. Neither loop "bottom" block is in the same
    // "try" region as BB08. This is legal because you can jump to the first block
    // of a try region. With EH normalization, no two "try" regions will share
    // this block. In this case, we need to insert a new block for the outer loop
    // in the same EH region as the branch from the "bottom":
    //
    // BB30 BBJ_NONE
    // BB08 try { }
    // ...
    // BB10 BBJ_ALWAYS => BB08
    // ...
    // BB12 BBJ_ALWAYS => BB30
    //
    // Another possibility is that the "first" block of the loop nest can be the first block
    // of a "try" region that also has other predecessors than those in the loop, or even in
    // the "try" region (since blocks can target the first block of a "try" region). For example:
    //
    // BB08 try {
    // ...
    // BB10 BBJ_ALWAYS => BB08
    // ...
    // BB12 BBJ_ALWAYS => BB08
    // BB13 }
    // ...
    // BB20 BBJ_ALWAYS => BB08
    // ...
    // BB25 BBJ_ALWAYS => BB08
    //
    // Here, BB08 has 4 flow graph predecessors: BB10, BB12, BB20, BB25. These are all potential loop
    // bottoms, for four possible nested loops. However, we require all the loop bottoms to be in the
    // same EH region. For loops BB08..BB10 and BB08..BB12, we need to add a new "top" block within
    // the try region, immediately before BB08. The bottom of the loop BB08..BB10 loop will target the
    // old BB08, and the bottom of the BB08..BB12 loop will target the new loop header. The other branches
    // (BB20, BB25) must target the new loop header, both for correctness, and to avoid the illegal
    // situation of branching to a non-first block of a 'try' region.
    //
    // We can also have a loop nest where the "first" block is outside of a "try" region
    // and the back edges are inside a "try" region, for example:
    //
    // BB02 // "first"
    // ...
    // BB09 try { BBJ_COND => BB02
    // ...
    // BB15 BBJ_COND => BB02
    // ...
    // BB21 } // end of "try"
    //
    // In this case, both loop back edges were formed by "leave" instructions that were
    // imported into branches that were later made conditional. In this case, we don't
    // want to copy the EH region of the back edge, since that would create a block
    // outside of and disjoint with the "try" region of the back edge. However, to
    // simplify things, we disqualify this type of loop, so we should never see this here.

    BasicBlock* h = optLoopTable[loopInd].lpHead;
    BasicBlock* b = optLoopTable[loopInd].lpBottom;

    // The loop must be entirely contained within a single handler region.
    assert(BasicBlock::sameHndRegion(t, b));

    // If the bottom block is in the same "try" region, then we extend the EH
    // region. Otherwise, we add the new block outside the "try" region.
    const bool  extendRegion = BasicBlock::sameTryRegion(t, b);
    BasicBlock* newT         = fgNewBBbefore(BBJ_NONE, t, extendRegion);
    if (!extendRegion)
    {
        // We need to set the EH region manually. Set it to be the same
        // as the bottom block.
        newT->copyEHRegion(b);
    }

    // The new block can reach the same set of blocks as the old one, but don't try to reflect
    // that in its reachability set here -- creating the new block may have changed the BlockSet
    // representation from short to long, and canonicalizing loops is immediately followed by
    // a call to fgUpdateChangedFlowGraph which will recompute the reachability sets anyway.

    // Redirect the "bottom" of the current loop to "newT".
    BlockToBlockMap* blockMap = new (getAllocator(CMK_LoopOpt)) BlockToBlockMap(getAllocator(CMK_LoopOpt));
    blockMap->Set(t, newT);
    optRedirectBlock(b, blockMap);

    // Redirect non-loop preds of "t" to also go to "newT". Inner loops that also branch to "t" should continue
    // to do so. However, there may be other predecessors from outside the loop nest that need to be updated
    // to point to "newT". This normally wouldn't happen, since they too would be part of the loop nest. However,
    // they might have been prevented from participating in the loop nest due to different EH nesting, or some
    // other reason.
    //
    // Note that optRedirectBlock doesn't update the predecessors list. So, if the same 't' block is processed
    // multiple times while canonicalizing multiple loop nests, we'll attempt to redirect a predecessor multiple times.
    // This is ok, because after the first redirection, the topPredBlock branch target will no longer match the source
    // edge of the blockMap, so nothing will happen.
    bool firstPred = true;
    for (BasicBlock* const topPredBlock : t->PredBlocks())
    {
        // Skip if topPredBlock is in the loop.
        // Note that this uses block number to detect membership in the loop. We are adding blocks during
        // canonicalization, and those block numbers will be new, and larger than previous blocks. However, we work
        // outside-in, so we shouldn't encounter the new blocks at the loop boundaries, or in the predecessor lists.
        if (t->bbNum <= topPredBlock->bbNum && topPredBlock->bbNum <= b->bbNum)
        {
            JITDUMP("in optCanonicalizeLoop: 'top' predecessor " FMT_BB " is in the range of " FMT_LP " (" FMT_BB
                    ".." FMT_BB "); not redirecting its bottom edge\n",
                    topPredBlock->bbNum, loopInd, t->bbNum, b->bbNum);
            continue;
        }

        JITDUMP("in optCanonicalizeLoop: redirect top predecessor " FMT_BB " to " FMT_BB "\n", topPredBlock->bbNum,
                newT->bbNum);
        optRedirectBlock(topPredBlock, blockMap);

        // When we have profile data then the 'newT' block will inherit topPredBlock profile weight
        if (topPredBlock->hasProfileWeight())
        {
            // This corrects an issue when the topPredBlock has a profile based weight
            //
            if (firstPred)
            {
                JITDUMP("in optCanonicalizeLoop: block " FMT_BB " will inheritWeight from " FMT_BB "\n", newT->bbNum,
                        topPredBlock->bbNum);

                newT->inheritWeight(topPredBlock);
                firstPred = false;
            }
            else
            {
                JITDUMP("in optCanonicalizeLoop: block " FMT_BB " will also contribute to the weight of " FMT_BB "\n",
                        newT->bbNum, topPredBlock->bbNum);

                weight_t newWeight = newT->getBBWeight(this) + topPredBlock->getBBWeight(this);
                newT->setBBProfileWeight(newWeight);
            }
        }
    }

    assert(newT->bbNext == t);

    // If it had been a do-while loop (top == entry), update entry, as well.
    BasicBlock* origE = optLoopTable[loopInd].lpEntry;
    if (optLoopTable[loopInd].lpTop == origE)
    {
        optLoopTable[loopInd].lpEntry = newT;
    }
    optLoopTable[loopInd].lpTop = newT;

    newT->bbNatLoopNum = loopInd;

    JITDUMP("in optCanonicalizeLoop: made new block " FMT_BB " [%p] the new unique top of loop %d.\n", newT->bbNum,
            dspPtr(newT), loopInd);

    // Make sure the head block still goes to the entry...
    if (h->bbJumpKind == BBJ_NONE && h->bbNext != optLoopTable[loopInd].lpEntry)
    {
        h->bbJumpKind = BBJ_ALWAYS;
        h->bbJumpDest = optLoopTable[loopInd].lpEntry;
    }
    else if (h->bbJumpKind == BBJ_COND && h->bbNext == newT && newT != optLoopTable[loopInd].lpEntry)
    {
        // The head conditionally falls into newT, which is no longer the entry:
        // insert an intermediate block that jumps to the entry.
        BasicBlock* h2               = fgNewBBafter(BBJ_ALWAYS, h, /*extendRegion*/ true);
        optLoopTable[loopInd].lpHead = h2;
        h2->bbJumpDest               = optLoopTable[loopInd].lpEntry;
        h2->bbStmtList               = nullptr;
        fgInsertStmtAtEnd(h2, fgNewStmtFromTree(gtNewOperNode(GT_NOP, TYP_VOID, nullptr)));
    }

    // If any loops nested in "loopInd" have the same head and entry as "loopInd",
    // it must be the case that they were do-while's (since "h" fell through to the entry).
    // The new node "newT" becomes the head of such loops.
    for (unsigned char childLoop = optLoopTable[loopInd].lpChild; //
         childLoop != BasicBlock::NOT_IN_LOOP;                    //
         childLoop = optLoopTable[childLoop].lpSibling)
    {
        if (optLoopTable[childLoop].lpEntry == origE && optLoopTable[childLoop].lpHead == h &&
            newT->bbJumpKind == BBJ_NONE && newT->bbNext == origE)
        {
            optUpdateLoopHead(childLoop, h, newT);
        }
    }
    return true;
}

//-----------------------------------------------------------------------------
// optLoopContains: Check if one loop contains another
//
// Arguments:
//    l1 -- loop num of containing loop (must be valid loop num)
//    l2 -- loop num of contained loop (valid loop num, or NOT_IN_LOOP)
//
// Returns:
//    True if loop described by l2 is contained within l1.
//
// Notes:
//    A loop contains itself.
// bool Compiler::optLoopContains(unsigned l1, unsigned l2) const { assert(l1 < optLoopCount); assert((l2 < optLoopCount) || (l2 == BasicBlock::NOT_IN_LOOP)); if (l1 == l2) { return true; } else if (l2 == BasicBlock::NOT_IN_LOOP) { return false; } else { return optLoopContains(l1, optLoopTable[l2].lpParent); } } void Compiler::optUpdateLoopHead(unsigned loopInd, BasicBlock* from, BasicBlock* to) { assert(optLoopTable[loopInd].lpHead == from); optLoopTable[loopInd].lpHead = to; for (unsigned char childLoop = optLoopTable[loopInd].lpChild; childLoop != BasicBlock::NOT_IN_LOOP; childLoop = optLoopTable[childLoop].lpSibling) { if (optLoopTable[childLoop].lpHead == from) { optUpdateLoopHead(childLoop, from, to); } } } //----------------------------------------------------------------------------- // optIterSmallOverflow: Helper for loop unrolling. Determine if "i += const" will // cause an overflow exception for the small types. // // Arguments: // iterAtExit - iteration constant at loop exit // incrType - type of increment // // Returns: // true if overflow // // static bool Compiler::optIterSmallOverflow(int iterAtExit, var_types incrType) { int type_MAX; switch (incrType) { case TYP_BYTE: type_MAX = SCHAR_MAX; break; case TYP_UBYTE: type_MAX = UCHAR_MAX; break; case TYP_SHORT: type_MAX = SHRT_MAX; break; case TYP_USHORT: type_MAX = USHRT_MAX; break; case TYP_UINT: // Detected by checking for 32bit .... case TYP_INT: return false; // ... overflow same as done for TYP_INT default: NO_WAY("Bad type"); } if (iterAtExit > type_MAX) { return true; } else { return false; } } //----------------------------------------------------------------------------- // optIterSmallUnderflow: Helper for loop unrolling. Determine if "i -= const" will // cause an underflow exception for the small types. 
// // Arguments: // iterAtExit - iteration constant at loop exit // decrType - type of decrement // // Returns: // true if overflow // // static bool Compiler::optIterSmallUnderflow(int iterAtExit, var_types decrType) { int type_MIN; switch (decrType) { case TYP_BYTE: type_MIN = SCHAR_MIN; break; case TYP_SHORT: type_MIN = SHRT_MIN; break; case TYP_UBYTE: type_MIN = 0; break; case TYP_USHORT: type_MIN = 0; break; case TYP_UINT: // Detected by checking for 32bit .... case TYP_INT: return false; // ... underflow same as done for TYP_INT default: NO_WAY("Bad type"); } if (iterAtExit < type_MIN) { return true; } else { return false; } } //----------------------------------------------------------------------------- // optComputeLoopRep: Helper for loop unrolling. Computes the number of repetitions // in a constant loop. // // Arguments: // constInit - loop constant initial value // constLimit - loop constant limit // iterInc - loop iteration increment // iterOper - loop iteration increment operator (ADD, SUB, etc.) // iterOperType - iteration operator type // testOper - type of loop test (i.e. GT_LE, GT_GE, etc.) // unsTest - true if test is unsigned // dupCond - true if the loop head contains a test which skips this loop // iterCount - *iterCount is set to the iteration count, if the function returns `true` // // Returns: // true if the loop has a constant repetition count, false if that cannot be proven // bool Compiler::optComputeLoopRep(int constInit, int constLimit, int iterInc, genTreeOps iterOper, var_types iterOperType, genTreeOps testOper, bool unsTest, bool dupCond, unsigned* iterCount) { noway_assert(genActualType(iterOperType) == TYP_INT); __int64 constInitX; __int64 constLimitX; unsigned loopCount; int iterSign; // Using this, we can just do a signed comparison with other 32 bit values. 
    // Widen the limit to 64 bits with the signedness of the loop test so all
    // comparisons below can be done as signed 64-bit comparisons.
    if (unsTest)
    {
        constLimitX = (unsigned int)constLimit;
    }
    else
    {
        constLimitX = (signed int)constLimit;
    }

    switch (iterOperType)
    {
// For small types, the iteration operator will narrow these values if big
#define INIT_ITER_BY_TYPE(type)                                                                                        \
    constInitX = (type)constInit;                                                                                      \
    iterInc    = (type)iterInc;

        case TYP_BYTE:
            INIT_ITER_BY_TYPE(signed char);
            break;
        case TYP_UBYTE:
            INIT_ITER_BY_TYPE(unsigned char);
            break;
        case TYP_SHORT:
            INIT_ITER_BY_TYPE(signed short);
            break;
        case TYP_USHORT:
            INIT_ITER_BY_TYPE(unsigned short);
            break;

        // For the big types, 32 bit arithmetic is performed
        case TYP_INT:
        case TYP_UINT:
            if (unsTest)
            {
                constInitX = (unsigned int)constInit;
            }
            else
            {
                constInitX = (signed int)constInit;
            }
            break;

        default:
            noway_assert(!"Bad type");
            NO_WAY("Bad type");
    }

    // If iterInc is zero we have an infinite loop.
    if (iterInc == 0)
    {
        return false;
    }

    // Set iterSign to +1 for positive iterInc and -1 for negative iterInc.
    iterSign = (iterInc > 0) ? +1 : -1;

    // Initialize loopCount to zero.
    loopCount = 0;

    // If dupCond is true then the loop head contains a test which skips
    // this loop, if the constInit does not pass the loop test.
    // Such a loop can execute zero times.
    // If dupCond is false then we have a true do-while loop which we
    // always execute the loop once before performing the loop test
    if (!dupCond)
    {
        loopCount += 1;
        constInitX += iterInc;
    }

    // bail if count is based on wrap-around math
    if (iterInc > 0)
    {
        if (constLimitX < constInitX)
        {
            return false;
        }
    }
    else if (constLimitX > constInitX)
    {
        return false;
    }

    // Compute the number of repetitions.

    switch (testOper)
    {
        __int64 iterAtExitX;

        case GT_EQ:
            // Something like "for (i=init; i == lim; i++)" doesn't make any sense.
            return false;

        case GT_NE:
            // Consider: "for (i = init; i != lim; i += const)"
            // This is tricky since it may have a constant number of iterations or loop forever.
            // We have to compute "(lim - init) mod iterInc" to see if it is zero.
            // If "mod iterInc" is not zero then the limit test will miss and a wrap will occur
            // which is probably not what the end user wanted, but it is legal.

            if (iterInc > 0)
            {
                // Stepping by one, i.e. Mod with 1 is always zero.
                if (iterInc != 1)
                {
                    if (((constLimitX - constInitX) % iterInc) != 0)
                    {
                        return false;
                    }
                }
            }
            else
            {
                noway_assert(iterInc < 0);
                // Stepping by -1, i.e. Mod with 1 is always zero.
                if (iterInc != -1)
                {
                    if (((constInitX - constLimitX) % (-iterInc)) != 0)
                    {
                        return false;
                    }
                }
            }

            switch (iterOper)
            {
                case GT_SUB:
                    // Normalize a subtracting iterator to an adding one with negated step.
                    iterInc = -iterInc;
                    FALLTHROUGH;

                case GT_ADD:
                    if (constInitX != constLimitX)
                    {
                        loopCount += (unsigned)((constLimitX - constInitX - iterSign) / iterInc) + 1;
                    }

                    iterAtExitX = (int)(constInitX + iterInc * (int)loopCount);

                    if (unsTest)
                    {
                        iterAtExitX = (unsigned)iterAtExitX;
                    }

                    // Check if iteration incr will cause overflow for small types
                    if (optIterSmallOverflow((int)iterAtExitX, iterOperType))
                    {
                        return false;
                    }

                    // iterator with 32bit overflow. Bad for TYP_(U)INT
                    if (iterAtExitX < constLimitX)
                    {
                        return false;
                    }

                    *iterCount = loopCount;
                    return true;

                case GT_MUL:
                case GT_DIV:
                case GT_RSH:
                case GT_LSH:
                case GT_UDIV:
                    return false;

                default:
                    noway_assert(!"Unknown operator for loop iterator");
                    return false;
            }

        case GT_LT:
            switch (iterOper)
            {
                case GT_SUB:
                    iterInc = -iterInc;
                    FALLTHROUGH;

                case GT_ADD:
                    if (constInitX < constLimitX)
                    {
                        loopCount += (unsigned)((constLimitX - constInitX - iterSign) / iterInc) + 1;
                    }

                    iterAtExitX = (int)(constInitX + iterInc * (int)loopCount);

                    if (unsTest)
                    {
                        iterAtExitX = (unsigned)iterAtExitX;
                    }

                    // Check if iteration incr will cause overflow for small types
                    if (optIterSmallOverflow((int)iterAtExitX, iterOperType))
                    {
                        return false;
                    }

                    // iterator with 32bit overflow. Bad for TYP_(U)INT
                    if (iterAtExitX < constLimitX)
                    {
                        return false;
                    }

                    *iterCount = loopCount;
                    return true;

                case GT_MUL:
                case GT_DIV:
                case GT_RSH:
                case GT_LSH:
                case GT_UDIV:
                    return false;

                default:
                    noway_assert(!"Unknown operator for loop iterator");
                    return false;
            }

        case GT_LE:
            switch (iterOper)
            {
                case GT_SUB:
                    iterInc = -iterInc;
                    FALLTHROUGH;

                case GT_ADD:
                    if (constInitX <= constLimitX)
                    {
                        loopCount += (unsigned)((constLimitX - constInitX) / iterInc) + 1;
                    }

                    iterAtExitX = (int)(constInitX + iterInc * (int)loopCount);

                    if (unsTest)
                    {
                        iterAtExitX = (unsigned)iterAtExitX;
                    }

                    // Check if iteration incr will cause overflow for small types
                    if (optIterSmallOverflow((int)iterAtExitX, iterOperType))
                    {
                        return false;
                    }

                    // iterator with 32bit overflow. Bad for TYP_(U)INT
                    if (iterAtExitX <= constLimitX)
                    {
                        return false;
                    }

                    *iterCount = loopCount;
                    return true;

                case GT_MUL:
                case GT_DIV:
                case GT_RSH:
                case GT_LSH:
                case GT_UDIV:
                    return false;

                default:
                    noway_assert(!"Unknown operator for loop iterator");
                    return false;
            }

        case GT_GT:
            switch (iterOper)
            {
                case GT_SUB:
                    iterInc = -iterInc;
                    FALLTHROUGH;

                case GT_ADD:
                    if (constInitX > constLimitX)
                    {
                        loopCount += (unsigned)((constLimitX - constInitX - iterSign) / iterInc) + 1;
                    }

                    iterAtExitX = (int)(constInitX + iterInc * (int)loopCount);

                    if (unsTest)
                    {
                        iterAtExitX = (unsigned)iterAtExitX;
                    }

                    // Check if small types will underflow
                    if (optIterSmallUnderflow((int)iterAtExitX, iterOperType))
                    {
                        return false;
                    }

                    // iterator with 32bit underflow.
Bad for TYP_INT and unsigneds if (iterAtExitX > constLimitX) { return false; } *iterCount = loopCount; return true; case GT_MUL: case GT_DIV: case GT_RSH: case GT_LSH: case GT_UDIV: return false; default: noway_assert(!"Unknown operator for loop iterator"); return false; } case GT_GE: switch (iterOper) { case GT_SUB: iterInc = -iterInc; FALLTHROUGH; case GT_ADD: if (constInitX >= constLimitX) { loopCount += (unsigned)((constLimitX - constInitX) / iterInc) + 1; } iterAtExitX = (int)(constInitX + iterInc * (int)loopCount); if (unsTest) { iterAtExitX = (unsigned)iterAtExitX; } // Check if small types will underflow if (optIterSmallUnderflow((int)iterAtExitX, iterOperType)) { return false; } // iterator with 32bit underflow. Bad for TYP_INT and unsigneds if (iterAtExitX >= constLimitX) { return false; } *iterCount = loopCount; return true; case GT_MUL: case GT_DIV: case GT_RSH: case GT_LSH: case GT_UDIV: return false; default: noway_assert(!"Unknown operator for loop iterator"); return false; } default: noway_assert(!"Unknown operator for loop condition"); } return false; } #ifdef _PREFAST_ #pragma warning(push) #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function #endif //----------------------------------------------------------------------------- // optUnrollLoops: Look for loop unrolling candidates and unroll them. // // Loops must be of the form: // for (i=icon; i<icon; i++) { ... } // // Loops handled are fully unrolled; there is no partial unrolling. // // Limitations: only the following loop types are handled: // 1. "while" loops (top entry) // 2. constant initializer, constant bound // 3. The entire loop must be in the same EH region. // 4. The loop iteration variable can't be address exposed. // 5. The loop iteration variable can't be a promoted struct field. // 6. We must be able to calculate the total constant iteration count. // 7. On x86, there is a limit to the number of return blocks. 
//    So if there are return blocks in the loop that
//    would be unrolled, the unrolled code can't exceed that limit.
//
// Cost heuristics:
// 1. there are cost metrics for maximum number of allowed iterations, and maximum unroll size
// 2. single-iteration loops are always allowed (to eliminate the loop structure).
// 3. otherwise, only loops where the limit is Vector<T>.Length are currently allowed
//
// In stress modes, these heuristic limits are expanded, and loops aren't required to have the
// Vector<T>.Length limit.
//
// Loops are processed from innermost to outermost order, to attempt to unroll the most nested loops first.
//
// Returns:
//   suitable phase status
//
PhaseStatus Compiler::optUnrollLoops()
{
    if (compCodeOpt() == SMALL_CODE)
    {
        return PhaseStatus::MODIFIED_NOTHING;
    }

    if (optLoopCount == 0)
    {
        return PhaseStatus::MODIFIED_NOTHING;
    }

#ifdef DEBUG
    if (JitConfig.JitNoUnroll())
    {
        return PhaseStatus::MODIFIED_NOTHING;
    }
#endif

#ifdef DEBUG
    if (verbose)
    {
        printf("*************** In optUnrollLoops()\n");
    }
#endif

    /* Look for loop unrolling candidates */

    bool change                 = false;
    bool anyNestedLoopsUnrolled = false;
    INDEBUG(int unrollCount = 0);    // count of loops unrolled
    INDEBUG(int unrollFailures = 0); // count of loops attempted to be unrolled, but failed

    // Maximum trip count allowed for unrolling, indexed by code optimization mode.
    static const unsigned ITER_LIMIT[COUNT_OPT_CODE + 1] = {
        10, // BLENDED_CODE
        0,  // SMALL_CODE
        20, // FAST_CODE
        0   // COUNT_OPT_CODE
    };

    assert(ITER_LIMIT[SMALL_CODE] == 0);
    assert(ITER_LIMIT[COUNT_OPT_CODE] == 0);

    unsigned iterLimit = ITER_LIMIT[compCodeOpt()];

#ifdef DEBUG
    if (compStressCompile(STRESS_UNROLL_LOOPS, 50))
    {
        iterLimit *= 10;
    }
#endif

    // Maximum estimated size increase allowed for unrolling, indexed by code optimization mode.
    static const int UNROLL_LIMIT_SZ[COUNT_OPT_CODE + 1] = {
        300, // BLENDED_CODE
        0,   // SMALL_CODE
        600, // FAST_CODE
        0    // COUNT_OPT_CODE
    };

    assert(UNROLL_LIMIT_SZ[SMALL_CODE] == 0);
    assert(UNROLL_LIMIT_SZ[COUNT_OPT_CODE] == 0);

    // Visit loops from highest to lowest number to visit them in innermost to outermost order.
    for (unsigned lnum = optLoopCount - 1; lnum != ~0U; --lnum)
    {
        // This is necessary due to an apparent analysis limitation since
        // optLoopCount must be strictly greater than 0 upon entry and lnum
        // cannot wrap due to the loop termination condition.
        PREFAST_ASSUME(lnum != 0U - 1);

        LoopDsc&    loop = optLoopTable[lnum];
        BasicBlock* head;
        BasicBlock* top;
        BasicBlock* bottom;
        BasicBlock* initBlock;

        bool       dupCond;      // Does the 'head' block contain a duplicate loop condition (zero trip test)?
        int        lbeg;         // initial value for iterator
        int        llim;         // limit value for iterator
        unsigned   lvar;         // iterator lclVar #
        int        iterInc;      // value to increment the iterator
        genTreeOps iterOper;     // type of iterator increment (i.e. ADD, SUB, etc.)
        var_types  iterOperType; // type result of the oper (for overflow instrs)
        genTreeOps testOper;     // type of loop test (i.e. GT_LE, GT_GE, etc.)
        bool       unsTest;      // Is the comparison unsigned?

        unsigned loopRetCount; // number of BBJ_RETURN blocks in loop
        unsigned totalIter;    // total number of iterations in the constant loop

        const unsigned loopFlags = loop.lpFlags;

        // Check for required flags:
        // LPFLG_CONST_INIT  - required because this transform only handles full unrolls
        // LPFLG_CONST_LIMIT - required because this transform only handles full unrolls
        const unsigned requiredFlags = LPFLG_CONST_INIT | LPFLG_CONST_LIMIT;
        if ((loopFlags & requiredFlags) != requiredFlags)
        {
            // Don't print to the JitDump about this common case.
            continue;
        }

        // Ignore if removed or marked as not unrollable.
        if (loopFlags & (LPFLG_DONT_UNROLL | LPFLG_REMOVED))
        {
            // Don't print to the JitDump about this common case.
            continue;
        }

        // This transform only handles loops of this form
        if (!loop.lpIsTopEntry())
        {
            JITDUMP("Failed to unroll loop " FMT_LP ": not top entry\n", lnum);
            continue;
        }

        head = loop.lpHead;
        noway_assert(head != nullptr);
        top = loop.lpTop;
        noway_assert(top != nullptr);
        bottom = loop.lpBottom;
        noway_assert(bottom != nullptr);

        // Get the loop data:
        //  - initial constant
        //  - limit constant
        //  - iterator
        //  - iterator increment
        //  - increment operation type (i.e. ADD, SUB, etc...)
        //  - loop test type (i.e. GT_GE, GT_LT, etc...)

        initBlock = loop.lpInitBlock;
        lbeg      = loop.lpConstInit;
        llim      = loop.lpConstLimit();
        testOper  = loop.lpTestOper();

        lvar     = loop.lpIterVar();
        iterInc  = loop.lpIterConst();
        iterOper = loop.lpIterOper();

        iterOperType = loop.lpIterOperType();
        unsTest      = (loop.lpTestTree->gtFlags & GTF_UNSIGNED) != 0;

        if (lvaTable[lvar].IsAddressExposed())
        {
            // If the loop iteration variable is address-exposed then bail
            JITDUMP("Failed to unroll loop " FMT_LP ": V%02u is address exposed\n", lnum, lvar);
            continue;
        }
        if (lvaTable[lvar].lvIsStructField)
        {
            // If the loop iteration variable is a promoted field from a struct then bail
            JITDUMP("Failed to unroll loop " FMT_LP ": V%02u is a promoted struct field\n", lnum, lvar);
            continue;
        }

        // Locate/initialize the increment/test statements.
        Statement* initStmt = initBlock->lastStmt();
        noway_assert((initStmt != nullptr) && (initStmt->GetNextStmt() == nullptr));

        Statement* testStmt = bottom->lastStmt();
        noway_assert((testStmt != nullptr) && (testStmt->GetNextStmt() == nullptr));

        Statement* incrStmt = testStmt->GetPrevStmt();
        noway_assert(incrStmt != nullptr);

        if (initStmt->GetRootNode()->OperIs(GT_JTRUE))
        {
            // Must be a duplicated loop condition.

            dupCond  = true;
            initStmt = initStmt->GetPrevStmt();
            noway_assert(initStmt != nullptr);
        }
        else
        {
            dupCond = false;
        }

        // Find the number of iterations - the function returns false if not a constant number.
        if (!optComputeLoopRep(lbeg, llim, iterInc, iterOper, iterOperType, testOper, unsTest, dupCond, &totalIter))
        {
            JITDUMP("Failed to unroll loop " FMT_LP ": not a constant iteration count\n", lnum);
            continue;
        }

        // Forget it if there are too many repetitions or not a constant loop.

        if (totalIter > iterLimit)
        {
            JITDUMP("Failed to unroll loop " FMT_LP ": too many iterations (%d > %d) (heuristic)\n", lnum, totalIter,
                    iterLimit);
            continue;
        }

        int unrollLimitSz = UNROLL_LIMIT_SZ[compCodeOpt()];

        if (INDEBUG(compStressCompile(STRESS_UNROLL_LOOPS, 50) ||) false)
        {
            // In stress mode, quadruple the size limit, and drop
            // the restriction that loop limit must be vector element count.
            unrollLimitSz *= 4;
        }
        else if (totalIter <= 1)
        {
            // No limit for single iteration loops
            // If there is no iteration (totalIter == 0), we will remove the loop body entirely.
            unrollLimitSz = INT_MAX;
        }
        else if (!(loopFlags & LPFLG_SIMD_LIMIT))
        {
            // Otherwise unroll only if limit is Vector_.Length
            // (as a heuristic, not for correctness/structural reasons)
            JITDUMP("Failed to unroll loop " FMT_LP ": constant limit isn't Vector<T>.Length (heuristic)\n", lnum);
            continue;
        }

        GenTree* incr = incrStmt->GetRootNode();

        // Don't unroll loops we don't understand.
        if (incr->gtOper != GT_ASG)
        {
            JITDUMP("Failed to unroll loop " FMT_LP ": unknown increment op (%s)\n", lnum,
                    GenTree::OpName(incr->gtOper));
            continue;
        }
        incr = incr->AsOp()->gtOp2;

        GenTree* init = initStmt->GetRootNode();

        // Make sure everything looks ok.
        // clang-format off
        if ((init->gtOper != GT_ASG) ||
            (init->AsOp()->gtOp1->gtOper != GT_LCL_VAR) ||
            (init->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum() != lvar) ||
            (init->AsOp()->gtOp2->gtOper != GT_CNS_INT) ||
            (init->AsOp()->gtOp2->AsIntCon()->gtIconVal != lbeg) ||

            !((incr->gtOper == GT_ADD) || (incr->gtOper == GT_SUB)) ||
            (incr->AsOp()->gtOp1->gtOper != GT_LCL_VAR) ||
            (incr->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum() != lvar) ||
            (incr->AsOp()->gtOp2->gtOper != GT_CNS_INT) ||
            (incr->AsOp()->gtOp2->AsIntCon()->gtIconVal != iterInc) ||

            (testStmt->GetRootNode()->gtOper != GT_JTRUE))
        {
            noway_assert(!"Bad precondition in Compiler::optUnrollLoops()");
            continue;
        }
        // clang-format on

        // Heuristic: Estimated cost in code size of the unrolled loop.

        {
            ClrSafeInt<unsigned> loopCostSz; // Cost is size of one iteration

            auto tryIndex = loop.lpTop->bbTryIndex;

            // Besides calculating the loop cost, also ensure that all loop blocks are within the same EH
            // region, and count the number of BBJ_RETURN blocks in the loop.
            loopRetCount = 0;
            for (BasicBlock* const block : loop.LoopBlocks())
            {
                if (block->bbTryIndex != tryIndex)
                {
                    // Unrolling would require cloning EH regions
                    JITDUMP("Failed to unroll loop " FMT_LP ": EH constraint\n", lnum);
                    goto DONE_LOOP;
                }

                if (block->bbJumpKind == BBJ_RETURN)
                {
                    ++loopRetCount;
                }

                for (Statement* const stmt : block->Statements())
                {
                    gtSetStmtInfo(stmt);
                    loopCostSz += stmt->GetCostSz();
                }
            }

#ifdef JIT32_GCENCODER
            if ((totalIter > 0) && (fgReturnCount + loopRetCount * (totalIter - 1) > SET_EPILOGCNT_MAX))
            {
                // Jit32 GC encoder can't report more than SET_EPILOGCNT_MAX epilogs.
                JITDUMP("Failed to unroll loop " FMT_LP ": GC encoder max epilog constraint\n", lnum);
                goto DONE_LOOP;
            }
#endif // !JIT32_GCENCODER

            // Compute the estimated increase in code size for the unrolled loop.

            ClrSafeInt<unsigned> fixedLoopCostSz(8);

            ClrSafeInt<int> unrollCostSz = ClrSafeInt<int>(loopCostSz * ClrSafeInt<unsigned>(totalIter)) -
                                           ClrSafeInt<int>(loopCostSz + fixedLoopCostSz);

            // Don't unroll if too much code duplication would result.

            if (unrollCostSz.IsOverflow() || (unrollCostSz.Value() > unrollLimitSz))
            {
                JITDUMP("Failed to unroll loop " FMT_LP ": size constraint (%d > %d) (heuristic)\n", lnum,
                        unrollCostSz.Value(), unrollLimitSz);
                goto DONE_LOOP;
            }

            // Looks like a good idea to unroll this loop, let's do it!
            CLANG_FORMAT_COMMENT_ANCHOR;

#ifdef DEBUG
            if (verbose)
            {
                printf("\nUnrolling loop ");
                optPrintLoopInfo(&loop);
                printf(" over V%02u from %u to %u unrollCostSz = %d\n\n", lvar, lbeg, llim, unrollCostSz);
            }
#endif
        }

#if FEATURE_LOOP_ALIGN
        for (BasicBlock* const block : loop.LoopBlocks())
        {
            block->unmarkLoopAlign(this DEBUG_ARG("Unrolled loop"));
        }
#endif

        // Create the unrolled loop statement list.
        {
            // When unrolling a loop, that loop disappears (and will be removed from the loop table). Each unrolled
            // block will be set to exist within the parent loop, if any. However, if we unroll a loop that has
            // nested loops, we will create multiple copies of the nested loops. This requires adding new loop table
            // entries to represent the new loops. Instead of trying to do this incrementally, in the case where
            // nested loops exist (in any unrolled loop) we rebuild the entire loop table after unrolling.

            BlockToBlockMap        blockMap(getAllocator(CMK_LoopOpt));
            BasicBlock*            insertAfter                    = bottom;
            BasicBlock::loopNumber newLoopNum                     = loop.lpParent;
            bool                   anyNestedLoopsUnrolledThisLoop = false;
            int                    lval;
            unsigned               iterToUnroll = totalIter; // The number of iterations left to unroll

            for (lval = lbeg; iterToUnroll > 0; iterToUnroll--)
            {
                // Note: we can't use the loop.LoopBlocks() iterator, as it captures loop.lpBottom->bbNext at the
                // beginning of iteration, and we insert blocks before that. So we need to evaluate lpBottom->bbNext
                // every iteration.
                for (BasicBlock* block = loop.lpTop; block != loop.lpBottom->bbNext; block = block->bbNext)
                {
                    BasicBlock* newBlock = insertAfter =
                        fgNewBBafter(block->bbJumpKind, insertAfter, /*extendRegion*/ true);
                    blockMap.Set(block, newBlock, BlockToBlockMap::Overwrite);

                    if (!BasicBlock::CloneBlockState(this, newBlock, block, lvar, lval))
                    {
                        // CloneBlockState (specifically, gtCloneExpr) doesn't handle everything. If it fails
                        // to clone a block in the loop, splice out and forget all the blocks we cloned so far:
                        // put the loop blocks back to how they were before we started cloning blocks,
                        // and abort unrolling the loop.
                        BasicBlock* oldBottomNext = insertAfter->bbNext;
                        bottom->bbNext            = oldBottomNext;
                        oldBottomNext->bbPrev     = bottom;
                        loop.lpFlags |= LPFLG_DONT_UNROLL; // Mark it so we don't try to unroll it again.
                        INDEBUG(++unrollFailures);
                        JITDUMP("Failed to unroll loop " FMT_LP ": block cloning failed on " FMT_BB "\n", lnum,
                                block->bbNum);
                        goto DONE_LOOP;
                    }

                    // All blocks in the unrolled loop will now be marked with the parent loop number. Note that
                    // if the loop being unrolled contains nested (child) loops, we will notice this below (when
                    // we set anyNestedLoopsUnrolledThisLoop), and that will cause us to rebuild the entire loop
                    // table and all loop annotations on blocks. However, if the loop contains no nested loops,
                    // setting the block `bbNatLoopNum` here is sufficient to incrementally update the block's
                    // loop info.

                    newBlock->bbNatLoopNum = newLoopNum;

                    // Block weight should no longer have the loop multiplier
                    //
                    // Note this is not quite right, as we may not have upscaled by this amount
                    // and we might not have upscaled at all, if we had profile data.
                    //
                    newBlock->scaleBBWeight(1.0 / BB_LOOP_WEIGHT_SCALE);

                    // Jump dests are set in a post-pass; make sure CloneBlockState hasn't tried to set them.
                    assert(newBlock->bbJumpDest == nullptr);

                    if (block == bottom)
                    {
                        // Remove the test; we're doing a full unroll.

                        Statement* testCopyStmt = newBlock->lastStmt();
                        GenTree*   testCopyExpr = testCopyStmt->GetRootNode();
                        assert(testCopyExpr->gtOper == GT_JTRUE);
                        GenTree* sideEffList = nullptr;
                        gtExtractSideEffList(testCopyExpr, &sideEffList, GTF_SIDE_EFFECT | GTF_ORDER_SIDEEFF);
                        if (sideEffList == nullptr)
                        {
                            fgRemoveStmt(newBlock, testCopyStmt);
                        }
                        else
                        {
                            testCopyStmt->SetRootNode(sideEffList);
                        }
                        newBlock->bbJumpKind = BBJ_NONE;
                    }
                }

                // Now redirect any branches within the newly-cloned iteration.
                // Don't include `bottom` in the iteration, since we've already changed the
                // newBlock->bbJumpKind, above.
                for (BasicBlock* block = loop.lpTop; block != loop.lpBottom; block = block->bbNext)
                {
                    BasicBlock* newBlock = blockMap[block];
                    optCopyBlkDest(block, newBlock);
                    optRedirectBlock(newBlock, &blockMap);
                }

                /* update the new value for the unrolled iterator */

                switch (iterOper)
                {
                    case GT_ADD:
                        lval += iterInc;
                        break;

                    case GT_SUB:
                        lval -= iterInc;
                        break;

                    case GT_RSH:
                    case GT_LSH:
                        noway_assert(!"Unrolling not implemented for this loop iterator");
                        goto DONE_LOOP;

                    default:
                        noway_assert(!"Unknown operator for constant loop iterator");
                        goto DONE_LOOP;
                }
            }

            // If we get here, we successfully cloned all the blocks in the unrolled loop.

            // Gut the old loop body
            for (BasicBlock* const block : loop.LoopBlocks())
            {
                // Check if the old loop body had any nested loops that got cloned. Note that we need to do this
                // here, and not in the loop above, to handle the special case where totalIter is zero, and the
                // above loop doesn't execute.
                if (block->bbNatLoopNum != lnum)
                {
                    anyNestedLoopsUnrolledThisLoop = true;
                }

                block->bbStmtList = nullptr;
                block->bbJumpKind = BBJ_NONE;
                block->bbFlags &= ~BBF_LOOP_HEAD;
                block->bbJumpDest   = nullptr;
                block->bbNatLoopNum = newLoopNum;
            }

            if (anyNestedLoopsUnrolledThisLoop)
            {
                anyNestedLoopsUnrolled = true;
            }

            // If the HEAD is a BBJ_COND drop the condition (and make HEAD a BBJ_NONE block).
            if (head->bbJumpKind == BBJ_COND)
            {
                testStmt = head->lastStmt();
                noway_assert(testStmt->GetRootNode()->gtOper == GT_JTRUE);
                fgRemoveStmt(head, testStmt);
                head->bbJumpKind = BBJ_NONE;
            }
            else
            {
                /* the loop must execute */
                noway_assert(head->bbJumpKind == BBJ_NONE);
            }

#ifdef DEBUG
            if (verbose)
            {
                printf("Whole unrolled loop:\n");

                gtDispTree(initStmt->GetRootNode());
                printf("\n");
                fgDumpTrees(top, insertAfter);

                if (anyNestedLoopsUnrolledThisLoop)
                {
                    printf("Unrolled loop " FMT_LP " contains nested loops\n", lnum);
                }
            }
#endif // DEBUG

            // Update loop table.
            optMarkLoopRemoved(lnum);

            // Note if we created new BBJ_RETURNs (or removed some).
            if (totalIter > 0)
            {
                fgReturnCount += loopRetCount * (totalIter - 1);
            }
            else
            {
                assert(totalIter == 0);
                assert(fgReturnCount >= loopRetCount);
                fgReturnCount -= loopRetCount;
            }

            // Remember that something has changed.
            INDEBUG(++unrollCount);
            change = true;
        }

    // Reached both on success and on abandoning a partially-processed loop; continue with the next loop.
    DONE_LOOP:;
    }

    if (change)
    {
#ifdef DEBUG
        if (verbose)
        {
            printf("\nFinished unrolling %d loops", unrollCount);
            if (unrollFailures > 0)
            {
                printf(", %d failures due to block cloning", unrollFailures);
            }
            printf("\n");
            if (anyNestedLoopsUnrolled)
            {
                printf("At least one unrolled loop contains nested loops; recomputing loop table\n");
            }
        }
#endif // DEBUG

        // If we unrolled any nested loops, we rebuild the loop table (including recomputing the
        // return blocks list).

        constexpr bool computePreds        = true;
        constexpr bool computeDoms         = true;
        const bool     computeReturnBlocks = anyNestedLoopsUnrolled;
        const bool     computeLoops        = anyNestedLoopsUnrolled;
        fgUpdateChangedFlowGraph(computePreds, computeDoms, computeReturnBlocks, computeLoops);

        DBEXEC(verbose, fgDispBasicBlocks());
    }
    else
    {
#ifdef DEBUG
        assert(unrollCount == 0);
        assert(!anyNestedLoopsUnrolled);

        if (unrollFailures > 0)
        {
            printf("\nFinished loop unrolling, %d failures due to block cloning\n", unrollFailures);
        }
#endif // DEBUG
    }

#ifdef DEBUG
    fgDebugCheckBBlist(true);
    fgDebugCheckLoopTable();
#endif // DEBUG

    return PhaseStatus::MODIFIED_EVERYTHING;
}
#ifdef _PREFAST_
#pragma warning(pop)
#endif

/*****************************************************************************
 *
 *  Return false if there is a code path from 'topBB' to 'botBB' that might
 *  not execute a method call.
 */

bool Compiler::optReachWithoutCall(BasicBlock* topBB, BasicBlock* botBB)
{
    // TODO-Cleanup: Currently BBF_GC_SAFE_POINT is not set for helper calls,
    // as some helper calls are neither interruptible nor hijackable.
    // When we can determine this, then we can set BBF_GC_SAFE_POINT for
    // those helpers too.

    noway_assert(topBB->bbNum <= botBB->bbNum);

    // We can always check topBB and botBB for any gc safe points and early out

    if ((topBB->bbFlags | botBB->bbFlags) & BBF_GC_SAFE_POINT)
    {
        return false;
    }

    // Otherwise we will need to rely upon the dominator sets

    if (!fgDomsComputed)
    {
        // return a conservative answer of true when we don't have the dominator sets
        return true;
    }

    // Walk the blocks lexically from topBB towards botBB.
    BasicBlock* curBB = topBB;
    for (;;)
    {
        noway_assert(curBB);

        // If we added a loop pre-header block then we will
        //  have a bbNum greater than fgLastBB, and we won't have
        //  any dominator information about this block, so skip it.
        //
        if (curBB->bbNum <= fgLastBB->bbNum)
        {
            noway_assert(curBB->bbNum <= botBB->bbNum);

            // Does this block contain a gc safe point?

            if (curBB->bbFlags & BBF_GC_SAFE_POINT)
            {
                // Will this block always execute on the way to botBB ?
                //
                // Since we are checking every block in [topBB .. botBB] and we are using
                // a lexical definition of a loop.
                //  (all that we know is that is that botBB is a back-edge to topBB)
                // Thus while walking blocks in this range we may encounter some blocks
                // that are not really part of the loop, and so we need to perform
                // some additional checks:
                //
                // We will check that the current 'curBB' is reachable from 'topBB'
                // and that it dominates the block containing the back-edge 'botBB'
                // When both of these are true then we know that the gcsafe point in 'curBB'
                // will be encountered in the loop and we can return false
                //
                if (fgDominate(curBB, botBB) && fgReachable(topBB, curBB))
                {
                    return false;
                }
            }
            else
            {
                // If we've reached the destination block, then we're done

                if (curBB == botBB)
                {
                    break;
                }
            }
        }

        curBB = curBB->bbNext;
    }

    // If we didn't find any blocks that contained a gc safe point and
    // also met the fgDominate and fgReachable criteria then we must return true
    //
    return true;
}

// optInvertCountTreeInfo: fgWalkTreePre callback that counts shared static
// helper calls and GT_ARR_LENGTH nodes into the OptInvertCountTreeInfoType
// accumulator passed via pCallbackData.
//
// static
Compiler::fgWalkResult Compiler::optInvertCountTreeInfo(GenTree** pTree, fgWalkData* data)
{
    OptInvertCountTreeInfoType* o = (OptInvertCountTreeInfoType*)data->pCallbackData;

    if (Compiler::IsSharedStaticHelper(*pTree))
    {
        o->sharedStaticHelperCount += 1;
    }

    if ((*pTree)->OperGet() == GT_ARR_LENGTH)
    {
        o->arrayLengthCount += 1;
    }

    return WALK_CONTINUE;
}

//-----------------------------------------------------------------------------
// optInvertWhileLoop: modify flow and duplicate code so that for/while loops are
// entered at top and tested at bottom (aka loop rotation or bottom testing).
// Creates a "zero trip test" condition which guards entry to the loop.
// Enables loop invariant hoisting and loop cloning, which depend on
// `do {} while` format loops.
Enables creation of a pre-header block after the // zero trip test to place code that only runs if the loop is guaranteed to // run at least once. // // Arguments: // block -- block that may be the predecessor of the un-rotated loop's test block. // // Returns: // true if any IR changes possibly made (used to determine phase return status) // // Notes: // Uses a simple lexical screen to detect likely loops. // // Specifically, we're looking for the following case: // // ... // jmp test // `block` argument // loop: // ... // ... // test: // ..stmts.. // cond // jtrue loop // // If we find this, and the condition is simple enough, we change // the loop to the following: // // ... // ..stmts.. // duplicated cond block statments // cond // duplicated cond // jfalse done // // else fall-through // loop: // ... // ... // test: // ..stmts.. // cond // jtrue loop // done: // // Makes no changes if the flow pattern match fails. // // May not modify a loop if profile is unfavorable, if the cost of duplicating // code is large (factoring in potential CSEs). // bool Compiler::optInvertWhileLoop(BasicBlock* block) { assert(opts.OptimizationEnabled()); assert(compCodeOpt() != SMALL_CODE); // Does the BB end with an unconditional jump? if (block->bbJumpKind != BBJ_ALWAYS || (block->bbFlags & BBF_KEEP_BBJ_ALWAYS)) { // It can't be one of the ones we use for our exception magic return false; } // Get hold of the jump target BasicBlock* bTest = block->bbJumpDest; // Does the block consist of 'jtrue(cond) block' ? if (bTest->bbJumpKind != BBJ_COND) { return false; } // bTest must be a backwards jump to block->bbNext if (bTest->bbJumpDest != block->bbNext) { return false; } // Since test is a BBJ_COND it will have a bbNext noway_assert(bTest->bbNext != nullptr); // 'block' must be in the same try region as the condition, since we're going to insert a duplicated condition // in a new block after 'block', and the condition might include exception throwing code. 
// On non-funclet platforms (x86), the catch exit is a BBJ_ALWAYS, but we don't want that to // be considered as the head of a loop, so also disallow different handler regions. if (!BasicBlock::sameEHRegion(block, bTest)) { return false; } // The duplicated condition block will branch to bTest->bbNext, so that also better be in the // same try region (or no try region) to avoid generating illegal flow. BasicBlock* bTestNext = bTest->bbNext; if (bTestNext->hasTryIndex() && !BasicBlock::sameTryRegion(block, bTestNext)) { return false; } // It has to be a forward jump. Defer this check until after all the cheap checks // are done, since it iterates forward in the block list looking for bbJumpDest. // TODO-CQ: Check if we can also optimize the backwards jump as well. // if (!fgIsForwardBranch(block)) { return false; } // Find the loop termination test at the bottom of the loop. Statement* condStmt = bTest->lastStmt(); // Verify the test block ends with a conditional that we can manipulate. GenTree* const condTree = condStmt->GetRootNode(); noway_assert(condTree->gtOper == GT_JTRUE); if (!condTree->AsOp()->gtOp1->OperIsCompare()) { return false; } // Estimate the cost of cloning the entire test block. // // Note: it would help throughput to compute the maximum cost // first and early out for large bTest blocks, as we are doing two // tree walks per tree. But because of this helper call scan, the // maximum cost depends on the trees in the block. // // We might consider flagging blocks with hoistable helper calls // during importation, so we can avoid the helper search and // implement an early bail out for large blocks with no helper calls. // // Note that gtPrepareCost can cause operand swapping, so we must // return `true` (possible IR change) from here on. 
unsigned estDupCostSz = 0; for (Statement* const stmt : bTest->Statements()) { GenTree* tree = stmt->GetRootNode(); gtPrepareCost(tree); estDupCostSz += tree->GetCostSz(); } weight_t loopIterations = BB_LOOP_WEIGHT_SCALE; bool allProfileWeightsAreValid = false; weight_t const weightBlock = block->bbWeight; weight_t const weightTest = bTest->bbWeight; weight_t const weightNext = block->bbNext->bbWeight; // If we have profile data then we calculate the number of times // the loop will iterate into loopIterations if (fgIsUsingProfileWeights()) { // Only rely upon the profile weight when all three of these blocks // have good profile weights if (block->hasProfileWeight() && bTest->hasProfileWeight() && block->bbNext->hasProfileWeight()) { // If this while loop never iterates then don't bother transforming // if (weightNext == BB_ZERO_WEIGHT) { return true; } // We generally expect weightTest == weightNext + weightBlock. // // Tolerate small inconsistencies... // if (!fgProfileWeightsConsistent(weightBlock + weightNext, weightTest)) { JITDUMP("Profile weights locally inconsistent: block " FMT_WT ", next " FMT_WT ", test " FMT_WT "\n", weightBlock, weightNext, weightTest); } else { allProfileWeightsAreValid = true; // Determine iteration count // // weightNext is the number of time this loop iterates // weightBlock is the number of times that we enter the while loop // loopIterations is the average number of times that this loop iterates // loopIterations = weightNext / weightBlock; } } else { JITDUMP("Missing profile data for loop!\n"); } } unsigned maxDupCostSz = 34; if ((compCodeOpt() == FAST_CODE) || compStressCompile(STRESS_DO_WHILE_LOOPS, 30)) { maxDupCostSz *= 4; } // If this loop iterates a lot then raise the maxDupCost if (loopIterations >= 12.0) { maxDupCostSz *= 2; if (loopIterations >= 96.0) { maxDupCostSz *= 2; } } // If the compare has too high cost then we don't want to dup. 
bool costIsTooHigh = (estDupCostSz > maxDupCostSz); OptInvertCountTreeInfoType optInvertTotalInfo = {}; if (costIsTooHigh) { // If we already know that the cost is acceptable, then don't waste time walking the tree // counting things to boost the maximum allowed cost. // // If the loop condition has a shared static helper, we really want this loop converted // as not converting the loop will disable loop hoisting, meaning the shared helper will // be executed on every loop iteration. // // If the condition has array.Length operations, also boost, as they are likely to be CSE'd. for (Statement* const stmt : bTest->Statements()) { GenTree* tree = stmt->GetRootNode(); OptInvertCountTreeInfoType optInvertInfo = {}; fgWalkTreePre(&tree, Compiler::optInvertCountTreeInfo, &optInvertInfo); optInvertTotalInfo.sharedStaticHelperCount += optInvertInfo.sharedStaticHelperCount; optInvertTotalInfo.arrayLengthCount += optInvertInfo.arrayLengthCount; if ((optInvertInfo.sharedStaticHelperCount > 0) || (optInvertInfo.arrayLengthCount > 0)) { // Calculate a new maximum cost. We might be able to early exit. unsigned newMaxDupCostSz = maxDupCostSz + 24 * min(optInvertTotalInfo.sharedStaticHelperCount, (int)(loopIterations + 1.5)) + 8 * optInvertTotalInfo.arrayLengthCount; // Is the cost too high now? costIsTooHigh = (estDupCostSz > newMaxDupCostSz); if (!costIsTooHigh) { // No need counting any more trees; we're going to do the transformation. JITDUMP("Decided to duplicate loop condition block after counting helpers in tree [%06u] in " "block " FMT_BB, dspTreeID(tree), bTest->bbNum); maxDupCostSz = newMaxDupCostSz; // for the JitDump output below break; } } } } #ifdef DEBUG if (verbose) { // Note that `optInvertTotalInfo.sharedStaticHelperCount = 0` means either there were zero helpers, or the // tree walk to count them was not done. 
printf( "\nDuplication of loop condition [%06u] is %s, because the cost of duplication (%i) is %s than %i," "\n loopIterations = %7.3f, optInvertTotalInfo.sharedStaticHelperCount >= %d, validProfileWeights = %s\n", dspTreeID(condTree), costIsTooHigh ? "not done" : "performed", estDupCostSz, costIsTooHigh ? "greater" : "less or equal", maxDupCostSz, loopIterations, optInvertTotalInfo.sharedStaticHelperCount, dspBool(allProfileWeightsAreValid)); } #endif if (costIsTooHigh) { return true; } bool foundCondTree = false; // Create a new block after `block` to put the copied condition code. block->bbJumpKind = BBJ_NONE; block->bbJumpDest = nullptr; BasicBlock* bNewCond = fgNewBBafter(BBJ_COND, block, /*extendRegion*/ true); // Clone each statement in bTest and append to bNewCond. for (Statement* const stmt : bTest->Statements()) { GenTree* originalTree = stmt->GetRootNode(); GenTree* clonedTree = gtCloneExpr(originalTree); // Special case handling needed for the conditional jump tree if (originalTree == condTree) { foundCondTree = true; // Get the compare subtrees GenTree* originalCompareTree = originalTree->AsOp()->gtOp1; GenTree* clonedCompareTree = clonedTree->AsOp()->gtOp1; assert(originalCompareTree->OperIsCompare()); assert(clonedCompareTree->OperIsCompare()); // Flag compare and cloned copy so later we know this loop // has a proper zero trip test. originalCompareTree->gtFlags |= GTF_RELOP_ZTT; clonedCompareTree->gtFlags |= GTF_RELOP_ZTT; // The original test branches to remain in the loop. The // new cloned test will branch to avoid the loop. So the // cloned compare needs to reverse the branch condition. 
gtReverseCond(clonedCompareTree); } Statement* clonedStmt = fgNewStmtAtEnd(bNewCond, clonedTree); if (opts.compDbgInfo) { clonedStmt->SetDebugInfo(stmt->GetDebugInfo()); } } assert(foundCondTree); // Flag the block that received the copy as potentially having an array/vtable // reference, nullcheck, object/array allocation if the block copied from did; // this is a conservative guess. if (auto copyFlags = bTest->bbFlags & (BBF_HAS_IDX_LEN | BBF_HAS_NULLCHECK | BBF_HAS_NEWOBJ | BBF_HAS_NEWARRAY)) { bNewCond->bbFlags |= copyFlags; } bNewCond->bbJumpDest = bTest->bbNext; bNewCond->inheritWeight(block); // Update bbRefs and bbPreds for 'bNewCond', 'bNewCond->bbNext' 'bTest' and 'bTest->bbNext'. fgAddRefPred(bNewCond, block); fgAddRefPred(bNewCond->bbNext, bNewCond); fgRemoveRefPred(bTest, block); fgAddRefPred(bTest->bbNext, bNewCond); // Move all predecessor edges that look like loop entry edges to point to the new cloned condition // block, not the existing condition block. The idea is that if we only move `block` to point to // `bNewCond`, but leave other `bTest` predecessors still pointing to `bTest`, when we eventually // recognize loops, the loop will appear to have multiple entries, which will prevent optimization. // We don't have loops yet, but blocks should be in increasing lexical numbered order, so use that // as the proxy for predecessors that are "in" versus "out" of the potential loop. Note that correctness // is maintained no matter which condition block we point to, but we'll lose optimization potential // (and create spaghetti code) if we get it wrong. BlockToBlockMap blockMap(getAllocator(CMK_LoopOpt)); bool blockMapInitialized = false; unsigned loopFirstNum = bNewCond->bbNext->bbNum; unsigned loopBottomNum = bTest->bbNum; for (BasicBlock* const predBlock : bTest->PredBlocks()) { unsigned bNum = predBlock->bbNum; if ((loopFirstNum <= bNum) && (bNum <= loopBottomNum)) { // Looks like the predecessor is from within the potential loop; skip it. 
continue; } if (!blockMapInitialized) { blockMapInitialized = true; blockMap.Set(bTest, bNewCond); } // Redirect the predecessor to the new block. JITDUMP("Redirecting non-loop " FMT_BB " -> " FMT_BB " to " FMT_BB " -> " FMT_BB "\n", predBlock->bbNum, bTest->bbNum, predBlock->bbNum, bNewCond->bbNum); optRedirectBlock(predBlock, &blockMap, /*updatePreds*/ true); } // If we have profile data for all blocks and we know that we are cloning the // `bTest` block into `bNewCond` and thus changing the control flow from `block` so // that it no longer goes directly to `bTest` anymore, we have to adjust // various weights. // if (allProfileWeightsAreValid) { // Update the weight for bTest // JITDUMP("Reducing profile weight of " FMT_BB " from " FMT_WT " to " FMT_WT "\n", bTest->bbNum, weightTest, weightNext); bTest->bbWeight = weightNext; // Determine the new edge weights. // // We project the next/jump ratio for block and bTest by using // the original likelihoods out of bTest. // // Note "next" is the loop top block, not bTest's bbNext, // we'll call this latter block "after". 
// weight_t const testToNextLikelihood = min(1.0, weightNext / weightTest); weight_t const testToAfterLikelihood = 1.0 - testToNextLikelihood; // Adjust edges out of bTest (which now has weight weightNext) // weight_t const testToNextWeight = weightNext * testToNextLikelihood; weight_t const testToAfterWeight = weightNext * testToAfterLikelihood; flowList* const edgeTestToNext = fgGetPredForBlock(bTest->bbJumpDest, bTest); flowList* const edgeTestToAfter = fgGetPredForBlock(bTest->bbNext, bTest); JITDUMP("Setting weight of " FMT_BB " -> " FMT_BB " to " FMT_WT " (iterate loop)\n", bTest->bbNum, bTest->bbJumpDest->bbNum, testToNextWeight); JITDUMP("Setting weight of " FMT_BB " -> " FMT_BB " to " FMT_WT " (exit loop)\n", bTest->bbNum, bTest->bbNext->bbNum, testToAfterWeight); edgeTestToNext->setEdgeWeights(testToNextWeight, testToNextWeight, bTest->bbJumpDest); edgeTestToAfter->setEdgeWeights(testToAfterWeight, testToAfterWeight, bTest->bbNext); // Adjust edges out of block, using the same distribution. 
// JITDUMP("Profile weight of " FMT_BB " remains unchanged at " FMT_WT "\n", block->bbNum, weightBlock); weight_t const blockToNextLikelihood = testToNextLikelihood; weight_t const blockToAfterLikelihood = testToAfterLikelihood; weight_t const blockToNextWeight = weightBlock * blockToNextLikelihood; weight_t const blockToAfterWeight = weightBlock * blockToAfterLikelihood; flowList* const edgeBlockToNext = fgGetPredForBlock(bNewCond->bbNext, bNewCond); flowList* const edgeBlockToAfter = fgGetPredForBlock(bNewCond->bbJumpDest, bNewCond); JITDUMP("Setting weight of " FMT_BB " -> " FMT_BB " to " FMT_WT " (enter loop)\n", bNewCond->bbNum, bNewCond->bbNext->bbNum, blockToNextWeight); JITDUMP("Setting weight of " FMT_BB " -> " FMT_BB " to " FMT_WT " (avoid loop)\n", bNewCond->bbNum, bNewCond->bbJumpDest->bbNum, blockToAfterWeight); edgeBlockToNext->setEdgeWeights(blockToNextWeight, blockToNextWeight, bNewCond->bbNext); edgeBlockToAfter->setEdgeWeights(blockToAfterWeight, blockToAfterWeight, bNewCond->bbJumpDest); #ifdef DEBUG // Verify profile for the two target blocks is consistent. 
        //
        fgDebugCheckIncomingProfileData(bNewCond->bbNext);
        fgDebugCheckIncomingProfileData(bNewCond->bbJumpDest);
#endif // DEBUG
    }

#ifdef DEBUG
    if (verbose)
    {
        printf("\nDuplicated loop exit block at " FMT_BB " for loop (" FMT_BB " - " FMT_BB ")\n", bNewCond->bbNum,
               bNewCond->bbNext->bbNum, bTest->bbNum);
        printf("Estimated code size expansion is %d\n", estDupCostSz);

        fgDumpBlock(bNewCond);
        fgDumpBlock(bTest);
    }
#endif // DEBUG

    return true;
}

//-----------------------------------------------------------------------------
// optInvertLoops: invert while loops in the method
//
// Returns:
//   suitable phase status
//
PhaseStatus Compiler::optInvertLoops()
{
    noway_assert(opts.OptimizationEnabled());
    noway_assert(fgModified == false);

#if defined(OPT_CONFIG)
    if (!JitConfig.JitDoLoopInversion())
    {
        JITDUMP("Loop inversion disabled\n");
        return PhaseStatus::MODIFIED_NOTHING;
    }
#endif // OPT_CONFIG

    // Inverting a loop duplicates its condition block, growing code size,
    // so don't do it when optimizing for size.
    if (compCodeOpt() == SMALL_CODE)
    {
        return PhaseStatus::MODIFIED_NOTHING;
    }

    bool madeChanges = false; // Assume no changes made
    for (BasicBlock* const block : Blocks())
    {
        // Make sure the appropriate fields are initialized
        //
        if (block->bbWeight == BB_ZERO_WEIGHT)
        {
            // Zero weighted block can't have a LOOP_HEAD flag
            noway_assert(block->isLoopHead() == false);
            continue;
        }

        if (optInvertWhileLoop(block))
        {
            madeChanges = true;
        }
    }

    if (fgModified)
    {
        // Reset fgModified here as we've done a consistent set of edits.
        //
        fgModified = false;
    }

    return madeChanges ? PhaseStatus::MODIFIED_EVERYTHING : PhaseStatus::MODIFIED_NOTHING;
}

//-----------------------------------------------------------------------------
// optOptimizeLayout: reorder blocks to reduce cost of control flow
//
// Returns:
//   suitable phase status
//
PhaseStatus Compiler::optOptimizeLayout()
{
    noway_assert(opts.OptimizationEnabled());
    noway_assert(fgModified == false);

    bool       madeChanges          = false;
    const bool allowTailDuplication = true;

    madeChanges |= fgUpdateFlowGraph(allowTailDuplication);
    madeChanges |= fgReorderBlocks();
    madeChanges |= fgUpdateFlowGraph();

    // fgReorderBlocks can cause IR changes even if it does not modify
    // the flow graph. It calls gtPrepareCost which can cause operand swapping.
    // Work around this for now.
    //
    // Note phase status only impacts dumping and checking done post-phase,
    // it has no impact on a release build.
    //
    madeChanges = true;

    return madeChanges ? PhaseStatus::MODIFIED_EVERYTHING : PhaseStatus::MODIFIED_NOTHING;
}

//------------------------------------------------------------------------
// optMarkLoopHeads: Mark all potential loop heads as BBF_LOOP_HEAD. A potential loop head is a block
// targeted by a lexical back edge, where the source of the back edge is reachable from the block.
// Note that if there are no lexical back edges, there can't be any loops.
//
// If there are any potential loop heads, set `fgHasLoops` to `true`.
//
// Assumptions:
//    The reachability sets must be computed and valid.
//
void Compiler::optMarkLoopHeads()
{
#ifdef DEBUG
    if (verbose)
    {
        printf("*************** In optMarkLoopHeads()\n");
    }

    assert(!fgCheapPredsValid);
    assert(fgReachabilitySetsValid);
    fgDebugCheckBBNumIncreasing();

    int loopHeadsMarked = 0;
#endif

    bool hasLoops = false;

    for (BasicBlock* const block : Blocks())
    {
        // Set BBF_LOOP_HEAD if we have backwards branches to this block.
        unsigned blockNum = block->bbNum;
        for (BasicBlock* const predBlock : block->PredBlocks())
        {
            // A lexically backward branch: the predecessor has an equal-or-higher
            // block number than the block it branches to.
            if (blockNum <= predBlock->bbNum)
            {
                if (predBlock->bbJumpKind == BBJ_CALLFINALLY)
                {
                    // Loops never have BBJ_CALLFINALLY as the source of their "back edge".
                    continue;
                }

                // If block can reach predBlock then we have a loop head
                if (BlockSetOps::IsMember(this, predBlock->bbReach, blockNum))
                {
                    hasLoops = true;
                    block->bbFlags |= BBF_LOOP_HEAD;
                    INDEBUG(++loopHeadsMarked);
                    break; // No need to look at more `block` predecessors
                }
            }
        }
    }

    JITDUMP("%d loop heads marked\n", loopHeadsMarked);
    fgHasLoops = hasLoops;
}

//-----------------------------------------------------------------------------
// optResetLoopInfo: reset all loop info in preparation for rebuilding the loop table, or preventing
// future phases from accessing loop-related data.
//
void Compiler::optResetLoopInfo()
{
#ifdef DEBUG
    if (verbose)
    {
        printf("*************** In optResetLoopInfo()\n");
    }
#endif

    optLoopCount        = 0; // This will force the table to be rebuilt
    loopAlignCandidates = 0;

    // This will cause users to crash if they use the table when it is considered empty.
    // TODO: the loop table is always allocated as the same (maximum) size, so this is wasteful.
    // We could zero it out (possibly only in DEBUG) to be paranoid, but there's no reason to
    // force it to be re-allocated.
    optLoopTable = nullptr;

    for (BasicBlock* const block : Blocks())
    {
        // If the block weight didn't come from profile data, reset it so it can be calculated again.
        if (!block->hasProfileWeight())
        {
            block->bbWeight = BB_UNITY_WEIGHT;
            block->bbFlags &= ~BBF_RUN_RARELY;
        }

        block->bbFlags &= ~BBF_LOOP_FLAGS;
        block->bbNatLoopNum = BasicBlock::NOT_IN_LOOP;
    }
}

//-----------------------------------------------------------------------------
// optFindAndScaleGeneralLoopBlocks: scale block weights based on loop nesting depth.
// Note that this uses a very general notion of "loop": any block targeted by a reachable
// back-edge is considered a loop.
// void Compiler::optFindAndScaleGeneralLoopBlocks() { #ifdef DEBUG if (verbose) { printf("*************** In optFindAndScaleGeneralLoopBlocks()\n"); } #endif // This code depends on block number ordering. INDEBUG(fgDebugCheckBBNumIncreasing()); unsigned generalLoopCount = 0; // We will use the following terminology: // top - the first basic block in the loop (i.e. the head of the backward edge) // bottom - the last block in the loop (i.e. the block from which we jump to the top) // lastBottom - used when we have multiple back edges to the same top for (BasicBlock* const top : Blocks()) { // Only consider `top` blocks already determined to be potential loop heads. if (!top->isLoopHead()) { continue; } BasicBlock* foundBottom = nullptr; for (BasicBlock* const bottom : top->PredBlocks()) { // Is this a loop candidate? - We look for "back edges" // Is this a backward edge? (from BOTTOM to TOP) if (top->bbNum > bottom->bbNum) { continue; } // We only consider back-edges that are BBJ_COND or BBJ_ALWAYS for loops. if ((bottom->bbJumpKind != BBJ_COND) && (bottom->bbJumpKind != BBJ_ALWAYS)) { continue; } /* the top block must be able to reach the bottom block */ if (!fgReachable(top, bottom)) { continue; } /* Found a new loop, record the longest backedge in foundBottom */ if ((foundBottom == nullptr) || (bottom->bbNum > foundBottom->bbNum)) { foundBottom = bottom; } } if (foundBottom) { generalLoopCount++; /* Mark all blocks between 'top' and 'bottom' */ optScaleLoopBlocks(top, foundBottom); } // We track at most 255 loops if (generalLoopCount == 255) { #if COUNT_LOOPS totalUnnatLoopOverflows++; #endif break; } } JITDUMP("\nFound a total of %d general loops.\n", generalLoopCount); #if COUNT_LOOPS totalUnnatLoopCount += generalLoopCount; #endif } //----------------------------------------------------------------------------- // optFindLoops: find loops in the function. 
// // The JIT recognizes two types of loops in a function: natural loops and "general" (or "unnatural") loops. // Natural loops are those which get added to the loop table. Most downstream optimizations require // using natural loops. See `optFindNaturalLoops` for a definition of the criteria for recognizing a natural loop. // A general loop is defined as a lexical (program order) range of blocks where a later block branches to an // earlier block (that is, there is a back edge in the flow graph), and the later block is reachable from the earlier // block. General loops are used for weighting flow graph blocks (when there is no block profile data), as well as // for determining if we require fully interruptible GC information. // // Notes: // Also (re)sets all non-IBC block weights, and marks loops potentially needing alignment padding. // void Compiler::optFindLoops() { #ifdef DEBUG if (verbose) { printf("*************** In optFindLoops()\n"); } #endif noway_assert(opts.OptimizationEnabled()); assert(fgDomsComputed); optMarkLoopHeads(); // Were there any potential loops in the flow graph? if (fgHasLoops) { optFindNaturalLoops(); optFindAndScaleGeneralLoopBlocks(); optIdentifyLoopsForAlignment(); // Check if any of the loops need alignment } #ifdef DEBUG fgDebugCheckLoopTable(); #endif optLoopsMarked = true; } //----------------------------------------------------------------------------- // optFindLoopsPhase: The wrapper function for the "find loops" phase. // PhaseStatus Compiler::optFindLoopsPhase() { optFindLoops(); return PhaseStatus::MODIFIED_EVERYTHING; } /***************************************************************************** * * Determine the kind of interference for the call. 
*/ /* static */ inline Compiler::callInterf Compiler::optCallInterf(GenTreeCall* call) { // if not a helper, kills everything if (call->gtCallType != CT_HELPER) { return CALLINT_ALL; } // setfield and array address store kill all indirections switch (eeGetHelperNum(call->gtCallMethHnd)) { case CORINFO_HELP_ASSIGN_REF: // Not strictly needed as we don't make a GT_CALL with this case CORINFO_HELP_CHECKED_ASSIGN_REF: // Not strictly needed as we don't make a GT_CALL with this case CORINFO_HELP_ASSIGN_BYREF: // Not strictly needed as we don't make a GT_CALL with this case CORINFO_HELP_SETFIELDOBJ: case CORINFO_HELP_ARRADDR_ST: return CALLINT_REF_INDIRS; case CORINFO_HELP_SETFIELDFLOAT: case CORINFO_HELP_SETFIELDDOUBLE: case CORINFO_HELP_SETFIELD8: case CORINFO_HELP_SETFIELD16: case CORINFO_HELP_SETFIELD32: case CORINFO_HELP_SETFIELD64: return CALLINT_SCL_INDIRS; case CORINFO_HELP_ASSIGN_STRUCT: // Not strictly needed as we don't use this case CORINFO_HELP_MEMSET: // Not strictly needed as we don't make a GT_CALL with this case CORINFO_HELP_MEMCPY: // Not strictly needed as we don't make a GT_CALL with this case CORINFO_HELP_SETFIELDSTRUCT: return CALLINT_ALL_INDIRS; default: break; } // other helpers kill nothing return CALLINT_NONE; } /***************************************************************************** * * See if the given tree can be computed in the given precision (which must * be smaller than the type of the tree for this to make sense). If 'doit' * is false, we merely check to see whether narrowing is possible; if we * get called with 'doit' being true, we actually perform the narrowing. 
 */

bool Compiler::optNarrowTree(GenTree* tree, var_types srct, var_types dstt, ValueNumPair vnpNarrow, bool doit)
{
    genTreeOps oper;
    unsigned   kind;

    noway_assert(tree);
    noway_assert(genActualType(tree->gtType) == genActualType(srct));

    /* Assume we're only handling integer types */
    noway_assert(varTypeIsIntegral(srct));
    noway_assert(varTypeIsIntegral(dstt));

    unsigned srcSize = genTypeSize(srct);
    unsigned dstSize = genTypeSize(dstt);

    /* dstt must be smaller than srct to narrow */
    if (dstSize >= srcSize)
    {
        return false;
    }

    /* Figure out what kind of a node we have */
    oper = tree->OperGet();
    kind = tree->OperKind();

    if (oper == GT_ASG)
    {
        noway_assert(doit == false);
        return false;
    }

    // Used for recursive "can we narrow?" probes where the value numbers
    // of the operands do not need to be updated.
    ValueNumPair NoVNPair = ValueNumPair();

    if (kind & GTK_LEAF)
    {
        switch (oper)
        {
            /* Constants can usually be narrowed by changing their value */
            CLANG_FORMAT_COMMENT_ANCHOR;

#ifndef TARGET_64BIT
            __int64 lval;
            __int64 lmask;

            case GT_CNS_LNG:
                lval  = tree->AsIntConCommon()->LngValue();
                lmask = 0;

                switch (dstt)
                {
                    case TYP_BYTE:
                        lmask = 0x0000007F;
                        break;
                    case TYP_BOOL:
                    case TYP_UBYTE:
                        lmask = 0x000000FF;
                        break;
                    case TYP_SHORT:
                        lmask = 0x00007FFF;
                        break;
                    case TYP_USHORT:
                        lmask = 0x0000FFFF;
                        break;
                    case TYP_INT:
                        lmask = 0x7FFFFFFF;
                        break;
                    case TYP_UINT:
                        lmask = 0xFFFFFFFF;
                        break;

                    default:
                        return false;
                }

                // The constant must fit in the destination type's value range.
                if ((lval & lmask) != lval)
                    return false;

                if (doit)
                {
                    tree->BashToConst(static_cast<int32_t>(lval));
                    if (vnStore != nullptr)
                    {
                        fgValueNumberTreeConst(tree);
                    }
                }

                return true;
#endif

            case GT_CNS_INT:

                ssize_t ival;
                ival = tree->AsIntCon()->gtIconVal;
                ssize_t imask;
                imask = 0;

                switch (dstt)
                {
                    case TYP_BYTE:
                        imask = 0x0000007F;
                        break;
                    case TYP_BOOL:
                    case TYP_UBYTE:
                        imask = 0x000000FF;
                        break;
                    case TYP_SHORT:
                        imask = 0x00007FFF;
                        break;
                    case TYP_USHORT:
                        imask = 0x0000FFFF;
                        break;
#ifdef TARGET_64BIT
                    case TYP_INT:
                        imask = 0x7FFFFFFF;
                        break;
                    case TYP_UINT:
                        imask = 0xFFFFFFFF;
                        break;
#endif // TARGET_64BIT
                    default:
                        return false;
                }

                if ((ival & imask) != ival)
                {
                    return false;
                }

#ifdef TARGET_64BIT
                if (doit)
                {
                    tree->gtType                = TYP_INT;
                    tree->AsIntCon()->gtIconVal = (int)ival;
                    if (vnStore != nullptr)
                    {
                        fgValueNumberTreeConst(tree);
                    }
                }
#endif // TARGET_64BIT

                return true;

            /* Operands that are in memory can usually be narrowed
               simply by changing their gtType */

            case GT_LCL_VAR:
                /* We only allow narrowing long -> int for a GT_LCL_VAR */
                if (dstSize == sizeof(int))
                {
                    goto NARROW_IND;
                }
                break;

            case GT_CLS_VAR:
            case GT_LCL_FLD:
                goto NARROW_IND;
            default:
                break;
        }

        noway_assert(doit == false);
        return false;
    }

    if (kind & (GTK_BINOP | GTK_UNOP))
    {
        GenTree* op1;
        op1 = tree->AsOp()->gtOp1;
        GenTree* op2;
        op2 = tree->AsOp()->gtOp2;

        switch (tree->gtOper)
        {
            case GT_AND:
                noway_assert(genActualType(tree->gtType) == genActualType(op1->gtType));
                noway_assert(genActualType(tree->gtType) == genActualType(op2->gtType));

                GenTree* opToNarrow;
                opToNarrow = nullptr;
                GenTree** otherOpPtr;
                otherOpPtr = nullptr;
                bool foundOperandThatBlocksNarrowing;
                foundOperandThatBlocksNarrowing = false;

                // If 'dstt' is unsigned and one of the operands can be narrowed into 'dsst',
                // the result of the GT_AND will also fit into 'dstt' and can be narrowed.
                // The same is true if one of the operands is an int const and can be narrowed into 'dsst'.
                if (!gtIsActiveCSE_Candidate(op2) && ((op2->gtOper == GT_CNS_INT) || varTypeIsUnsigned(dstt)))
                {
                    if (optNarrowTree(op2, srct, dstt, NoVNPair, false))
                    {
                        opToNarrow = op2;
                        otherOpPtr = &tree->AsOp()->gtOp1;
                    }
                    else
                    {
                        foundOperandThatBlocksNarrowing = true;
                    }
                }

                if ((opToNarrow == nullptr) && !gtIsActiveCSE_Candidate(op1) &&
                    ((op1->gtOper == GT_CNS_INT) || varTypeIsUnsigned(dstt)))
                {
                    if (optNarrowTree(op1, srct, dstt, NoVNPair, false))
                    {
                        opToNarrow = op1;
                        otherOpPtr = &tree->AsOp()->gtOp2;
                    }
                    else
                    {
                        foundOperandThatBlocksNarrowing = true;
                    }
                }

                if (opToNarrow != nullptr)
                {
                    // We will change the type of the tree and narrow opToNarrow
                    //
                    if (doit)
                    {
                        tree->gtType = genActualType(dstt);
                        tree->SetVNs(vnpNarrow);

                        optNarrowTree(opToNarrow, srct, dstt, NoVNPair, true);
                        // We may also need to cast away the upper bits of *otherOpPtr
                        if (srcSize == 8)
                        {
                            assert(tree->gtType == TYP_INT);
                            GenTree* castOp = gtNewCastNode(TYP_INT, *otherOpPtr, false, TYP_INT);
#ifdef DEBUG
                            castOp->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif
                            *otherOpPtr = castOp;
                        }
                    }
                    return true;
                }

                if (foundOperandThatBlocksNarrowing)
                {
                    noway_assert(doit == false);
                    return false;
                }

                goto COMMON_BINOP;

            case GT_ADD:
            case GT_MUL:

                if (tree->gtOverflow() || varTypeIsSmall(dstt))
                {
                    noway_assert(doit == false);
                    return false;
                }
                FALLTHROUGH;

            case GT_OR:
            case GT_XOR:
                noway_assert(genActualType(tree->gtType) == genActualType(op1->gtType));
                noway_assert(genActualType(tree->gtType) == genActualType(op2->gtType));

            COMMON_BINOP:
                // Both operands must be narrowable (and not CSE candidates) for the
                // whole binary operation to be narrowable.
                if (gtIsActiveCSE_Candidate(op1) || gtIsActiveCSE_Candidate(op2) ||
                    !optNarrowTree(op1, srct, dstt, NoVNPair, doit) || !optNarrowTree(op2, srct, dstt, NoVNPair, doit))
                {
                    noway_assert(doit == false);
                    return false;
                }

                /* Simply change the type of the tree */

                if (doit)
                {
                    if (tree->gtOper == GT_MUL && (tree->gtFlags & GTF_MUL_64RSLT))
                    {
                        tree->gtFlags &= ~GTF_MUL_64RSLT;
                    }

                    tree->gtType = genActualType(dstt);
                    tree->SetVNs(vnpNarrow);
                }

                return true;

            case GT_IND:

            NARROW_IND:

                if ((dstSize > genTypeSize(tree->gtType)) &&
                    (varTypeIsUnsigned(dstt) && !varTypeIsUnsigned(tree->gtType)))
                {
                    return false;
                }

                /* Simply change the type of the tree */

                if (doit && (dstSize <= genTypeSize(tree->gtType)))
                {
                    if (!varTypeIsSmall(dstt))
                    {
                        dstt = varTypeToSigned(dstt);
                    }

                    tree->gtType = dstt;
                    tree->SetVNs(vnpNarrow);

                    /* Make sure we don't mess up the variable type */
                    if ((oper == GT_LCL_VAR) || (oper == GT_LCL_FLD))
                    {
                        tree->gtFlags |= GTF_VAR_CAST;
                    }
                }

                return true;

            case GT_EQ:
            case GT_NE:
            case GT_LT:
            case GT_LE:
            case GT_GT:
            case GT_GE:

                /* These can always be narrowed since they only represent 0 or 1 */
                return true;

            case GT_CAST:
            {
                var_types cast    = tree->CastToType();
                var_types oprt    = op1->TypeGet();
                unsigned  oprSize = genTypeSize(oprt);

                if (cast != srct)
                {
                    return false;
                }

                if (varTypeIsIntegralOrI(dstt) != varTypeIsIntegralOrI(oprt))
                {
                    return false;
                }

                if (tree->gtOverflow())
                {
                    return false;
                }

                /* Is this a cast from the type we're narrowing to or a smaller one? */

                if (oprSize <= dstSize)
                {
                    /* Bash the target type of the cast */

                    if (doit)
                    {
                        if (!varTypeIsSmall(dstt))
                        {
                            dstt = varTypeToSigned(dstt);
                        }

                        if ((oprSize == dstSize) &&
                            ((varTypeIsUnsigned(dstt) == varTypeIsUnsigned(oprt)) || !varTypeIsSmall(dstt)))
                        {
                            // Same size and there is no signedness mismatch for small types: change the CAST
                            // into a NOP

                            JITDUMP("Cast operation has no effect, bashing [%06d] GT_CAST into a GT_NOP.\n",
                                    dspTreeID(tree));

                            tree->ChangeOper(GT_NOP);
                            tree->gtType = dstt;
                            // Clear the GTF_UNSIGNED flag, as it may have been set on the cast node
                            tree->gtFlags &= ~GTF_UNSIGNED;
                            tree->AsOp()->gtOp2 = nullptr;
                            tree->gtVNPair      = op1->gtVNPair; // Set to op1's ValueNumber
                        }
                        else
                        {
                            // oprSize is smaller or there is a signedness mismatch for small types

                            // Change the CastToType in the GT_CAST node
                            tree->CastToType() = dstt;

                            // The result type of a GT_CAST is never a small type.
                            // Use genActualType to widen dstt when it is a small types.
                            tree->gtType = genActualType(dstt);
                            tree->SetVNs(vnpNarrow);
                        }
                    }

                    return true;
                }
            }
                return false;

            case GT_COMMA:
                if (!gtIsActiveCSE_Candidate(op2) && optNarrowTree(op2, srct, dstt, vnpNarrow, doit))
                {
                    /* Simply change the type of the tree */

                    if (doit)
                    {
                        tree->gtType = genActualType(dstt);
                        tree->SetVNs(vnpNarrow);
                    }
                    return true;
                }
                return false;

            default:
                noway_assert(doit == false);
                return false;
        }
    }

    return false;
}

/*****************************************************************************
 *
 *  The following logic figures out whether the given variable is assigned
 *  somewhere in a list of basic blocks (or in an entire loop).
 */

Compiler::fgWalkResult Compiler::optIsVarAssgCB(GenTree** pTree, fgWalkData* data)
{
    GenTree* tree = *pTree;

    if (tree->OperIs(GT_ASG))
    {
        GenTree*   dest     = tree->AsOp()->gtOp1;
        genTreeOps destOper = dest->OperGet();

        isVarAssgDsc* desc = (isVarAssgDsc*)data->pCallbackData;
        assert(desc && desc->ivaSelf == desc);

        if (destOper == GT_LCL_VAR)
        {
            unsigned tvar = dest->AsLclVarCommon()->GetLclNum();
            if (tvar < lclMAX_ALLSET_TRACKED)
            {
                AllVarSetOps::AddElemD(data->compiler, desc->ivaMaskVal, tvar);
            }
            else
            {
                desc->ivaMaskIncomplete = true;
            }

            if (tvar == desc->ivaVar)
            {
                if (tree != desc->ivaSkip)
                {
                    return WALK_ABORT;
                }
            }
        }
        else if (destOper == GT_LCL_FLD)
        {
            /* We can't track every field of every var. Moreover, indirections
               may access different parts of the var as different (but
               overlapping) fields. So just treat them as indirect accesses */

            // unsigned    lclNum = dest->AsLclFld()->GetLclNum();
            // noway_assert(lvaTable[lclNum].lvAddrTaken);

            varRefKinds refs = varTypeIsGC(tree->TypeGet()) ? VR_IND_REF : VR_IND_SCL;
            desc->ivaMaskInd = varRefKinds(desc->ivaMaskInd | refs);
        }
        else if (destOper == GT_CLS_VAR)
        {
            desc->ivaMaskInd = varRefKinds(desc->ivaMaskInd | VR_GLB_VAR);
        }
        else if (destOper == GT_IND)
        {
            /* Set the proper indirection bits */

            varRefKinds refs = varTypeIsGC(tree->TypeGet()) ?
                                                            VR_IND_REF : VR_IND_SCL;
            desc->ivaMaskInd = varRefKinds(desc->ivaMaskInd | refs);
        }
    }
    else if (tree->gtOper == GT_CALL)
    {
        isVarAssgDsc* desc = (isVarAssgDsc*)data->pCallbackData;
        assert(desc && desc->ivaSelf == desc);

        desc->ivaMaskCall = optCallInterf(tree->AsCall());
    }

    return WALK_CONTINUE;
}

/*****************************************************************************/

// Returns true if 'var' is assigned anywhere in the block range [beg..end],
// ignoring an assignment rooted at the 'skip' tree (which may be nullptr).
bool Compiler::optIsVarAssigned(BasicBlock* beg, BasicBlock* end, GenTree* skip, unsigned var)
{
    bool         result;
    isVarAssgDsc desc;

    desc.ivaSkip = skip;
#ifdef DEBUG
    desc.ivaSelf = &desc;
#endif
    desc.ivaVar      = var;
    desc.ivaMaskCall = CALLINT_NONE;
    AllVarSetOps::AssignNoCopy(this, desc.ivaMaskVal, AllVarSetOps::MakeEmpty(this));

    for (;;)
    {
        noway_assert(beg != nullptr);

        for (Statement* const stmt : beg->Statements())
        {
            // The walker callback aborts the walk as soon as it sees an
            // assignment to 'var' (other than 'skip').
            if (fgWalkTreePre(stmt->GetRootNodePointer(), optIsVarAssgCB, &desc) != WALK_CONTINUE)
            {
                result = true;
                goto DONE;
            }
        }

        if (beg == end)
        {
            break;
        }

        beg = beg->bbNext;
    }

    result = false;

DONE:

    return result;
}

/*****************************************************************************
 *  Is "var" assigned in the loop "lnum" ?
 */

bool Compiler::optIsVarAssgLoop(unsigned lnum, unsigned var)
{
    assert(lnum < optLoopCount);

    if (var < lclMAX_ALLSET_TRACKED)
    {
        // Tracked variable: use the (cached) loop-assigned variable set.
        ALLVARSET_TP vs(AllVarSetOps::MakeSingleton(this, var));
        return optIsSetAssgLoop(lnum, vs) != 0;
    }
    else
    {
        // Untracked variable: walk the loop's blocks directly.
        return optIsVarAssigned(optLoopTable[lnum].lpHead->bbNext, optLoopTable[lnum].lpBottom, nullptr, var);
    }
}

/*****************************************************************************/

// Returns non-zero if any variable in 'vars' (or any indirection of a kind in
// 'inds') may be assigned within loop 'lnum'. Caches the per-loop assignment
// summary in the loop table (LPFLG_ASGVARS_YES) on first use.
int Compiler::optIsSetAssgLoop(unsigned lnum, ALLVARSET_VALARG_TP vars, varRefKinds inds)
{
    noway_assert(lnum < optLoopCount);
    LoopDsc* loop = &optLoopTable[lnum];

    /* Do we already know what variables are assigned within this loop? */

    if (!(loop->lpFlags & LPFLG_ASGVARS_YES))
    {
        isVarAssgDsc desc;

        /* Prepare the descriptor used by the tree walker call-back */

        desc.ivaVar  = (unsigned)-1;
        desc.ivaSkip = nullptr;
#ifdef DEBUG
        desc.ivaSelf = &desc;
#endif
        AllVarSetOps::AssignNoCopy(this, desc.ivaMaskVal, AllVarSetOps::MakeEmpty(this));
        desc.ivaMaskInd        = VR_NONE;
        desc.ivaMaskCall       = CALLINT_NONE;
        desc.ivaMaskIncomplete = false;

        /* Now walk all the statements of the loop */

        for (BasicBlock* const block : loop->LoopBlocks())
        {
            for (Statement* const stmt : block->NonPhiStatements())
            {
                fgWalkTreePre(stmt->GetRootNodePointer(), optIsVarAssgCB, &desc);

                if (desc.ivaMaskIncomplete)
                {
                    loop->lpFlags |= LPFLG_ASGVARS_INC;
                }
            }
        }

        AllVarSetOps::Assign(this, loop->lpAsgVars, desc.ivaMaskVal);
        loop->lpAsgInds = desc.ivaMaskInd;
        loop->lpAsgCall = desc.ivaMaskCall;

        /* Now we know what variables are assigned in the loop */

        loop->lpFlags |= LPFLG_ASGVARS_YES;
    }

    /* Now we can finally test the caller's mask against the loop's */
    if (!AllVarSetOps::IsEmptyIntersection(this, loop->lpAsgVars, vars) || (loop->lpAsgInds & inds))
    {
        return 1;
    }

    // No direct assignment found; check whether a call in the loop could
    // interfere with the requested indirection kinds.
    switch (loop->lpAsgCall)
    {
        case CALLINT_ALL:

            /* Can't hoist if the call might have side effect on an indirection. */

            if (loop->lpAsgInds != VR_NONE)
            {
                return 1;
            }

            break;

        case CALLINT_REF_INDIRS:

            /* Can't hoist if the call might have side effect on an ref indirection. */

            if (loop->lpAsgInds & VR_IND_REF)
            {
                return 1;
            }

            break;

        case CALLINT_SCL_INDIRS:

            /* Can't hoist if the call might have side effect on an non-ref indirection. */

            if (loop->lpAsgInds & VR_IND_SCL)
            {
                return 1;
            }

            break;

        case CALLINT_ALL_INDIRS:

            /* Can't hoist if the call might have side effect on any indirection.
             */

            if (loop->lpAsgInds & (VR_IND_REF | VR_IND_SCL))
            {
                return 1;
            }

            break;

        case CALLINT_NONE:

            /* Other helpers kill nothing */

            break;

        default:
            noway_assert(!"Unexpected lpAsgCall value");
    }

    return 0;
}

//------------------------------------------------------------------------------
// optPerformHoistExpr: clone the expression 'origExpr' (which lives in block
// 'exprBb') and append the clone, marked for CSE, to the pre-header of loop
// 'lnum'.
//
void Compiler::optPerformHoistExpr(GenTree* origExpr, BasicBlock* exprBb, unsigned lnum)
{
    assert(exprBb != nullptr);

#ifdef DEBUG
    if (verbose)
    {
        printf("\nHoisting a copy of ");
        printTreeID(origExpr);
        printf(" from " FMT_BB " into PreHeader " FMT_BB " for loop " FMT_LP " <" FMT_BB ".." FMT_BB ">:\n",
               exprBb->bbNum, optLoopTable[lnum].lpHead->bbNum, lnum, optLoopTable[lnum].lpTop->bbNum,
               optLoopTable[lnum].lpBottom->bbNum);
        gtDispTree(origExpr);
        printf("\n");
    }
#endif

    // Create a copy of the expression and mark it for CSE's.
    GenTree* hoistExpr = gtCloneExpr(origExpr, GTF_MAKE_CSE);

    // The hoist Expr does not have to computed into a specific register,
    // so clear the RegNum if it was set in the original expression
    hoistExpr->ClearRegNum();

    // Copy any loop memory dependence.
    optCopyLoopMemoryDependence(origExpr, hoistExpr);

    // At this point we should have a cloned expression, marked with the GTF_MAKE_CSE flag
    assert(hoistExpr != origExpr);
    assert(hoistExpr->gtFlags & GTF_MAKE_CSE);

    GenTree* hoist = hoistExpr;
    // The value of the expression isn't used (unless it's an assignment).
    if (hoistExpr->OperGet() != GT_ASG)
    {
        hoist = gtUnusedValNode(hoistExpr);
    }

    /* Put the statement in the preheader */

    INDEBUG(optLoopTable[lnum].lpValidatePreHeader());

    BasicBlock* preHead = optLoopTable[lnum].lpHead;

    // fgMorphTree requires that compCurBB be the block that contains
    // (or in this case, will contain) the expression.
    compCurBB = preHead;
    hoist     = fgMorphTree(hoist);

    // Conservatively propagate index/length and null-check flags from the
    // source block to the pre-header.
    preHead->bbFlags |= (exprBb->bbFlags & (BBF_HAS_IDX_LEN | BBF_HAS_NULLCHECK));

    Statement* hoistStmt = gtNewStmt(hoist);

    // Simply append the statement at the end of the preHead's list.
    Statement* firstStmt = preHead->firstStmt();
    if (firstStmt != nullptr)
    {
        /* append after last statement */

        Statement* lastStmt = preHead->lastStmt();
        assert(lastStmt->GetNextStmt() == nullptr);

        lastStmt->SetNextStmt(hoistStmt);
        hoistStmt->SetPrevStmt(lastStmt);
        firstStmt->SetPrevStmt(hoistStmt);
    }
    else
    {
        /* Empty pre-header - store the single statement in the block */

        preHead->bbStmtList = hoistStmt;
        hoistStmt->SetPrevStmt(hoistStmt);
    }

    hoistStmt->SetNextStmt(nullptr);

#ifdef DEBUG
    if (verbose)
    {
        printf("This hoisted copy placed in PreHeader (" FMT_BB "):\n", preHead->bbNum);
        gtDispTree(hoist);
        printf("\n");
    }
#endif

    if (fgStmtListThreaded)
    {
        gtSetStmtInfo(hoistStmt);
        fgSetStmtSeq(hoistStmt);
    }

#ifdef DEBUG
    if (m_nodeTestData != nullptr)
    {
        // What is the depth of the loop "lnum"?
        ssize_t  depth    = 0;
        unsigned lnumIter = lnum;
        while (optLoopTable[lnumIter].lpParent != BasicBlock::NOT_IN_LOOP)
        {
            depth++;
            lnumIter = optLoopTable[lnumIter].lpParent;
        }

        NodeToTestDataMap* testData = GetNodeTestData();

        TestLabelAndNum tlAndN;
        if (testData->Lookup(origExpr, &tlAndN) && tlAndN.m_tl == TL_LoopHoist)
        {
            if (tlAndN.m_num == -1)
            {
                printf("Node ");
                printTreeID(origExpr);
                printf(" was declared 'do not hoist', but is being hoisted.\n");
                assert(false);
            }
            else if (tlAndN.m_num != depth)
            {
                printf("Node ");
                printTreeID(origExpr);
                printf(" was declared as hoistable from loop at nesting depth %d; actually hoisted from loop at depth "
                       "%d.\n",
                       tlAndN.m_num, depth);
                assert(false);
            }
            else
            {
                // We've correctly hoisted this, so remove the annotation. Later, we'll check for any remaining "must
                // hoist" annotations.
                testData->Remove(origExpr);

                // Now we insert an annotation to make sure that "hoistExpr" is actually CSE'd.
                tlAndN.m_tl  = TL_CSE_Def;
                tlAndN.m_num = m_loopHoistCSEClass++;
                testData->Set(hoistExpr, tlAndN);
            }
        }
    }
#endif

#if LOOP_HOIST_STATS
    if (!m_curLoopHasHoistedExpression)
    {
        m_loopsWithHoistedExpressions++;
        m_curLoopHasHoistedExpression = true;
    }
    m_totalHoistedExpressions++;
#endif // LOOP_HOIST_STATS
}

void Compiler::optHoistLoopCode()
{
    // If we don't have any loops in the method then take an early out now.
    if (optLoopCount == 0)
    {
        JITDUMP("\nNo loops; no hoisting\n");
        return;
    }

#ifdef DEBUG
    unsigned jitNoHoist = JitConfig.JitNoHoist();
    if (jitNoHoist > 0)
    {
        JITDUMP("\nJitNoHoist set; no hoisting\n");
        return;
    }
#endif

#if 0
    // The code in this #if has been useful in debugging loop hoisting issues, by
    // enabling selective enablement of the loop hoisting optimization according to
    // method hash.
#ifdef DEBUG
    unsigned methHash = info.compMethodHash();
    char* lostr = getenv("loophoisthashlo");
    unsigned methHashLo = 0;
    if (lostr != NULL)
    {
        sscanf_s(lostr, "%x", &methHashLo);
        // methHashLo = (unsigned(atoi(lostr)) << 2);   // So we don't have to use negative numbers.
    }
    char* histr = getenv("loophoisthashhi");
    unsigned methHashHi = UINT32_MAX;
    if (histr != NULL)
    {
        sscanf_s(histr, "%x", &methHashHi);
        // methHashHi = (unsigned(atoi(histr)) << 2);   // So we don't have to use negative numbers.
    }
    if (methHash < methHashLo || methHash > methHashHi)
        return;
    printf("Doing loop hoisting in %s (0x%x).\n", info.compFullName, methHash);
#endif // DEBUG
#endif // 0     -- debugging loop hoisting issues

#ifdef DEBUG
    if (verbose)
    {
        printf("\n*************** In optHoistLoopCode()\n");
        printf("Blocks/Trees before phase\n");
        fgDispBasicBlocks(true);
        fgDispHandlerTab();
        optPrintLoopTable();
    }
#endif

    // Consider all the loop nests, in outer-to-inner order (thus hoisting expressions outside the largest loop in
    // which they are invariant.)
    LoopHoistContext hoistCtxt(this);

    for (unsigned lnum = 0; lnum < optLoopCount; lnum++)
    {
        if (optLoopTable[lnum].lpFlags & LPFLG_REMOVED)
        {
            JITDUMP("\nLoop " FMT_LP " was removed\n", lnum);
            continue;
        }

        // Only start at top-level (outermost) loops; optHoistLoopNest recurses
        // into the children of each loop.
        if (optLoopTable[lnum].lpParent == BasicBlock::NOT_IN_LOOP)
        {
            optHoistLoopNest(lnum, &hoistCtxt);
        }
    }

#if DEBUG
    if (fgModified)
    {
        if (verbose)
        {
            printf("Blocks/Trees after optHoistLoopCode() modified flowgraph\n");
            fgDispBasicBlocks(true);
            printf("");
        }

        // Make sure that the predecessor lists are accurate
        fgDebugCheckBBlist();
    }
#endif

#ifdef DEBUG
    // Test Data stuff..
    // If we have no test data, early out.
    if (m_nodeTestData == nullptr)
    {
        return;
    }
    NodeToTestDataMap* testData = GetNodeTestData();
    for (NodeToTestDataMap::KeyIterator ki = testData->Begin(); !ki.Equal(testData->End()); ++ki)
    {
        TestLabelAndNum tlAndN;
        GenTree*        node = ki.Get();
        bool            b    = testData->Lookup(node, &tlAndN);
        assert(b);
        if (tlAndN.m_tl != TL_LoopHoist)
        {
            continue;
        }
        // Otherwise, it is a loop hoist annotation.
        assert(tlAndN.m_num < 100); // >= 100 indicates nested static field address, should already have been moved.
        if (tlAndN.m_num >= 0)
        {
            printf("Node ");
            printTreeID(node);
            printf(" was declared 'must hoist', but has not been hoisted.\n");
            assert(false);
        }
    }
#endif // DEBUG
}

//------------------------------------------------------------------------
// optHoistLoopNest: hoist from loop "lnum", then recursively from each of
//   its nested loops, tracking the VNs already hoisted in enclosing loops
//   so nested loops don't hoist them again.
//
// Arguments:
//   lnum      - index in optLoopTable of the loop to process
//   hoistCtxt - context shared across the whole loop nest
//
void Compiler::optHoistLoopNest(unsigned lnum, LoopHoistContext* hoistCtxt)
{
    // Do this loop, then recursively do all nested loops.
    JITDUMP("\n%s " FMT_LP "\n", optLoopTable[lnum].lpParent == BasicBlock::NOT_IN_LOOP ? "Loop Nest" : "Nested Loop",
            lnum);

#if LOOP_HOIST_STATS
    // Record stats
    m_curLoopHasHoistedExpression = false;
    m_loopsConsidered++;
#endif // LOOP_HOIST_STATS

    optHoistThisLoop(lnum, hoistCtxt);

    VNSet* hoistedInCurLoop = hoistCtxt->ExtractHoistedInCurLoop();

    if (optLoopTable[lnum].lpChild != BasicBlock::NOT_IN_LOOP)
    {
        // Add the ones hoisted in "lnum" to "hoistedInParents" for any nested loops.
        // TODO-Cleanup: we should have a set abstraction for loops.
        if (hoistedInCurLoop != nullptr)
        {
            for (VNSet::KeyIterator keys = hoistedInCurLoop->Begin(); !keys.Equal(hoistedInCurLoop->End()); ++keys)
            {
#ifdef DEBUG
                bool b;
                assert(!hoistCtxt->m_hoistedInParentLoops.Lookup(keys.Get(), &b));
#endif
                hoistCtxt->m_hoistedInParentLoops.Set(keys.Get(), true);
            }
        }

        for (unsigned child = optLoopTable[lnum].lpChild; child != BasicBlock::NOT_IN_LOOP;
             child          = optLoopTable[child].lpSibling)
        {
            optHoistLoopNest(child, hoistCtxt);
        }

        // Now remove them.
        // TODO-Cleanup: we should have a set abstraction for loops.
        if (hoistedInCurLoop != nullptr)
        {
            for (VNSet::KeyIterator keys = hoistedInCurLoop->Begin(); !keys.Equal(hoistedInCurLoop->End()); ++keys)
            {
                // Note that we asserted when we added these that they hadn't been members, so removing is appropriate.
                hoistCtxt->m_hoistedInParentLoops.Remove(keys.Get());
            }
        }
    }
}

//------------------------------------------------------------------------
// optHoistThisLoop: compute register-pressure counts for one loop, find the
//   set of definitely-executed blocks, and hoist candidates from them.
//
// Arguments:
//   lnum      - index in optLoopTable of the loop to process
//   hoistCtxt - context shared across the whole loop nest
//
void Compiler::optHoistThisLoop(unsigned lnum, LoopHoistContext* hoistCtxt)
{
    LoopDsc* pLoopDsc = &optLoopTable[lnum];

    /* If loop was removed continue */

    if (pLoopDsc->lpFlags & LPFLG_REMOVED)
    {
        JITDUMP(" ... not hoisting " FMT_LP ": removed\n", lnum);
        return;
    }

    // Ensure the per-loop sets/tables are empty.
    hoistCtxt->m_curLoopVnInvariantCache.RemoveAll();

#ifdef DEBUG
    if (verbose)
    {
        printf("optHoistThisLoop for loop " FMT_LP " <" FMT_BB ".." FMT_BB ">:\n", lnum, pLoopDsc->lpTop->bbNum,
               pLoopDsc->lpBottom->bbNum);
        printf(" Loop body %s a call\n", (pLoopDsc->lpFlags & LPFLG_CONTAINS_CALL) ? "contains" : "does not contain");
        printf(" Loop has %s\n", (pLoopDsc->lpExitCnt == 1) ? "single exit" : "multiple exits");
    }
#endif

    // Variables both live-in/out of the loop and used/defined within it.
    VARSET_TP loopVars(VarSetOps::Intersection(this, pLoopDsc->lpVarInOut, pLoopDsc->lpVarUseDef));

    pLoopDsc->lpVarInOutCount    = VarSetOps::Count(this, pLoopDsc->lpVarInOut);
    pLoopDsc->lpLoopVarCount     = VarSetOps::Count(this, loopVars);
    pLoopDsc->lpHoistedExprCount = 0;

#ifndef TARGET_64BIT
    unsigned longVarsCount = VarSetOps::Count(this, lvaLongVars);

    if (longVarsCount > 0)
    {
        // Since 64-bit variables take up two registers on 32-bit targets, we increase
        // the Counts such that each TYP_LONG variable counts twice.
        //
        VARSET_TP loopLongVars(VarSetOps::Intersection(this, loopVars, lvaLongVars));
        VARSET_TP inOutLongVars(VarSetOps::Intersection(this, pLoopDsc->lpVarInOut, lvaLongVars));

#ifdef DEBUG
        if (verbose)
        {
            printf("\n LONGVARS(%d)=", VarSetOps::Count(this, lvaLongVars));
            lvaDispVarSet(lvaLongVars);
        }
#endif
        pLoopDsc->lpLoopVarCount += VarSetOps::Count(this, loopLongVars);
        pLoopDsc->lpVarInOutCount += VarSetOps::Count(this, inOutLongVars);
    }
#endif // !TARGET_64BIT

#ifdef DEBUG
    if (verbose)
    {
        printf("\n USEDEF (%d)=", VarSetOps::Count(this, pLoopDsc->lpVarUseDef));
        lvaDispVarSet(pLoopDsc->lpVarUseDef);

        printf("\n INOUT (%d)=", pLoopDsc->lpVarInOutCount);
        lvaDispVarSet(pLoopDsc->lpVarInOut);

        printf("\n LOOPVARS(%d)=", pLoopDsc->lpLoopVarCount);
        lvaDispVarSet(loopVars);
        printf("\n");
    }
#endif

    unsigned floatVarsCount = VarSetOps::Count(this, lvaFloatVars);

    if (floatVarsCount > 0)
    {
        // Track floating-point variables separately, since they compete for a
        // different register file; exclude them from the integer counts.
        VARSET_TP loopFPVars(VarSetOps::Intersection(this, loopVars, lvaFloatVars));
        VARSET_TP inOutFPVars(VarSetOps::Intersection(this, pLoopDsc->lpVarInOut, lvaFloatVars));

        pLoopDsc->lpLoopVarFPCount     = VarSetOps::Count(this, loopFPVars);
        pLoopDsc->lpVarInOutFPCount    = VarSetOps::Count(this, inOutFPVars);
        pLoopDsc->lpHoistedFPExprCount = 0;

        pLoopDsc->lpLoopVarCount -= pLoopDsc->lpLoopVarFPCount;
        pLoopDsc->lpVarInOutCount -= pLoopDsc->lpVarInOutFPCount;

#ifdef DEBUG
        if (verbose)
        {
            printf(" INOUT-FP(%d)=", pLoopDsc->lpVarInOutFPCount);
            lvaDispVarSet(inOutFPVars);

            printf("\n LOOPV-FP(%d)=", pLoopDsc->lpLoopVarFPCount);
            lvaDispVarSet(loopFPVars);
            printf("\n");
        }
#endif
    }
    else // (floatVarsCount == 0)
    {
        pLoopDsc->lpLoopVarFPCount     = 0;
        pLoopDsc->lpVarInOutFPCount    = 0;
        pLoopDsc->lpHoistedFPExprCount = 0;
    }

    // Find the set of definitely-executed blocks.
    // Ideally, the definitely-executed blocks are the ones that post-dominate the entry block.
    // Until we have post-dominators, we'll special-case for single-exit blocks.
    //
    // Todo: it is not clear if this is a correctness requirement or a profitability heuristic.
    // It seems like the latter. Ideally have enough safeguards to prevent hoisting exception
    // or side-effect dependent things.
    //
    // We really should consider hoisting from conditionally executed blocks, if they are frequently executed
    // and it is safe to evaluate the tree early.
    //
    // In particular if we have a loop nest, when scanning the outer loop we should consider hoisting from blocks
    // in enclosed loops. However, this is likely to scale poorly, and we really should instead start
    // hoisting inner to outer.
    //
    ArrayStack<BasicBlock*> defExec(getAllocatorLoopHoist());
    if (pLoopDsc->lpExitCnt == 1)
    {
        assert(pLoopDsc->lpExit != nullptr);
        JITDUMP(" Only considering hoisting in blocks that dominate exit block " FMT_BB "\n", pLoopDsc->lpExit->bbNum);
        BasicBlock* cur = pLoopDsc->lpExit;
        // Push dominators, until we reach "entry" or exit the loop.
        while (cur != nullptr && pLoopDsc->lpContains(cur) && cur != pLoopDsc->lpEntry)
        {
            defExec.Push(cur);
            cur = cur->bbIDom;
        }
        // If we didn't reach the entry block, give up and *just* push the entry block.
        if (cur != pLoopDsc->lpEntry)
        {
            JITDUMP(" -- odd, we didn't reach entry from exit via dominators. Only considering hoisting in entry "
                    "block " FMT_BB "\n",
                    pLoopDsc->lpEntry->bbNum);
            defExec.Reset();
        }
        defExec.Push(pLoopDsc->lpEntry);
    }
    else // More than one exit
    {
        JITDUMP(" only considering hoisting in entry block " FMT_BB "\n", pLoopDsc->lpEntry->bbNum);
        // We'll assume that only the entry block is definitely executed.
        // We could in the future do better.
        defExec.Push(pLoopDsc->lpEntry);
    }

    optHoistLoopBlocks(lnum, &defExec, hoistCtxt);
}

//------------------------------------------------------------------------
// optIsProfitableToHoistTree: decide whether hoisting "tree" out of loop
//   "lnum" is likely profitable, based on estimated register pressure.
//
// Arguments:
//   tree - the candidate tree
//   lnum - index in optLoopTable of the loop
//
// Returns:
//   true if hoisting the tree is considered profitable.
//
bool Compiler::optIsProfitableToHoistTree(GenTree* tree, unsigned lnum)
{
    LoopDsc* pLoopDsc = &optLoopTable[lnum];

    bool loopContainsCall = (pLoopDsc->lpFlags & LPFLG_CONTAINS_CALL) != 0;

    int availRegCount;
    int hoistedExprCount;
    int loopVarCount;
    int varInOutCount;

    // Select the FP or integer register-pressure counts depending on the tree's type.
    if (varTypeIsFloating(tree))
    {
        hoistedExprCount = pLoopDsc->lpHoistedFPExprCount;
        loopVarCount     = pLoopDsc->lpLoopVarFPCount;
        varInOutCount    = pLoopDsc->lpVarInOutFPCount;

        availRegCount = CNT_CALLEE_SAVED_FLOAT;
        if (!loopContainsCall)
        {
            availRegCount += CNT_CALLEE_TRASH_FLOAT - 1;
        }
#ifdef TARGET_ARM
        // For ARM each double takes two FP registers
        // For now on ARM we won't track singles/doubles
        // and instead just assume that we always have doubles.
        //
        availRegCount /= 2;
#endif
    }
    else
    {
        hoistedExprCount = pLoopDsc->lpHoistedExprCount;
        loopVarCount     = pLoopDsc->lpLoopVarCount;
        varInOutCount    = pLoopDsc->lpVarInOutCount;

        availRegCount = CNT_CALLEE_SAVED - 1;
        if (!loopContainsCall)
        {
            availRegCount += CNT_CALLEE_TRASH - 1;
        }
#ifndef TARGET_64BIT
        // For our 32-bit targets Long types take two registers.
        if (varTypeIsLong(tree->TypeGet()))
        {
            availRegCount = (availRegCount + 1) / 2;
        }
#endif
    }

    // decrement the availRegCount by the count of expression that we have already hoisted.
availRegCount -= hoistedExprCount; // the variables that are read/written inside the loop should // always be a subset of the InOut variables for the loop assert(loopVarCount <= varInOutCount); // When loopVarCount >= availRegCount we believe that all of the // available registers will get used to hold LclVars inside the loop. // This pessimistically assumes that each loopVar has a conflicting // lifetime with every other loopVar. // For this case we will hoist the expression only if is profitable // to place it in a stack home location (GetCostEx() >= 2*IND_COST_EX) // as we believe it will be placed in the stack or one of the other // loopVars will be spilled into the stack // if (loopVarCount >= availRegCount) { // Don't hoist expressions that are not heavy: tree->GetCostEx() < (2*IND_COST_EX) if (tree->GetCostEx() < (2 * IND_COST_EX)) { JITDUMP(" tree cost too low: %d < %d (loopVarCount %u >= availableRegCount %u)\n", tree->GetCostEx(), 2 * IND_COST_EX, loopVarCount, availRegCount); return false; } } // When varInOutCount < availRegCount we are know that there are // some available register(s) when we enter the loop body. // When varInOutCount == availRegCount there often will be a register // available when we enter the loop body, since a loop often defines a // LclVar on exit or there is often at least one LclVar that is worth // spilling to the stack to make way for this hoisted expression. 
// So we are willing hoist an expression with GetCostEx() == MIN_CSE_COST // if (varInOutCount > availRegCount) { // Don't hoist expressions that barely meet CSE cost requirements: tree->GetCostEx() == MIN_CSE_COST if (tree->GetCostEx() <= MIN_CSE_COST + 1) { JITDUMP(" tree not good CSE: %d <= %d (varInOutCount %u > availableRegCount %u)\n", tree->GetCostEx(), 2 * MIN_CSE_COST + 1, varInOutCount, availRegCount) return false; } } return true; } //------------------------------------------------------------------------ // optRecordLoopMemoryDependence: record that tree's value number // is dependent on a particular memory VN // // Arguments: // tree -- tree in question // block -- block containing tree // memoryVN -- VN for a "map" from a select operation encounterd // while computing the tree's VN // // Notes: // Only tracks trees in loops, and memory updates in the same loop nest. // So this is a coarse-grained dependence that is only usable for // hoisting tree out of its enclosing loops. // void Compiler::optRecordLoopMemoryDependence(GenTree* tree, BasicBlock* block, ValueNum memoryVN) { // If tree is not in a loop, we don't need to track its loop dependence. // unsigned const loopNum = block->bbNatLoopNum; if (loopNum == BasicBlock::NOT_IN_LOOP) { return; } // Find the loop associated with this memory VN. // unsigned updateLoopNum = vnStore->LoopOfVN(memoryVN); if (updateLoopNum >= BasicBlock::MAX_LOOP_NUM) { // There should be only two special non-loop loop nums. // assert((updateLoopNum == BasicBlock::MAX_LOOP_NUM) || (updateLoopNum == BasicBlock::NOT_IN_LOOP)); // memoryVN defined outside of any loop, we can ignore. // JITDUMP(" ==> Not updating loop memory dependence of [%06u], memory " FMT_VN " not defined in a loop\n", dspTreeID(tree), memoryVN); return; } // If the loop was removed, then record the dependence in the nearest enclosing loop, if any. 
// while ((optLoopTable[updateLoopNum].lpFlags & LPFLG_REMOVED) != 0) { unsigned const updateParentLoopNum = optLoopTable[updateLoopNum].lpParent; if (updateParentLoopNum == BasicBlock::NOT_IN_LOOP) { // Memory VN was defined in a loop, but no longer. // JITDUMP(" ==> Not updating loop memory dependence of [%06u], memory " FMT_VN " no longer defined in a loop\n", dspTreeID(tree), memoryVN); break; } JITDUMP(" ==> " FMT_LP " removed, updating dependence to parent " FMT_LP "\n", updateLoopNum, updateParentLoopNum); updateLoopNum = updateParentLoopNum; } // If the update block is not the the header of a loop containing // block, we can also ignore the update. // if (!optLoopContains(updateLoopNum, loopNum)) { JITDUMP(" ==> Not updating loop memory dependence of [%06u]/" FMT_LP ", memory " FMT_VN "/" FMT_LP " is not defined in an enclosing loop\n", dspTreeID(tree), loopNum, memoryVN, updateLoopNum); return; } // If we already have a recorded a loop entry block for this // tree, see if the new update is for a more closely nested // loop. // NodeToLoopMemoryBlockMap* const map = GetNodeToLoopMemoryBlockMap(); BasicBlock* mapBlock = nullptr; if (map->Lookup(tree, &mapBlock)) { unsigned const mapLoopNum = mapBlock->bbNatLoopNum; // If the update loop contains the existing map loop, // the existing map loop is more constraining. So no // update needed. // if (optLoopContains(updateLoopNum, mapLoopNum)) { JITDUMP(" ==> Not updating loop memory dependence of [%06u]; alrady constrained to " FMT_LP " nested in " FMT_LP "\n", dspTreeID(tree), mapLoopNum, updateLoopNum); return; } } // MemoryVN now describes the most constraining loop memory dependence // we know of. Update the map. 
// JITDUMP(" ==> Updating loop memory dependence of [%06u] to " FMT_LP "\n", dspTreeID(tree), updateLoopNum); map->Set(tree, optLoopTable[updateLoopNum].lpEntry, NodeToLoopMemoryBlockMap::Overwrite); } //------------------------------------------------------------------------ // optCopyLoopMemoryDependence: record that tree's loop memory dependence // is the same as some other tree. // // Arguments: // fromTree -- tree to copy dependence from // toTree -- tree in question // void Compiler::optCopyLoopMemoryDependence(GenTree* fromTree, GenTree* toTree) { NodeToLoopMemoryBlockMap* const map = GetNodeToLoopMemoryBlockMap(); BasicBlock* mapBlock = nullptr; if (map->Lookup(fromTree, &mapBlock)) { map->Set(toTree, mapBlock); } } //------------------------------------------------------------------------ // optHoistLoopBlocks: Hoist invariant expression out of the loop. // // Arguments: // loopNum - The number of the loop // blocks - A stack of blocks belonging to the loop // hoistContext - The loop hoist context // // Assumptions: // The `blocks` stack contains the definitely-executed blocks in // the loop, in the execution order, starting with the loop entry // block on top of the stack. 
//
void Compiler::optHoistLoopBlocks(unsigned loopNum, ArrayStack<BasicBlock*>* blocks, LoopHoistContext* hoistContext)
{
    // Post-order tree visitor that marks each node as invariant/hoistable and
    // hands hoistable candidates to optHoistCandidate.
    class HoistVisitor : public GenTreeVisitor<HoistVisitor>
    {
        // Per-node state accumulated during the walk.
        class Value
        {
            GenTree* m_node;

        public:
            bool m_hoistable;      // node (and its subtree) can be hoisted
            bool m_cctorDependent; // node depends on a static class initialization
            bool m_invariant;      // node's value is loop invariant

#ifdef DEBUG
            const char* m_failReason;
#endif

            Value(GenTree* node) : m_node(node), m_hoistable(false), m_cctorDependent(false), m_invariant(false)
            {
#ifdef DEBUG
                m_failReason = "unset";
#endif
            }

            GenTree* Node()
            {
                return m_node;
            }
        };

        ArrayStack<Value> m_valueStack;
        bool              m_beforeSideEffect; // true while no global side effect has been seen yet
        unsigned          m_loopNum;
        LoopHoistContext* m_hoistContext;
        BasicBlock*       m_currentBlock;

        bool IsNodeHoistable(GenTree* node)
        {
            // TODO-CQ: This is a more restrictive version of a check that optIsCSEcandidate already does - it allows
            // a struct typed node if a class handle can be recovered from it.
            if (node->TypeGet() == TYP_STRUCT)
            {
                return false;
            }

            // Tree must be a suitable CSE candidate for us to be able to hoist it.
            return m_compiler->optIsCSEcandidate(node);
        }

        bool IsTreeVNInvariant(GenTree* tree)
        {
            ValueNum vn = tree->gtVNPair.GetLiberal();
            bool     vnIsInvariant =
                m_compiler->optVNIsLoopInvariant(vn, m_loopNum, &m_hoistContext->m_curLoopVnInvariantCache);

            // Even though VN is invariant in the loop (say a constant) its value may depend on position
            // of tree, so for loop hoisting we must also check that any memory read by tree
            // is also invariant in the loop.
            //
            if (vnIsInvariant)
            {
                vnIsInvariant = IsTreeLoopMemoryInvariant(tree);
            }
            return vnIsInvariant;
        }

        //------------------------------------------------------------------------
        // IsTreeLoopMemoryInvariant: determine if the value number of tree
        //   is dependent on the tree being executed within the current loop
        //
        // Arguments:
        //   tree -- tree in question
        //
        // Returns:
        //   true if tree could be evaluated just before loop and get the
        //   same value.
        //
        // Note:
        //   Calls are optimistically assumed to be invariant.
        //   Caller must do their own analysis for these tree types.
        //
        bool IsTreeLoopMemoryInvariant(GenTree* tree)
        {
            if (tree->IsCall())
            {
                // Calls are handled specially by hoisting, and loop memory dependence
                // must be checked by other means.
                //
                return true;
            }

            NodeToLoopMemoryBlockMap* const map            = m_compiler->GetNodeToLoopMemoryBlockMap();
            BasicBlock*                     loopEntryBlock = nullptr;
            if (map->Lookup(tree, &loopEntryBlock))
            {
                for (MemoryKind memoryKind : allMemoryKinds())
                {
                    ValueNum loopMemoryVN =
                        m_compiler->GetMemoryPerSsaData(loopEntryBlock->bbMemorySsaNumIn[memoryKind])
                            ->m_vnPair.GetLiberal();
                    if (!m_compiler->optVNIsLoopInvariant(loopMemoryVN, m_loopNum,
                                                          &m_hoistContext->m_curLoopVnInvariantCache))
                    {
                        return false;
                    }
                }
            }

            return true;
        }

    public:
        enum
        {
            ComputeStack      = false,
            DoPreOrder        = true,
            DoPostOrder       = true,
            DoLclVarsOnly     = false,
            UseExecutionOrder = true,
        };

        HoistVisitor(Compiler* compiler, unsigned loopNum, LoopHoistContext* hoistContext)
            : GenTreeVisitor(compiler)
            , m_valueStack(compiler->getAllocator(CMK_LoopHoist))
            , m_beforeSideEffect(true)
            , m_loopNum(loopNum)
            , m_hoistContext(hoistContext)
            , m_currentBlock(nullptr)
        {
        }

        // Walk each statement of "block", hoisting any statement root that was
        // determined to be hoistable.
        void HoistBlock(BasicBlock* block)
        {
            m_currentBlock = block;
            for (Statement* const stmt : block->NonPhiStatements())
            {
                WalkTree(stmt->GetRootNodePointer(), nullptr);
                Value& top = m_valueStack.TopRef();
                assert(top.Node() == stmt->GetRootNode());

                if (top.m_hoistable)
                {
                    m_compiler->optHoistCandidate(stmt->GetRootNode(), block, m_loopNum, m_hoistContext);
                }
                else
                {
                    JITDUMP(" [%06u] not %s: %s\n", dspTreeID(top.Node()),
                            top.m_invariant ? "invariant" : "hoistable", top.m_failReason);
                }

                m_valueStack.Reset();
            }

            // Only unconditionally executed blocks in the loop are visited (see optHoistThisLoop)
            // so after we're done visiting the first block we need to assume the worst, that the
            // blocks that are not visited have side effects.
            m_beforeSideEffect = false;
        }

        fgWalkResult PreOrderVisit(GenTree** use, GenTree* user)
        {
            GenTree* node = *use;
            m_valueStack.Emplace(node);
            return fgWalkResult::WALK_CONTINUE;
        }

        fgWalkResult PostOrderVisit(GenTree** use, GenTree* user)
        {
            GenTree* tree = *use;

            if (tree->OperIsLocal())
            {
                GenTreeLclVarCommon* lclVar = tree->AsLclVarCommon();
                unsigned             lclNum = lclVar->GetLclNum();

                // To be invariant a LclVar node must not be the LHS of an assignment ...
                bool isInvariant = !user->OperIs(GT_ASG) || (user->AsOp()->gtGetOp1() != tree);
                // and the variable must be in SSA ...
                isInvariant = isInvariant && m_compiler->lvaInSsa(lclNum) && lclVar->HasSsaName();
                // and the SSA definition must be outside the loop we're hoisting from ...
                isInvariant = isInvariant &&
                              !m_compiler->optLoopTable[m_loopNum].lpContains(
                                  m_compiler->lvaGetDesc(lclNum)->GetPerSsaData(lclVar->GetSsaNum())->GetBlock());
                // and the VN of the tree is considered invariant as well.
                //
                // TODO-CQ: This VN invariance check should not be necessary and in some cases it is conservative - it
                // is possible that the SSA def is outside the loop but VN does not understand what the node is doing
                // (e.g. LCL_FLD-based type reinterpretation) and assigns a "new, unique VN" to the node. This VN is
                // associated with the block where the node is, a loop block, and thus the VN is considered to not be
                // invariant.
                // On the other hand, it is possible for a SSA def to be inside the loop yet the use to be invariant,
                // if the defining expression is also invariant. In such a case the VN invariance would help but it is
                // blocked by the SSA invariance check.
                isInvariant = isInvariant && IsTreeVNInvariant(tree);

                Value& top = m_valueStack.TopRef();
                assert(top.Node() == tree);

                if (isInvariant)
                {
                    top.m_invariant = true;
                    // In general it doesn't make sense to hoist a local node but there are exceptions, for example
                    // LCL_FLD nodes (because then the variable cannot be enregistered and the node always turns
                    // into a memory access).
                    top.m_hoistable = IsNodeHoistable(tree);
                }

#ifdef DEBUG
                if (!isInvariant)
                {
                    top.m_failReason = "local, not rvalue / not in SSA / defined within current loop";
                }
                else if (!top.m_hoistable)
                {
                    top.m_failReason = "not handled by cse";
                }
#endif

                return fgWalkResult::WALK_CONTINUE;
            }

            // Initclass CLS_VARs and IconHandles are the base cases of cctor dependent trees.
            // In the IconHandle case, it's of course the dereference, rather than the constant itself, that is
            // truly dependent on the cctor. So a more precise approach would be to separately propagate
            // isCctorDependent and isAddressWhoseDereferenceWouldBeCctorDependent, but we don't for
            // simplicity/throughput; the constant itself would be considered non-hoistable anyway, since
            // optIsCSEcandidate returns false for constants.
            bool treeIsCctorDependent = ((tree->OperIs(GT_CLS_VAR) && ((tree->gtFlags & GTF_CLS_VAR_INITCLASS) != 0)) ||
                                         (tree->OperIs(GT_CNS_INT) && ((tree->gtFlags & GTF_ICON_INITCLASS) != 0)));
            bool treeIsInvariant          = true;
            bool treeHasHoistableChildren = false;
            int  childCount;

#ifdef DEBUG
            const char* failReason = "unknown";
#endif

            // Combine the per-child results that were pushed onto the value stack.
            for (childCount = 0; m_valueStack.TopRef(childCount).Node() != tree; childCount++)
            {
                Value& child = m_valueStack.TopRef(childCount);

                if (child.m_hoistable)
                {
                    treeHasHoistableChildren = true;
                }

                if (!child.m_invariant)
                {
                    treeIsInvariant = false;
                    INDEBUG(failReason = "variant child";)
                }

                if (child.m_cctorDependent)
                {
                    // Normally, a parent of a cctor-dependent tree is also cctor-dependent.
                    treeIsCctorDependent = true;

                    // Check for the case where we can stop propagating cctor-dependent upwards.
                    if (tree->OperIs(GT_COMMA) && (child.Node() == tree->gtGetOp2()))
                    {
                        GenTree* op1 = tree->gtGetOp1();
                        if (op1->OperIs(GT_CALL))
                        {
                            GenTreeCall* call = op1->AsCall();
                            if ((call->gtCallType == CT_HELPER) &&
                                s_helperCallProperties.MayRunCctor(eeGetHelperNum(call->gtCallMethHnd)))
                            {
                                // Hoisting the comma is ok because it would hoist the initialization along
                                // with the static field reference.
                                treeIsCctorDependent = false;
                                // Hoisting the static field without hoisting the initialization would be
                                // incorrect, make sure we consider the field (which we flagged as
                                // cctor-dependent) non-hoistable.
                                noway_assert(!child.m_hoistable);
                            }
                        }
                    }
                }
            }

            // If all the children of "tree" are hoistable, then "tree" itself can be hoisted,
            // unless it has a static var reference that can't be hoisted past its cctor call.
            bool treeIsHoistable = treeIsInvariant && !treeIsCctorDependent;

#ifdef DEBUG
            if (treeIsInvariant && !treeIsHoistable)
            {
                failReason = "cctor dependent";
            }
#endif

            // But we must see if anything else prevents "tree" from being hoisted.
            //
            if (treeIsInvariant)
            {
                if (treeIsHoistable)
                {
                    treeIsHoistable = IsNodeHoistable(tree);
                    if (!treeIsHoistable)
                    {
                        INDEBUG(failReason = "not handled by cse";)
                    }
                }

                // If it's a call, it must be a helper call, and be pure.
                // Further, if it may run a cctor, it must be labeled as "Hoistable"
                // (meaning it won't run a cctor because the class is not precise-init).
                if (treeIsHoistable && tree->IsCall())
                {
                    GenTreeCall* call = tree->AsCall();
                    if (call->gtCallType != CT_HELPER)
                    {
                        INDEBUG(failReason = "non-helper call";)
                        treeIsHoistable = false;
                    }
                    else
                    {
                        CorInfoHelpFunc helpFunc = eeGetHelperNum(call->gtCallMethHnd);
                        if (!s_helperCallProperties.IsPure(helpFunc))
                        {
                            INDEBUG(failReason = "impure helper call";)
                            treeIsHoistable = false;
                        }
                        else if (s_helperCallProperties.MayRunCctor(helpFunc) &&
                                 ((call->gtFlags & GTF_CALL_HOISTABLE) == 0))
                        {
                            INDEBUG(failReason = "non-hoistable helper call";)
                            treeIsHoistable = false;
                        }
                    }
                }

                if (treeIsHoistable)
                {
                    if (!m_beforeSideEffect)
                    {
                        // For now, we give up on an expression that might raise an exception if it is after the
                        // first possible global side effect (and we assume we're after that if we're not in the first
                        // block).
                        // TODO-CQ: this is when we might do loop cloning.
                        //
                        if ((tree->gtFlags & GTF_EXCEPT) != 0)
                        {
                            INDEBUG(failReason = "side effect ordering constraint";)
                            treeIsHoistable = false;
                        }
                    }
                }

                // Is the value of the whole tree loop invariant?
                treeIsInvariant = IsTreeVNInvariant(tree);

                // If not, the tree cannot be hoisted either.
                if (!treeIsInvariant)
                {
                    // Here we have a tree that is not loop invariant and we thus cannot hoist
                    INDEBUG(failReason = "tree VN is loop variant";)
                    treeIsHoistable = false;
                }
            }

            // Next check if we need to set 'm_beforeSideEffect' to false.
            //
            // If we have already set it to false then we can skip these checks
            //
            if (m_beforeSideEffect)
            {
                // Is the value of the whole tree loop invariant?
                if (!treeIsInvariant)
                {
                    // We have a tree that is not loop invariant and we thus cannot hoist
                    assert(treeIsHoistable == false);

                    // Check if we should clear m_beforeSideEffect.
                    // If 'tree' can throw an exception then we need to set m_beforeSideEffect to false.
                    // Note that calls are handled below
                    if (tree->OperMayThrow(m_compiler) && !tree->IsCall())
                    {
                        m_beforeSideEffect = false;
                    }
                }

                // In the section below, we only care about memory side effects. We assume that expressions will
                // be hoisted so that they are evaluated in the same order as they would have been in the loop,
                // and therefore throw exceptions in the same order.
                //
                if (tree->IsCall())
                {
                    // If it's a call, it must be a helper call that does not mutate the heap.
                    // Further, if it may run a cctor, it must be labeled as "Hoistable"
                    // (meaning it won't run a cctor because the class is not precise-init).
                    GenTreeCall* call = tree->AsCall();
                    if (call->gtCallType != CT_HELPER)
                    {
                        m_beforeSideEffect = false;
                    }
                    else
                    {
                        CorInfoHelpFunc helpFunc = eeGetHelperNum(call->gtCallMethHnd);
                        if (s_helperCallProperties.MutatesHeap(helpFunc))
                        {
                            m_beforeSideEffect = false;
                        }
                        else if (s_helperCallProperties.MayRunCctor(helpFunc) &&
                                 (call->gtFlags & GTF_CALL_HOISTABLE) == 0)
                        {
                            m_beforeSideEffect = false;
                        }

                        // Additional check for helper calls that throw exceptions
                        if (!treeIsInvariant)
                        {
                            // We have a tree that is not loop invariant and we thus cannot hoist
                            assert(treeIsHoistable == false);

                            // Does this helper call throw?
                            if (!s_helperCallProperties.NoThrow(helpFunc))
                            {
                                m_beforeSideEffect = false;
                            }
                        }
                    }
                }
                else if (tree->OperIs(GT_ASG))
                {
                    // If the LHS of the assignment has a global reference, then assume it's a global side effect.
                    GenTree* lhs = tree->AsOp()->gtOp1;
                    if (lhs->gtFlags & GTF_GLOB_REF)
                    {
                        m_beforeSideEffect = false;
                    }
                }
                else if (tree->OperIs(GT_XADD, GT_XORR, GT_XAND, GT_XCHG, GT_LOCKADD, GT_CMPXCHG, GT_MEMORYBARRIER))
                {
                    // If this node is a MEMORYBARRIER or an Atomic operation
                    // then don't hoist and stop any further hoisting after this node
                    INDEBUG(failReason = "atomic op or memory barrier";)
                    treeIsHoistable    = false;
                    m_beforeSideEffect = false;
                }
            }

            // If this 'tree' is hoistable then we return and the caller will
            // decide to hoist it as part of larger hoistable expression.
            //
            if (!treeIsHoistable && treeHasHoistableChildren)
            {
                // The current tree is not hoistable but it has hoistable children that we need
                // to hoist now.
                //
                // In order to preserve the original execution order, we also need to hoist any
                // other hoistable trees that we encountered so far.
                // At this point the stack contains (in top to bottom order):
                //   - the current node's children
                //   - the current node
                //   - ancestors of the current node and some of their descendants
                //
                // The ancestors have not been visited yet in post order so they're not hoistable
                // (and they cannot become hoistable because the current node is not) but some of
                // their descendants may have already been traversed and be hoistable.
                //
                // The execution order is actually bottom to top so we'll start hoisting from
                // the bottom of the stack, skipping the current node (which is expected to not
                // be hoistable).
                //
                // Note that the treeHasHoistableChildren check avoids unnecessary stack traversing
                // and also prevents hoisting trees too early. If the current tree is not hoistable
                // and it doesn't have any hoistable children then there's no point in hoisting any
                // other trees. Doing so would interfere with the cctor dependent case, where the
                // cctor dependent node is initially not hoistable and may become hoistable later,
                // when its parent comma node is visited.
                //
                for (int i = 0; i < m_valueStack.Height(); i++)
                {
                    Value& value = m_valueStack.BottomRef(i);

                    if (value.m_hoistable)
                    {
                        assert(value.Node() != tree);

                        // Don't hoist this tree again.
                        value.m_hoistable = false;
                        value.m_invariant = false;

                        m_compiler->optHoistCandidate(value.Node(), m_currentBlock, m_loopNum, m_hoistContext);
                    }
                    else if (value.Node() != tree)
                    {
                        JITDUMP(" [%06u] not %s: %s\n", dspTreeID(value.Node()), value.m_invariant ?
"invariant" : "hoistable", value.m_failReason); } } } m_valueStack.Pop(childCount); Value& top = m_valueStack.TopRef(); assert(top.Node() == tree); top.m_hoistable = treeIsHoistable; top.m_cctorDependent = treeIsCctorDependent; top.m_invariant = treeIsInvariant; #ifdef DEBUG if (!top.m_invariant || !top.m_hoistable) { top.m_failReason = failReason; } #endif return fgWalkResult::WALK_CONTINUE; } }; LoopDsc* loopDsc = &optLoopTable[loopNum]; assert(blocks->Top() == loopDsc->lpEntry); HoistVisitor visitor(this, loopNum, hoistContext); while (!blocks->Empty()) { BasicBlock* block = blocks->Pop(); weight_t blockWeight = block->getBBWeight(this); JITDUMP("\n optHoistLoopBlocks " FMT_BB " (weight=%6s) of loop " FMT_LP " <" FMT_BB ".." FMT_BB ">\n", block->bbNum, refCntWtd2str(blockWeight), loopNum, loopDsc->lpTop->bbNum, loopDsc->lpBottom->bbNum); if (blockWeight < (BB_UNITY_WEIGHT / 10)) { JITDUMP(" block weight is too small to perform hoisting.\n"); continue; } visitor.HoistBlock(block); } } void Compiler::optHoistCandidate(GenTree* tree, BasicBlock* treeBb, unsigned lnum, LoopHoistContext* hoistCtxt) { assert(lnum != BasicBlock::NOT_IN_LOOP); // It must pass the hoistable profitablity tests for this loop level if (!optIsProfitableToHoistTree(tree, lnum)) { JITDUMP(" ... not profitable to hoist\n"); return; } if (hoistCtxt->m_hoistedInParentLoops.Lookup(tree->gtVNPair.GetLiberal())) { JITDUMP(" ... already hoisted same VN in parent\n"); // already hoisted in a parent loop, so don't hoist this expression. return; } if (hoistCtxt->GetHoistedInCurLoop(this)->Lookup(tree->gtVNPair.GetLiberal())) { JITDUMP(" ... already hoisted same VN in current\n"); // already hoisted this expression in the current loop, so don't hoist this expression. return; } // Create a loop pre-header in which to put the hoisted code. fgCreateLoopPreHeader(lnum); // If the block we're hoisting from and the pre-header are in different EH regions, don't hoist. 
// TODO: we could probably hoist things that won't raise exceptions, such as constants. if (!BasicBlock::sameTryRegion(optLoopTable[lnum].lpHead, treeBb)) { JITDUMP(" ... not hoisting in " FMT_LP ", eh region constraint (pre-header try index %d, candidate " FMT_BB " try index %d\n", lnum, optLoopTable[lnum].lpHead->bbTryIndex, treeBb->bbNum, treeBb->bbTryIndex); return; } // Expression can be hoisted optPerformHoistExpr(tree, treeBb, lnum); // Increment lpHoistedExprCount or lpHoistedFPExprCount if (!varTypeIsFloating(tree->TypeGet())) { optLoopTable[lnum].lpHoistedExprCount++; #ifndef TARGET_64BIT // For our 32-bit targets Long types take two registers. if (varTypeIsLong(tree->TypeGet())) { optLoopTable[lnum].lpHoistedExprCount++; } #endif } else // Floating point expr hoisted { optLoopTable[lnum].lpHoistedFPExprCount++; } // Record the hoisted expression in hoistCtxt hoistCtxt->GetHoistedInCurLoop(this)->Set(tree->gtVNPair.GetLiberal(), true); } bool Compiler::optVNIsLoopInvariant(ValueNum vn, unsigned lnum, VNSet* loopVnInvariantCache) { // If it is not a VN, is not loop-invariant. if (vn == ValueNumStore::NoVN) { return false; } // We'll always short-circuit constants. if (vnStore->IsVNConstant(vn) || vn == vnStore->VNForVoid()) { return true; } // If we've done this query previously, don't repeat. bool previousRes = false; if (loopVnInvariantCache->Lookup(vn, &previousRes)) { return previousRes; } bool res = true; VNFuncApp funcApp; if (vnStore->GetVNFunc(vn, &funcApp)) { if (funcApp.m_func == VNF_PhiDef) { // Is the definition within the loop? If so, is not loop-invariant. 
unsigned lclNum = funcApp.m_args[0]; unsigned ssaNum = funcApp.m_args[1]; LclSsaVarDsc* ssaDef = lvaTable[lclNum].GetPerSsaData(ssaNum); res = !optLoopContains(lnum, ssaDef->GetBlock()->bbNatLoopNum); } else if (funcApp.m_func == VNF_PhiMemoryDef) { BasicBlock* defnBlk = reinterpret_cast<BasicBlock*>(vnStore->ConstantValue<ssize_t>(funcApp.m_args[0])); res = !optLoopContains(lnum, defnBlk->bbNatLoopNum); } else if (funcApp.m_func == VNF_MemOpaque) { const unsigned vnLoopNum = funcApp.m_args[0]; // Check for the special "ambiguous" loop MemOpaque VN. // This is considered variant in every loop. // if (vnLoopNum == BasicBlock::MAX_LOOP_NUM) { res = false; } else { res = !optLoopContains(lnum, vnLoopNum); } } else { for (unsigned i = 0; i < funcApp.m_arity; i++) { // 4th arg of mapStore identifies the loop where the store happens. // if (funcApp.m_func == VNF_MapStore) { assert(funcApp.m_arity == 4); if (i == 3) { const unsigned vnLoopNum = funcApp.m_args[3]; res = !optLoopContains(lnum, vnLoopNum); break; } } // TODO-CQ: We need to either make sure that *all* VN functions // always take VN args, or else have a list of arg positions to exempt, as implicitly // constant. if (!optVNIsLoopInvariant(funcApp.m_args[i], lnum, loopVnInvariantCache)) { res = false; break; } } } } loopVnInvariantCache->Set(vn, res); return res; } //------------------------------------------------------------------------------ // fgCreateLoopPreHeader: Creates a pre-header block for the given loop. // A pre-header is a block outside the loop that falls through or branches to the loop // entry block. It is the only non-loop predecessor block to the entry block (thus, it // dominates the entry block). The pre-header replaces the current lpHead in the loop table. // The pre-header will be placed immediately before the loop top block, which is the first // block of the loop in program order. 
//
// Once a loop has a pre-header, calling this function will immediately return without
// creating another.
//
// If there already exists a block that meets the pre-header requirements, that block is marked
// as a pre-header, and no flow graph modification is made.
//
// Note that the pre-header block can be in a different EH region from blocks in the loop, including the
// entry block. Code doing hoisting is required to check the EH legality of hoisting to the pre-header
// before doing so.
//
// Since the flow graph has changed, if needed, fgUpdateChangedFlowGraph() should be called after this
// to update the block numbers, reachability, and dominators. The loop table does not need to be rebuilt.
// The new pre-header block does have a copy of the previous 'head' reachability set, but the pre-header
// itself doesn't exist in any reachability/dominator sets. `fgDominate` has code to specifically
// handle queries about the pre-header dominating other blocks, even without re-computing dominators.
// The preds lists have been maintained.
//
// Currently, if you create a pre-header but don't put any code in it, any subsequent fgUpdateFlowGraph()
// pass might choose to compact the empty pre-header with a predecessor block. That is, a pre-header
// block might disappear if not used.
//
// The code does not depend on the order of the BasicBlock bbNum.
//
// Arguments:
//    lnum - loop index
//
void Compiler::fgCreateLoopPreHeader(unsigned lnum)
{
#ifdef DEBUG
    if (verbose)
    {
        printf("*************** In fgCreateLoopPreHeader for " FMT_LP "\n", lnum);
    }
#endif // DEBUG

    LoopDsc& loop = optLoopTable[lnum];

    // Have we already created a loop-preheader block?
    if (loop.lpFlags & LPFLG_HAS_PREHEAD)
    {
        JITDUMP(" pre-header already exists\n");
        INDEBUG(loop.lpValidatePreHeader());
        return;
    }

    BasicBlock* head  = loop.lpHead;
    BasicBlock* top   = loop.lpTop;
    BasicBlock* entry = loop.lpEntry;

    // Ensure that lpHead always dominates lpEntry
    noway_assert(fgDominate(head, entry));

    // If `head` is already a valid pre-header, then mark it so.
    if (head->GetUniqueSucc() == entry)
    {
        // The loop entry must have a single non-loop predecessor, which is the pre-header.
        bool loopHasProperEntryBlockPreds = true;
        for (BasicBlock* const predBlock : entry->PredBlocks())
        {
            if (head == predBlock)
            {
                continue;
            }
            const bool intraLoopPred = optLoopContains(lnum, predBlock->bbNatLoopNum);
            if (!intraLoopPred)
            {
                loopHasProperEntryBlockPreds = false;
                break;
            }
        }
        if (loopHasProperEntryBlockPreds)
        {
            // Does this existing region have the same EH region index that we will use when we create the pre-header?
            // If not, we want to create a new pre-header with the expected region.
            bool headHasCorrectEHRegion = false;
            if ((top->bbFlags & BBF_TRY_BEG) != 0)
            {
                assert(top->hasTryIndex());
                unsigned newTryIndex     = ehTrueEnclosingTryIndexIL(top->getTryIndex());
                unsigned compareTryIndex = head->hasTryIndex() ? head->getTryIndex() : EHblkDsc::NO_ENCLOSING_INDEX;
                headHasCorrectEHRegion   = newTryIndex == compareTryIndex;
            }
            else
            {
                headHasCorrectEHRegion = BasicBlock::sameTryRegion(head, top);
            }

            if (headHasCorrectEHRegion)
            {
                JITDUMP(" converting existing header " FMT_BB " into pre-header\n", head->bbNum);
                loop.lpFlags |= LPFLG_HAS_PREHEAD;
                assert((head->bbFlags & BBF_LOOP_PREHEADER) == 0); // It isn't already a loop pre-header
                head->bbFlags |= BBF_LOOP_PREHEADER;
                INDEBUG(loop.lpValidatePreHeader());
                INDEBUG(fgDebugCheckLoopTable());
                return;
            }
            else
            {
                JITDUMP(" existing head " FMT_BB " doesn't have correct EH region\n", head->bbNum);
            }
        }
        else
        {
            JITDUMP(" existing head " FMT_BB " isn't unique non-loop predecessor of loop entry\n", head->bbNum);
        }
    }
    else
    {
        JITDUMP(" existing head " FMT_BB " doesn't have unique successor branching to loop entry\n", head->bbNum);
    }

    // Allocate a new basic block for the pre-header.
    const bool isTopEntryLoop = loop.lpIsTopEntry();

    BasicBlock* preHead = bbNewBasicBlock(isTopEntryLoop ? BBJ_NONE : BBJ_ALWAYS);
    preHead->bbFlags |= BBF_INTERNAL | BBF_LOOP_PREHEADER;

    if (!isTopEntryLoop)
    {
        preHead->bbJumpDest = entry;
    }

    // Must set IL code offset
    preHead->bbCodeOffs = top->bbCodeOffs;

    // Set the default value of the preHead weight in case we don't have
    // valid profile data and since this blocks weight is just an estimate
    // we clear any BBF_PROF_WEIGHT flag that we may have picked up from head.
    //
    preHead->inheritWeight(head);
    preHead->bbFlags &= ~BBF_PROF_WEIGHT;

    // Copy the bbReach set from head for the new preHead block
    preHead->bbReach = BlockSetOps::MakeEmpty(this);
    BlockSetOps::Assign(this, preHead->bbReach, head->bbReach);
    // Also include 'head' in the preHead bbReach set
    BlockSetOps::AddElemD(this, preHead->bbReach, head->bbNum);

#ifdef DEBUG
    if (verbose)
    {
        printf("\nCreated PreHeader (" FMT_BB ") for loop " FMT_LP " (" FMT_BB " - " FMT_BB, preHead->bbNum, lnum,
               top->bbNum, loop.lpBottom->bbNum);
        if (!isTopEntryLoop)
        {
            printf(", entry " FMT_BB, entry->bbNum);
        }
        printf("), with weight = %s\n", refCntWtd2str(preHead->getBBWeight(this)));
    }
#endif

    // The preheader block is part of the containing loop (if any).
    preHead->bbNatLoopNum = loop.lpParent;

    if (fgIsUsingProfileWeights() && (head->bbJumpKind == BBJ_COND))
    {
        if ((head->bbWeight == BB_ZERO_WEIGHT) || (entry->bbWeight == BB_ZERO_WEIGHT))
        {
            preHead->bbWeight = BB_ZERO_WEIGHT;
            preHead->bbFlags |= BBF_RUN_RARELY;
        }
        else
        {
            // Allow for either the fall-through or branch to target 'entry'.
            BasicBlock* skipLoopBlock;
            if (head->bbNext == entry)
            {
                skipLoopBlock = head->bbJumpDest;
            }
            else
            {
                skipLoopBlock = head->bbNext;
            }
            assert(skipLoopBlock != entry);

            bool allValidProfileWeights =
                (head->hasProfileWeight() && skipLoopBlock->hasProfileWeight() && entry->hasProfileWeight());

            if (allValidProfileWeights)
            {
                weight_t loopEnteredCount = 0;
                weight_t loopSkippedCount = 0;
                bool     useEdgeWeights   = fgHaveValidEdgeWeights;

                if (useEdgeWeights)
                {
                    const flowList* edgeToEntry    = fgGetPredForBlock(entry, head);
                    const flowList* edgeToSkipLoop = fgGetPredForBlock(skipLoopBlock, head);
                    noway_assert(edgeToEntry != nullptr);
                    noway_assert(edgeToSkipLoop != nullptr);

                    loopEnteredCount = (edgeToEntry->edgeWeightMin() + edgeToEntry->edgeWeightMax()) / 2.0;
                    loopSkippedCount = (edgeToSkipLoop->edgeWeightMin() + edgeToSkipLoop->edgeWeightMax()) / 2.0;

                    // Watch out for cases where edge weights were not properly maintained
                    // so that it appears no profile flow enters the loop.
                    //
                    useEdgeWeights = !fgProfileWeightsConsistent(loopEnteredCount, BB_ZERO_WEIGHT);
                }

                if (!useEdgeWeights)
                {
                    loopEnteredCount = entry->bbWeight;
                    loopSkippedCount = skipLoopBlock->bbWeight;
                }

                weight_t loopTakenRatio = loopEnteredCount / (loopEnteredCount + loopSkippedCount);

                JITDUMP("%s edge weights; loopEnterCount " FMT_WT " loopSkipCount " FMT_WT " taken ratio " FMT_WT "\n",
                        fgHaveValidEdgeWeights ? (useEdgeWeights ? "valid" : "ignored") : "invalid", loopEnteredCount,
                        loopSkippedCount, loopTakenRatio);

                // Calculate a good approximation of the preHead's block weight
                weight_t preHeadWeight = (head->bbWeight * loopTakenRatio);
                preHead->setBBProfileWeight(preHeadWeight);
                noway_assert(!preHead->isRunRarely());
            }
        }
    }

    // Link in the preHead block
    fgInsertBBbefore(top, preHead);

    // Ideally we would re-run SSA and VN if we optimized by doing loop hoisting.
    // However, that is too expensive at this point. Instead, we update the phi
    // node block references, if we created pre-header block due to hoisting.
    // This is sufficient because any definition participating in SSA that flowed
    // into the phi via the loop header block will now flow through the preheader
    // block from the header block.
    for (Statement* const stmt : top->Statements())
    {
        GenTree* tree = stmt->GetRootNode();
        if (tree->OperGet() != GT_ASG)
        {
            break;
        }
        GenTree* op2 = tree->gtGetOp2();
        if (op2->OperGet() != GT_PHI)
        {
            break;
        }
        for (GenTreePhi::Use& use : op2->AsPhi()->Uses())
        {
            GenTreePhiArg* phiArg = use.GetNode()->AsPhiArg();
            if (phiArg->gtPredBB == head)
            {
                phiArg->gtPredBB = preHead;
            }
        }
    }

    // In which EH region should the pre-header live?
    //
    // The pre-header block is added immediately before `top`.
    //
    // The `top` block cannot be the first block of a filter or handler: `top` must have a back-edge from a
    // BBJ_COND or BBJ_ALWAYS within the loop, and a filter or handler cannot be branched to like that.
    //
    // The `top` block can be the first block of a `try` region, and you can fall into or branch to the
    // first block of a `try` region. (For top-entry loops, `top` will both be the target of a back-edge
    // and a fall-through from the previous block.)
    //
    // If the `top` block is NOT the first block of a `try` region, the pre-header can simply extend the
    // `top` block region.
    //
    // If the `top` block IS the first block of a `try`, we find its parent region and use that. For mutual-protect
    // regions, we need to find the actual parent, as the block stores the most "nested" mutual region. For
    // non-mutual-protect regions, due to EH canonicalization, we are guaranteed that no other EH regions begin
    // on the same block, so looking to just the parent is sufficient. Note that we can't just extend the EH
    // region of `top` to the pre-header, because `top` will still be the target of backward branches from
    // within the loop. If those backward branches come from outside the `try` (say, only the top half of the loop
    // is a `try` region), then we can't branch to a non-first `try` region block (you always must enter the `try`
    // in the first block).
    //
    // Note that hoisting any code out of a try region, for example, to a pre-header block in a different
    // EH region, needs to ensure that no exceptions will be thrown.

    assert(!fgIsFirstBlockOfFilterOrHandler(top));

    if ((top->bbFlags & BBF_TRY_BEG) != 0)
    {
        // `top` is the beginning of a try block. Figure out the EH region to use.
        assert(top->hasTryIndex());
        unsigned short newTryIndex = (unsigned short)ehTrueEnclosingTryIndexIL(top->getTryIndex());
        if (newTryIndex == EHblkDsc::NO_ENCLOSING_INDEX)
        {
            // No EH try index.
            preHead->clearTryIndex();
        }
        else
        {
            preHead->setTryIndex(newTryIndex);
        }

        // What handler region to use? Use the same handler region as `top`.
        preHead->copyHndIndex(top);
    }
    else
    {
        // `top` is not the beginning of a try block. Just extend the EH region to the pre-header.
        // We don't need to call `fgExtendEHRegionBefore()` because all the special handling that function
        // does it to account for `top` being the first block of a `try` or handler region, which we know
        // is not true.
        preHead->copyEHRegion(top);
    }

    // TODO-CQ: set dominators for this block, to allow loop optimizations requiring them
    // (e.g: hoisting expression in a loop with the same 'head' as this one)

    // Update the loop table
    loop.lpHead = preHead;
    loop.lpFlags |= LPFLG_HAS_PREHEAD;

    // The new block becomes the 'head' of the loop - update bbRefs and bbPreds.
    // All non-loop predecessors of 'entry' now jump to 'preHead'.
    preHead->bbRefs       = 0;
    bool checkNestedLoops = false;

    for (BasicBlock* const predBlock : entry->PredBlocks())
    {
        // Is the predBlock in the loop?
        //
        // We want to use:
        //   const bool intraLoopPred = loop.lpContains(predBlock);
        // but we can't depend on the bbNum ordering.
        //
        // Previously, this code wouldn't redirect predecessors dominated by the entry. However, that can
        // lead to a case where non-loop predecessor is dominated by the loop entry, and that predecessor
        // continues to branch to the entry, not the new pre-header. This is normally ok for hoisting
        // because it will introduce an SSA PHI def within the loop, which will inhibit hoisting. However,
        // it complicates the definition of what a pre-header is.
        const bool intraLoopPred = optLoopContains(lnum, predBlock->bbNatLoopNum);
        if (intraLoopPred)
        {
            if (predBlock != loop.lpBottom)
            {
                checkNestedLoops = true;
            }
            continue;
        }

        switch (predBlock->bbJumpKind)
        {
            case BBJ_NONE:
                // This 'entry' predecessor that isn't dominated by 'entry' must be outside the loop,
                // meaning it must be fall-through to 'entry', and we must have a top-entry loop.
                noway_assert((entry == top) && (predBlock == head) && (predBlock->bbNext == preHead));
                fgRemoveRefPred(entry, predBlock);
                fgAddRefPred(preHead, predBlock);
                break;

            case BBJ_COND:
                if (predBlock->bbJumpDest == entry)
                {
                    predBlock->bbJumpDest = preHead;
                    noway_assert(predBlock->bbNext != preHead);
                }
                else
                {
                    noway_assert((entry == top) && (predBlock == head) && (predBlock->bbNext == preHead));
                }
                fgRemoveRefPred(entry, predBlock);
                fgAddRefPred(preHead, predBlock);
                break;

            case BBJ_ALWAYS:
            case BBJ_EHCATCHRET:
                noway_assert(predBlock->bbJumpDest == entry);
                predBlock->bbJumpDest = preHead;
                fgRemoveRefPred(entry, predBlock);
                fgAddRefPred(preHead, predBlock);
                break;

            case BBJ_SWITCH:
                // Redirect every switch-table target that pointed at 'entry' to 'preHead'.
                unsigned jumpCnt;
                jumpCnt = predBlock->bbJumpSwt->bbsCount;
                BasicBlock** jumpTab;
                jumpTab = predBlock->bbJumpSwt->bbsDstTab;

                do
                {
                    assert(*jumpTab);
                    if ((*jumpTab) == entry)
                    {
                        (*jumpTab) = preHead;

                        fgRemoveRefPred(entry, predBlock);
                        fgAddRefPred(preHead, predBlock);
                    }
                } while (++jumpTab, --jumpCnt);

                UpdateSwitchTableTarget(predBlock, entry, preHead);
                break;

            default:
                noway_assert(!"Unexpected bbJumpKind");
                break;
        }
    }

    flowList* const edgeToPreHeader = fgGetPredForBlock(preHead, head);
    noway_assert(edgeToPreHeader != nullptr);
    edgeToPreHeader->setEdgeWeights(preHead->bbWeight, preHead->bbWeight, preHead);
    noway_assert(fgGetPredForBlock(entry, preHead) == nullptr);
    flowList* const edgeFromPreHeader = fgAddRefPred(entry, preHead);
    edgeFromPreHeader->setEdgeWeights(preHead->bbWeight, preHead->bbWeight, entry);

    /*
        If we found at least one back-edge in the flowgraph pointing to the entry of the loop
        (other than the back-edge of the loop we are considering) then we likely have nested
        do-while loops with the same entry block and inserting the preheader block changes the head
        of all the nested loops. Now we will update this piece of information in the loop table, and
        mark all nested loops as having a preheader (the preheader block can be shared among all nested
        do-while loops with the same entry block).
    */
    if (checkNestedLoops)
    {
        for (unsigned l = 0; l < optLoopCount; l++)
        {
            if (optLoopTable[l].lpHead == head)
            {
                // loop.lpHead was already changed from 'head' to 'preHead'
                noway_assert(l != lnum);

                // If it shares head, it must be a top-entry loop that shares top.
                noway_assert(optLoopTable[l].lpEntry == top);

                optUpdateLoopHead(l, optLoopTable[l].lpHead, preHead);
                optLoopTable[l].lpFlags |= LPFLG_HAS_PREHEAD;
#ifdef DEBUG
                if (verbose)
                {
                    printf("Same PreHeader (" FMT_BB ") can be used for loop " FMT_LP " (" FMT_BB " - " FMT_BB ")\n\n",
                           preHead->bbNum, l, top->bbNum, optLoopTable[l].lpBottom->bbNum);
                }
#endif
            }
        }
    }

    // We added a new block and altered the preds list; make sure the flow graph has been marked as being modified.
    assert(fgModified);

#ifdef DEBUG
    fgDebugCheckBBlist();
    fgVerifyHandlerTab();
    fgDebugCheckLoopTable();

    if (verbose)
    {
        JITDUMP("*************** After fgCreateLoopPreHeader for " FMT_LP "\n", lnum);
        fgDispBasicBlocks();
        fgDispHandlerTab();
        optPrintLoopTable();
    }
#endif
}

//------------------------------------------------------------------------
// optBlockIsLoopEntry: Check whether "blk" is the entry block of any of the
// (non-removed) loops it belongs to, from innermost outward.
//
// Arguments:
//    blk   - block to query
//    pLnum - [out] index of the loop whose entry is "blk", when found
//
// Return Value:
//    true if "blk" is a loop entry (and "*pLnum" is set); false otherwise.
//
bool Compiler::optBlockIsLoopEntry(BasicBlock* blk, unsigned* pLnum)
{
    for (unsigned lnum = blk->bbNatLoopNum; lnum != BasicBlock::NOT_IN_LOOP; lnum = optLoopTable[lnum].lpParent)
    {
        if (optLoopTable[lnum].lpFlags & LPFLG_REMOVED)
        {
            continue;
        }
        if (optLoopTable[lnum].lpEntry == blk)
        {
            *pLnum = lnum;
            return true;
        }
    }
    return false;
}

//------------------------------------------------------------------------
// optComputeLoopSideEffects: Compute side-effect summaries for all loops.
//
// Resets each loop's in/out and use/def variable sets and its "contains call"
// flag, then computes side effects for each outermost loop nest. Also
// populates lvaFloatVars (and, on 32-bit targets, lvaLongVars) with the
// tracked locals of the corresponding types.
//
void Compiler::optComputeLoopSideEffects()
{
    unsigned lnum;
    for (lnum = 0; lnum < optLoopCount; lnum++)
    {
        VarSetOps::AssignNoCopy(this, optLoopTable[lnum].lpVarInOut, VarSetOps::MakeEmpty(this));
        VarSetOps::AssignNoCopy(this, optLoopTable[lnum].lpVarUseDef, VarSetOps::MakeEmpty(this));
        optLoopTable[lnum].lpFlags &= ~LPFLG_CONTAINS_CALL;
    }

    for (lnum = 0; lnum < optLoopCount; lnum++)
    {
        if (optLoopTable[lnum].lpFlags & LPFLG_REMOVED)
        {
            continue;
        }

        if (optLoopTable[lnum].lpParent == BasicBlock::NOT_IN_LOOP)
        { // Is outermost...
            optComputeLoopNestSideEffects(lnum);
        }
    }

    VarSetOps::AssignNoCopy(this, lvaFloatVars, VarSetOps::MakeEmpty(this));
#ifndef TARGET_64BIT
    VarSetOps::AssignNoCopy(this, lvaLongVars, VarSetOps::MakeEmpty(this));
#endif

    for (unsigned i = 0; i < lvaCount; i++)
    {
        LclVarDsc* varDsc = lvaGetDesc(i);
        if (varDsc->lvTracked)
        {
            if (varTypeIsFloating(varDsc->lvType))
            {
                VarSetOps::AddElemD(this, lvaFloatVars, varDsc->lvVarIndex);
            }
#ifndef TARGET_64BIT
            else if (varTypeIsLong(varDsc->lvType))
            {
                VarSetOps::AddElemD(this, lvaLongVars, varDsc->lvVarIndex);
            }
#endif
        }
    }
}

//------------------------------------------------------------------------
// optComputeLoopNestSideEffects: Compute side effects for an entire loop nest
// by visiting every block of the outermost loop "lnum".
//
// Arguments:
//    lnum - index of an outermost loop
//
void Compiler::optComputeLoopNestSideEffects(unsigned lnum)
{
    JITDUMP("optComputeLoopNestSideEffects for " FMT_LP "\n", lnum);
    assert(optLoopTable[lnum].lpParent == BasicBlock::NOT_IN_LOOP); // Requires: lnum is outermost.
for (BasicBlock* const bbInLoop : optLoopTable[lnum].LoopBlocks())
    {
        if (!optComputeLoopSideEffectsOfBlock(bbInLoop))
        {
            // When optComputeLoopSideEffectsOfBlock returns false, we encountered
            // a block that was moved into the loop range (by fgReorderBlocks),
            // but not marked correctly as being inside the loop.
            // We conservatively mark this loop (and any outer loops)
            // as having memory havoc side effects.
            //
            // Record that all loops containing this block have memory havoc effects.
            //
            optRecordLoopNestsMemoryHavoc(lnum, fullMemoryKindSet);

            // All done, no need to keep visiting more blocks
            break;
        }
    }
}

//------------------------------------------------------------------------
// optRecordLoopNestsMemoryHavoc: Mark loop "lnum" and all of its parent loops
// as having memory havoc for each memory kind present in "memoryHavoc".
//
// Arguments:
//    lnum        - innermost loop to mark; its ancestors are marked too
//    memoryHavoc - set of memory kinds with arbitrary (havoc) side effects
//
void Compiler::optRecordLoopNestsMemoryHavoc(unsigned lnum, MemoryKindSet memoryHavoc)
{
    // We should start out with 'lnum' set to a valid natural loop index
    assert(lnum != BasicBlock::NOT_IN_LOOP);

    while (lnum != BasicBlock::NOT_IN_LOOP)
    {
        for (MemoryKind memoryKind : allMemoryKinds())
        {
            if ((memoryHavoc & memoryKindSet(memoryKind)) != 0)
            {
                optLoopTable[lnum].lpLoopHasMemoryHavoc[memoryKind] = true;
            }
        }

        // Move lnum to the next outermost loop that we need to mark
        lnum = optLoopTable[lnum].lpParent;
    }
}

//------------------------------------------------------------------------
// optComputeLoopSideEffectsOfBlock: Accumulate the side effects of one block
// into the loops that contain it: variable liveness, "contains call" flags,
// modified static fields and array element types, and memory havoc.
//
// Arguments:
//    blk - block to analyze
//
// Return Value:
//    false if "blk" is not marked as being inside any loop (callers treat
//    this as a conservative "assume full havoc" signal); true otherwise.
//
bool Compiler::optComputeLoopSideEffectsOfBlock(BasicBlock* blk)
{
    unsigned mostNestedLoop = blk->bbNatLoopNum;
    JITDUMP("optComputeLoopSideEffectsOfBlock " FMT_BB ", mostNestedLoop %d\n", blk->bbNum, mostNestedLoop);
    if (mostNestedLoop == BasicBlock::NOT_IN_LOOP)
    {
        return false;
    }
    AddVariableLivenessAllContainingLoops(mostNestedLoop, blk);

    // MemoryKinds for which an in-loop call or store has arbitrary effects.
    MemoryKindSet memoryHavoc = emptyMemoryKindSet;

    // Now iterate over the remaining statements, and their trees.
    for (Statement* const stmt : blk->NonPhiStatements())
    {
        for (GenTree* const tree : stmt->TreeList())
        {
            genTreeOps oper = tree->OperGet();

            // Even after we set memoryHavoc we still may want to know if a loop contains calls
            if (memoryHavoc == fullMemoryKindSet)
            {
                if (oper == GT_CALL)
                {
                    // Record that this loop contains a call
                    AddContainsCallAllContainingLoops(mostNestedLoop);
                }

                // If we just set LPFLG_CONTAINS_CALL or it was previously set
                if (optLoopTable[mostNestedLoop].lpFlags & LPFLG_CONTAINS_CALL)
                {
                    // We can early exit after both memoryHavoc and LPFLG_CONTAINS_CALL are both set to true.
                    break;
                }

                // We are just looking for GT_CALL nodes after memoryHavoc was set.
                continue;
            }

            // otherwise memoryHavoc is not set for at least one heap ID
            assert(memoryHavoc != fullMemoryKindSet);

            // This body is a distillation of the memory side-effect code of value numbering.
            // We also do a very limited analysis if byref PtrTo values, to cover some cases
            // that the compiler creates.
            if (oper == GT_ASG)
            {
                GenTree* lhs = tree->AsOp()->gtOp1->gtEffectiveVal(/*commaOnly*/ true);

                if (lhs->OperGet() == GT_IND)
                {
                    GenTree*      arg           = lhs->AsOp()->gtOp1->gtEffectiveVal(/*commaOnly*/ true);
                    FieldSeqNode* fldSeqArrElem = nullptr;

                    if ((tree->gtFlags & GTF_IND_VOLATILE) != 0)
                    {
                        memoryHavoc |= memoryKindSet(GcHeap, ByrefExposed);
                        continue;
                    }

                    ArrayInfo arrInfo;

                    if (arg->TypeGet() == TYP_BYREF && arg->OperGet() == GT_LCL_VAR)
                    {
                        // If it's a local byref for which we recorded a value number, use that...
                        GenTreeLclVar* argLcl = arg->AsLclVar();
                        if (lvaInSsa(argLcl->GetLclNum()) && argLcl->HasSsaName())
                        {
                            ValueNum argVN =
                                lvaTable[argLcl->GetLclNum()].GetPerSsaData(argLcl->GetSsaNum())->m_vnPair.GetLiberal();
                            VNFuncApp funcApp;
                            if (argVN != ValueNumStore::NoVN && vnStore->GetVNFunc(argVN, &funcApp) &&
                                funcApp.m_func == VNF_PtrToArrElem)
                            {
                                assert(vnStore->IsVNHandle(funcApp.m_args[0]));
                                CORINFO_CLASS_HANDLE elemType =
                                    CORINFO_CLASS_HANDLE(vnStore->ConstantValue<size_t>(funcApp.m_args[0]));
                                AddModifiedElemTypeAllContainingLoops(mostNestedLoop, elemType);
                                // Don't set memoryHavoc for GcHeap below. Do set memoryHavoc for ByrefExposed
                                // (conservatively assuming that a byref may alias the array element)
                                memoryHavoc |= memoryKindSet(ByrefExposed);
                                continue;
                            }
                        }
                        // Otherwise...
                        memoryHavoc |= memoryKindSet(GcHeap, ByrefExposed);
                    }
                    // Is the LHS an array index expression?
                    else if (lhs->ParseArrayElemForm(this, &arrInfo, &fldSeqArrElem))
                    {
                        // We actually ignore "fldSeq" -- any modification to an S[], at any
                        // field of "S", will lose all information about the array type.
                        CORINFO_CLASS_HANDLE elemTypeEq = EncodeElemType(arrInfo.m_elemType, arrInfo.m_elemStructType);
                        AddModifiedElemTypeAllContainingLoops(mostNestedLoop, elemTypeEq);
                        // Conservatively assume byrefs may alias this array element
                        memoryHavoc |= memoryKindSet(ByrefExposed);
                    }
                    else
                    {
                        GenTree*      baseAddr = nullptr;
                        FieldSeqNode* fldSeq   = nullptr;
                        if (arg->IsFieldAddr(this, &baseAddr, &fldSeq))
                        {
                            assert((fldSeq != nullptr) && (fldSeq != FieldSeqStore::NotAField()) &&
                                   !fldSeq->IsPseudoField());

                            FieldKindForVN fieldKind =
                                (baseAddr != nullptr) ? FieldKindForVN::WithBaseAddr : FieldKindForVN::SimpleStatic;
                            AddModifiedFieldAllContainingLoops(mostNestedLoop, fldSeq->GetFieldHandle(), fieldKind);
                            // Conservatively assume byrefs may alias this object.
                            memoryHavoc |= memoryKindSet(ByrefExposed);
                        }
                        else
                        {
                            memoryHavoc |= memoryKindSet(GcHeap, ByrefExposed);
                        }
                    }
                }
                else if (lhs->OperIsBlk())
                {
                    GenTreeLclVarCommon* lclVarTree;
                    bool                 isEntire;
                    if (!tree->DefinesLocal(this, &lclVarTree, &isEntire))
                    {
                        // For now, assume arbitrary side effects on GcHeap/ByrefExposed...
                        memoryHavoc |= memoryKindSet(GcHeap, ByrefExposed);
                    }
                    else if (lvaVarAddrExposed(lclVarTree->GetLclNum()))
                    {
                        memoryHavoc |= memoryKindSet(ByrefExposed);
                    }
                }
                else if (lhs->OperGet() == GT_CLS_VAR)
                {
                    AddModifiedFieldAllContainingLoops(mostNestedLoop, lhs->AsClsVar()->gtClsVarHnd,
                                                       FieldKindForVN::SimpleStatic);
                    // Conservatively assume byrefs may alias this static field
                    memoryHavoc |= memoryKindSet(ByrefExposed);
                }
                // Otherwise, must be local lhs form. I should assert that.
                else if (lhs->OperGet() == GT_LCL_VAR)
                {
                    GenTreeLclVar* lhsLcl = lhs->AsLclVar();
                    GenTree*       rhs    = tree->AsOp()->gtOp2;
                    ValueNum       rhsVN  = rhs->gtVNPair.GetLiberal();
                    // If we gave the RHS a value number, propagate it.
                    if (rhsVN != ValueNumStore::NoVN)
                    {
                        rhsVN = vnStore->VNNormalValue(rhsVN);
                        if (lvaInSsa(lhsLcl->GetLclNum()) && lhsLcl->HasSsaName())
                        {
                            lvaTable[lhsLcl->GetLclNum()]
                                .GetPerSsaData(lhsLcl->GetSsaNum())
                                ->m_vnPair.SetLiberal(rhsVN);
                        }
                    }
                    // If the local is address-exposed, count this as ByrefExposed havoc
                    if (lvaVarAddrExposed(lhsLcl->GetLclNum()))
                    {
                        memoryHavoc |= memoryKindSet(ByrefExposed);
                    }
                }
            }
            else // if (oper != GT_ASG)
            {
                switch (oper)
                {
                    case GT_COMMA:
                        tree->gtVNPair = tree->AsOp()->gtOp2->gtVNPair;
                        break;

                    case GT_ADDR:
                        // Is it an addr of a array index expression?
                        {
                            GenTree* addrArg = tree->AsOp()->gtOp1;
                            if (addrArg->OperGet() == GT_IND)
                            {
                                // Is the LHS an array index expression?
                                if (addrArg->gtFlags & GTF_IND_ARR_INDEX)
                                {
                                    ArrayInfo arrInfo;
                                    bool      b = GetArrayInfoMap()->Lookup(addrArg, &arrInfo);
                                    assert(b);
                                    CORINFO_CLASS_HANDLE elemTypeEq =
                                        EncodeElemType(arrInfo.m_elemType, arrInfo.m_elemStructType);
                                    ValueNum elemTypeEqVN =
                                        vnStore->VNForHandle(ssize_t(elemTypeEq), GTF_ICON_CLASS_HDL);
                                    ValueNum ptrToArrElemVN =
                                        vnStore->VNForFunc(TYP_BYREF, VNF_PtrToArrElem, elemTypeEqVN,
                                                           // The rest are dummy arguments.
                                                           vnStore->VNForNull(), vnStore->VNForNull(),
                                                           vnStore->VNForNull());
                                    tree->gtVNPair.SetBoth(ptrToArrElemVN);
                                }
                            }
                        }
                        break;

#ifdef FEATURE_HW_INTRINSICS
                    case GT_HWINTRINSIC:
                        if (tree->AsHWIntrinsic()->OperIsMemoryStore())
                        {
                            memoryHavoc |= memoryKindSet(GcHeap, ByrefExposed);
                        }
                        break;
#endif // FEATURE_HW_INTRINSICS

                    case GT_LOCKADD:
                    case GT_XORR:
                    case GT_XAND:
                    case GT_XADD:
                    case GT_XCHG:
                    case GT_CMPXCHG:
                    case GT_MEMORYBARRIER:
                    case GT_STORE_DYN_BLK:
                    {
                        memoryHavoc |= memoryKindSet(GcHeap, ByrefExposed);
                    }
                    break;

                    case GT_CALL:
                    {
                        GenTreeCall* call = tree->AsCall();

                        // Record that this loop contains a call
                        AddContainsCallAllContainingLoops(mostNestedLoop);

                        if (call->gtCallType == CT_HELPER)
                        {
                            CorInfoHelpFunc helpFunc = eeGetHelperNum(call->gtCallMethHnd);
                            if (s_helperCallProperties.MutatesHeap(helpFunc))
                            {
                                memoryHavoc |= memoryKindSet(GcHeap, ByrefExposed);
                            }
                            else if (s_helperCallProperties.MayRunCctor(helpFunc))
                            {
                                // If the call is labeled as "Hoistable", then we've checked the
                                // class that would be constructed, and it is not precise-init, so
                                // the cctor will not be run by this call. Otherwise, it might be,
                                // and might have arbitrary side effects.
                                if ((tree->gtFlags & GTF_CALL_HOISTABLE) == 0)
                                {
                                    memoryHavoc |= memoryKindSet(GcHeap, ByrefExposed);
                                }
                            }
                        }
                        else
                        {
                            memoryHavoc |= memoryKindSet(GcHeap, ByrefExposed);
                        }
                        break;
                    }

                    default:
                        // All other gtOper node kinds, leave 'memoryHavoc' unchanged (i.e. false)
                        assert(!tree->OperRequiresAsgFlag());
                        break;
                }
            }
        }
    }

    if (memoryHavoc != emptyMemoryKindSet)
    {
        // Record that all loops containing this block have this kind of memoryHavoc effects.
        optRecordLoopNestsMemoryHavoc(mostNestedLoop, memoryHavoc);
    }
    return true;
}

// Marks the containsCall information to "lnum" and any parent loops.
void Compiler::AddContainsCallAllContainingLoops(unsigned lnum)
{

#if FEATURE_LOOP_ALIGN
    // If this is the inner most loop, reset the LOOP_ALIGN flag
    // because a loop having call will not likely to benefit from
    // alignment
    if (optLoopTable[lnum].lpChild == BasicBlock::NOT_IN_LOOP)
    {
        BasicBlock* top = optLoopTable[lnum].lpTop;

        top->unmarkLoopAlign(this DEBUG_ARG("Loop with call"));
    }
#endif

    assert(0 <= lnum && lnum < optLoopCount);
    while (lnum != BasicBlock::NOT_IN_LOOP)
    {
        optLoopTable[lnum].lpFlags |= LPFLG_CONTAINS_CALL;
        lnum = optLoopTable[lnum].lpParent;
    }
}

// Adds the variable liveness information for 'blk' to 'this' LoopDsc
void Compiler::LoopDsc::AddVariableLiveness(Compiler* comp, BasicBlock* blk)
{
    VarSetOps::UnionD(comp, this->lpVarInOut, blk->bbLiveIn);
    VarSetOps::UnionD(comp, this->lpVarInOut, blk->bbLiveOut);

    VarSetOps::UnionD(comp, this->lpVarUseDef, blk->bbVarUse);
    VarSetOps::UnionD(comp, this->lpVarUseDef, blk->bbVarDef);
}

// Adds the variable liveness information for 'blk' to "lnum" and any parent loops.
void Compiler::AddVariableLivenessAllContainingLoops(unsigned lnum, BasicBlock* blk)
{
    assert(0 <= lnum && lnum < optLoopCount);
    while (lnum != BasicBlock::NOT_IN_LOOP)
    {
        optLoopTable[lnum].AddVariableLiveness(this, blk);
        lnum = optLoopTable[lnum].lpParent;
    }
}

// Adds "fldHnd" to the set of modified fields of "lnum" and any parent loops.
void Compiler::AddModifiedFieldAllContainingLoops(unsigned lnum, CORINFO_FIELD_HANDLE fldHnd, FieldKindForVN fieldKind)
{
    assert(0 <= lnum && lnum < optLoopCount);
    // Propagate the modified-field fact to every enclosing loop.
    while (lnum != BasicBlock::NOT_IN_LOOP)
    {
        optLoopTable[lnum].AddModifiedField(this, fldHnd, fieldKind);
        lnum = optLoopTable[lnum].lpParent;
    }
}

// Adds "elemType" to the set of modified array element types of "lnum" and any parent loops.
void Compiler::AddModifiedElemTypeAllContainingLoops(unsigned lnum, CORINFO_CLASS_HANDLE elemClsHnd)
{
    assert(0 <= lnum && lnum < optLoopCount);
    while (lnum != BasicBlock::NOT_IN_LOOP)
    {
        optLoopTable[lnum].AddModifiedElemType(this, elemClsHnd);
        lnum = optLoopTable[lnum].lpParent;
    }
}

//------------------------------------------------------------------------------
// optRemoveRangeCheck : Given an indexing node, mark it as not needing a range check.
//
// Arguments:
//    check  -  Range check tree, the raw CHECK node (ARRAY, SIMD or HWINTRINSIC).
//    comma  -  GT_COMMA to which the "check" belongs, "nullptr" if the check is a standalone one.
//    stmt   -  Statement the indexing nodes belong to.
//
// Return Value:
//    Rewritten "check" - no-op if it has no side effects or the tree that contains them.
//
// Assumptions:
//    This method is capable of removing checks of two kinds: COMMA-based and standalone top-level ones.
//    In case of a COMMA-based check, "check" must be a non-null first operand of a non-null COMMA.
//    In case of a standalone check, "comma" must be null and "check" - "stmt"'s root.
//
GenTree* Compiler::optRemoveRangeCheck(GenTreeBoundsChk* check, GenTree* comma, Statement* stmt)
{
#if !REARRANGE_ADDS
    noway_assert(!"can't remove range checks without REARRANGE_ADDS right now");
#endif

    noway_assert(stmt != nullptr);
    noway_assert((comma != nullptr && comma->OperIs(GT_COMMA) && comma->gtGetOp1() == check) ||
                 (check != nullptr && check->OperIs(GT_BOUNDS_CHECK) && comma == nullptr));
    noway_assert(check->OperIs(GT_BOUNDS_CHECK));

    // "tree" is the node that will be rewritten in place: the COMMA if present, else the check itself.
    GenTree* tree = comma != nullptr ? comma : check;

#ifdef DEBUG
    if (verbose)
    {
        printf("Before optRemoveRangeCheck:\n");
        gtDispTree(tree);
    }
#endif

    // Extract side effects
    GenTree* sideEffList = nullptr;
    gtExtractSideEffList(check, &sideEffList, GTF_ASG);

    if (sideEffList != nullptr)
    {
        // We've got some side effects.
        if (tree->OperIs(GT_COMMA))
        {
            // Make the comma handle them.
            tree->AsOp()->gtOp1 = sideEffList;
        }
        else
        {
            // Make the statement execute them instead of the check.
            stmt->SetRootNode(sideEffList);
            tree = sideEffList;
        }
    }
    else
    {
        // No side effects: the check simply becomes a no-op.
        check->gtBashToNOP();
    }

    if (tree->OperIs(GT_COMMA))
    {
        // TODO-CQ: We should also remove the GT_COMMA, but in any case we can no longer CSE the GT_COMMA.
        tree->gtFlags |= GTF_DONT_CSE;
    }

    gtUpdateSideEffects(stmt, tree);

    // Recalculate the GetCostSz(), etc...
    gtSetStmtInfo(stmt);

    // Re-thread the nodes if necessary
    if (fgStmtListThreaded)
    {
        fgSetStmtSeq(stmt);
    }

#ifdef DEBUG
    if (verbose)
    {
        // gtUpdateSideEffects can update the side effects for ancestors in the tree, so display the whole statement
        // tree, not just the sub-tree.
        printf("After optRemoveRangeCheck for [%06u]:\n", dspTreeID(tree));
        gtDispTree(stmt->GetRootNode());
    }
#endif

    return check;
}

//------------------------------------------------------------------------------
// optRemoveStandaloneRangeCheck : A thin wrapper over optRemoveRangeCheck that removes standalone checks.
//
// Arguments:
//    check - The standalone top-level CHECK node.
//    stmt  - The statement "check" is a root node of.
//
// Return Value:
//    If "check" has no side effects, it is returned, bashed to a no-op.
//    If it has side effects, the tree that executes them is returned.
//
GenTree* Compiler::optRemoveStandaloneRangeCheck(GenTreeBoundsChk* check, Statement* stmt)
{
    assert(check != nullptr);
    assert(stmt != nullptr);
    assert(check == stmt->GetRootNode());

    return optRemoveRangeCheck(check, nullptr, stmt);
}

//------------------------------------------------------------------------------
// optRemoveCommaBasedRangeCheck : A thin wrapper over optRemoveRangeCheck that removes COMMA-based checks.
//
// Arguments:
//    comma - GT_COMMA of which the first operand is the CHECK to be removed.
//    stmt  - The statement "comma" belongs to.
//
void Compiler::optRemoveCommaBasedRangeCheck(GenTree* comma, Statement* stmt)
{
    assert(comma != nullptr && comma->OperIs(GT_COMMA));
    assert(stmt != nullptr);
    assert(comma->gtGetOp1()->OperIs(GT_BOUNDS_CHECK));

    optRemoveRangeCheck(comma->gtGetOp1()->AsBoundsChk(), comma, stmt);
}

/*****************************************************************************
 * Return the scale in an array reference, given a pointer to the
 * multiplication node.
 */
ssize_t Compiler::optGetArrayRefScaleAndIndex(GenTree* mul, GenTree** pIndex DEBUGARG(bool bRngChk))
{
    assert(mul);
    assert(mul->gtOper == GT_MUL || mul->gtOper == GT_LSH);
    assert(mul->AsOp()->gtOp2->IsCnsIntOrI());

    ssize_t scale = mul->AsOp()->gtOp2->AsIntConCommon()->IconValue();

    if (mul->gtOper == GT_LSH)
    {
        // For a left shift the scale is 2^shiftAmount.
        scale = ((ssize_t)1) << scale;
    }

    GenTree* index = mul->AsOp()->gtOp1;

    if (index->gtOper == GT_MUL && index->AsOp()->gtOp2->IsCnsIntOrI())
    {
        // case of two cascading multiplications for constant int (e.g.  * 20 morphed to * 5 * 4):
        // When index->gtOper is GT_MUL and index->AsOp()->gtOp2->gtOper is GT_CNS_INT (i.e. * 5),
        // we can bump up the scale from 4 to 5*4, and then change index to index->AsOp()->gtOp1.
        // Otherwise, we cannot optimize it. We will simply keep the original scale and index.
        scale *= index->AsOp()->gtOp2->AsIntConCommon()->IconValue();
        index = index->AsOp()->gtOp1;
    }

    assert(!bRngChk || index->gtOper != GT_COMMA);

    if (pIndex)
    {
        *pIndex = index;
    }

    return scale;
}

//-----------------------------------------------------------------------------
// OptTestInfo:     Member of OptBoolsDsc struct used to test if a GT_JTRUE or GT_RETURN node
//                  is a boolean comparison
//
struct OptTestInfo
{
    GenTree* testTree; // The root node of basic block with GT_JTRUE or GT_RETURN type to check boolean condition on
    GenTree* compTree; // The compare node (i.e. GT_EQ or GT_NE node) of the testTree
    bool     isBool;   // If the compTree is boolean expression
};

//-----------------------------------------------------------------------------
// OptBoolsDsc:     Descriptor used for Boolean Optimization
//
class OptBoolsDsc
{
public:
    OptBoolsDsc(BasicBlock* b1, BasicBlock* b2, Compiler* comp)
    {
        m_b1   = b1;
        m_b2   = b2;
        m_b3   = nullptr;
        m_comp = comp;
    }

private:
    BasicBlock* m_b1; // The first basic block with the BBJ_COND conditional jump type
    BasicBlock* m_b2; // The next basic block of m_b1. Either BBJ_COND or BBJ_RETURN type
    BasicBlock* m_b3; // m_b1->bbJumpDest. Null if m_b2 is not a return block.

    Compiler* m_comp; // The pointer to the Compiler instance

    OptTestInfo m_testInfo1; // The first test info
    OptTestInfo m_testInfo2; // The second test info
    GenTree*    m_t3;        // The root node of the first statement of m_b3

    GenTree* m_c1; // The first operand of m_testInfo1.compTree
    GenTree* m_c2; // The first operand of m_testInfo2.compTree

    bool m_sameTarget; // if m_b1 and m_b2 jumps to the same destination

    genTreeOps m_foldOp;   // The fold operator (e.g., GT_AND or GT_OR)
    var_types  m_foldType; // The type of the folded tree
    genTreeOps m_cmpOp;    // The comparison operator (e.g., GT_EQ or GT_NE)

public:
    bool optOptimizeBoolsCondBlock();
    bool optOptimizeBoolsReturnBlock(BasicBlock* b3);
#ifdef DEBUG
    void optOptimizeBoolsGcStress();
#endif

private:
    Statement* optOptimizeBoolsChkBlkCond();
    GenTree* optIsBoolComp(OptTestInfo* pOptTest);
    bool optOptimizeBoolsChkTypeCostCond();
    void optOptimizeBoolsUpdateTrees();
};

//-----------------------------------------------------------------------------
//  optOptimizeBoolsCondBlock:  Optimize boolean when bbJumpKind of both m_b1 and m_b2 are BBJ_COND
//
//  Returns:
//      true if boolean optimization is done and m_b1 and m_b2 are folded into m_b1, else false.
//
//  Notes:
//      m_b1 and m_b2 are set on entry.
//
//      Case 1: if b1.bbJumpDest == b2.bbJumpDest, it transforms
//          B1 : brtrue(t1, Bx)
//          B2 : brtrue(t2, Bx)
//          B3 :
//      to
//          B1 : brtrue(t1|t2, BX)
//          B3 :
//
//          For example, (x == 0 && y == 0 && z == 0) generates
//              B1: GT_JTRUE (BBJ_COND), jump to B4
//              B2: GT_JTRUE (BBJ_COND), jump to B4
//              B3: GT_RETURN (BBJ_RETURN)
//              B4: GT_RETURN (BBJ_RETURN)
//          and B1 and B2 are folded into B1:
//              B1: GT_JTRUE (BBJ_COND), jump to B4
//              B3: GT_RETURN (BBJ_RETURN)
//              B4: GT_RETURN (BBJ_RETURN)
//
//      Case 2: if B1.bbJumpDest == B2->bbNext, it transforms
//          B1 : brtrue(t1, B3)
//          B2 : brtrue(t2, Bx)
//          B3 :
//      to
//          B1 : brtrue((!t1) && t2, Bx)
//          B3 :
//
bool OptBoolsDsc::optOptimizeBoolsCondBlock()
{
    assert(m_b1 != nullptr && m_b2 != nullptr && m_b3 == nullptr);

    // Check if m_b1 and m_b2 jump to the same target and get back pointers to m_testInfo1 and t2 tree nodes

    m_t3 = nullptr;

    // Check if m_b1 and m_b2 have the same bbJumpDest

    if (m_b1->bbJumpDest == m_b2->bbJumpDest)
    {
        // Given the following sequence of blocks :
        //        B1: brtrue(t1, BX)
        //        B2: brtrue(t2, BX)
        //        B3:
        // we will try to fold it to :
        //        B1: brtrue(t1|t2, BX)
        //        B3:

        m_sameTarget = true;
    }
    else if (m_b1->bbJumpDest == m_b2->bbNext)
    {
        // Given the following sequence of blocks :
        //        B1: brtrue(t1, B3)
        //        B2: brtrue(t2, BX)
        //        B3:
        // we will try to fold it to :
        //        B1: brtrue((!t1)&&t2, BX)
        //        B3:

        m_sameTarget = false;
    }
    else
    {
        return false;
    }

    Statement* const s1 = optOptimizeBoolsChkBlkCond();
    if (s1 == nullptr)
    {
        return false;
    }

    // Find the branch conditions of m_b1 and m_b2

    m_c1 = optIsBoolComp(&m_testInfo1);
    if (m_c1 == nullptr)
    {
        return false;
    }

    m_c2 = optIsBoolComp(&m_testInfo2);
    if (m_c2 == nullptr)
    {
        return false;
    }

    // Find the type and cost conditions of m_testInfo1 and m_testInfo2

    if (!optOptimizeBoolsChkTypeCostCond())
    {
        return false;
    }

    // Get the fold operator and the comparison operator

    genTreeOps foldOp;
    genTreeOps cmpOp;
    var_types  foldType = m_c1->TypeGet();
    if (varTypeIsGC(foldType))
    {
        // GC-typed comparands are folded as native-int bit patterns.
        foldType = TYP_I_IMPL;
    }

    assert(m_testInfo1.compTree->gtOper == GT_EQ || m_testInfo1.compTree->gtOper == GT_NE);

    if (m_sameTarget)
    {
        // Both conditions must be the same

        if (m_testInfo1.compTree->gtOper != m_testInfo2.compTree->gtOper)
        {
            return false;
        }

        if (m_testInfo1.compTree->gtOper == GT_EQ)
        {
            // t1:c1==0 t2:c2==0 ==> Branch to BX if either value is 0
            // So we will branch to BX if (c1&c2)==0

            foldOp = GT_AND;
            cmpOp  = GT_EQ;
        }
        else
        {
            // t1:c1!=0 t2:c2!=0  ==> Branch to BX if either value is non-0
            // So we will branch to BX if (c1|c2)!=0

            foldOp = GT_OR;
            cmpOp  = GT_NE;
        }
    }
    else
    {
        // The m_b1 condition must be the reverse of the m_b2 condition because the only operators
        // that we will see here are GT_EQ and GT_NE. So, if they are not the same, we have one of each.

        if (m_testInfo1.compTree->gtOper == m_testInfo2.compTree->gtOper)
        {
            return false;
        }

        if (m_testInfo1.compTree->gtOper == GT_EQ)
        {
            // t1:c1==0 t2:c2!=0 ==> Branch to BX if both values are non-0
            // So we will branch to BX if (c1&c2)!=0

            foldOp = GT_AND;
            cmpOp  = GT_NE;
        }
        else
        {
            // t1:c1!=0 t2:c2==0 ==> Branch to BX if both values are 0
            // So we will branch to BX if (c1|c2)==0

            foldOp = GT_OR;
            cmpOp  = GT_EQ;
        }
    }

    // Anding requires both values to be 0 or 1

    if ((foldOp == GT_AND) && (!m_testInfo1.isBool || !m_testInfo2.isBool))
    {
        return false;
    }

    //
    // Now update the trees
    //

    m_foldOp   = foldOp;
    m_foldType = foldType;
    m_cmpOp    = cmpOp;

    optOptimizeBoolsUpdateTrees();

#ifdef DEBUG
    if (m_comp->verbose)
    {
        printf("Folded %sboolean conditions of " FMT_BB " and " FMT_BB " to :\n", m_c2->OperIsLeaf() ? "" : "non-leaf ",
               m_b1->bbNum, m_b2->bbNum);
        m_comp->gtDispStmt(s1);
        printf("\n");
    }
#endif

    // Return true to continue the bool optimization for the rest of the BB chain
    return true;
}

//-----------------------------------------------------------------------------
// optOptimizeBoolsChkBlkCond: Checks block conditions if it can be boolean optimized
//
// Return:
//      If all conditions pass, returns the last statement of m_b1, else return nullptr.
//
// Notes:
//      This method checks if the second (and third block for cond/return/return case) contains only one statement,
//      and checks if tree operators are of the right type, e.g, GT_JTRUE, GT_RETURN.
//
//      On entry, m_b1, m_b2 are set and m_b3 is set for cond/return/return case.
//      If it passes all the conditions, m_testInfo1.testTree, m_testInfo2.testTree and m_t3 are set
//      to the root nodes of m_b1, m_b2 and m_b3 each.
//      SameTarget is also updated to true if m_b1 and m_b2 jump to the same destination.
//
Statement* OptBoolsDsc::optOptimizeBoolsChkBlkCond()
{
    assert(m_b1 != nullptr && m_b2 != nullptr);

    // optReturnBlock distinguishes the cond/return/return shape from cond/cond.
    bool optReturnBlock = false;
    if (m_b3 != nullptr)
    {
        optReturnBlock = true;
    }

    // Find the block conditions of m_b1 and m_b2

    if (m_b2->countOfInEdges() > 1 || (optReturnBlock && m_b3->countOfInEdges() > 1))
    {
        return nullptr;
    }

    // Find the condition for the first block

    Statement* s1 = m_b1->lastStmt();

    GenTree* testTree1 = s1->GetRootNode();
    assert(testTree1->gtOper == GT_JTRUE);

    // The second and the third block must contain a single statement

    Statement* s2 = m_b2->firstStmt();
    if (s2->GetPrevStmt() != s2)
    {
        return nullptr;
    }

    GenTree* testTree2 = s2->GetRootNode();

    if (!optReturnBlock)
    {
        assert(testTree2->gtOper == GT_JTRUE);
    }
    else
    {
        if (testTree2->gtOper != GT_RETURN)
        {
            return nullptr;
        }

        Statement* s3 = m_b3->firstStmt();
        if (s3->GetPrevStmt() != s3)
        {
            return nullptr;
        }

        GenTree* testTree3 = s3->GetRootNode();
        if (testTree3->gtOper != GT_RETURN)
        {
            return nullptr;
        }

        if (!varTypeIsIntegral(testTree2->TypeGet()) || !varTypeIsIntegral(testTree3->TypeGet()))
        {
            return nullptr;
        }

        // The third block is Return with "CNS_INT int 0/1"
        if (testTree3->AsOp()->gtOp1->gtOper != GT_CNS_INT)
        {
            return nullptr;
        }

        if (testTree3->AsOp()->gtOp1->gtType != TYP_INT)
        {
            return nullptr;
        }

        m_t3 = testTree3;
    }

    m_testInfo1.testTree = testTree1;
    m_testInfo2.testTree = testTree2;

    return s1;
}

//-----------------------------------------------------------------------------
// optOptimizeBoolsChkTypeCostCond: Checks if type conditions meet the folding condition, and
//                                  if cost to fold is not too expensive
//
// Return:
//      True if it meets type conditions and cost conditions. Else false.
//
bool OptBoolsDsc::optOptimizeBoolsChkTypeCostCond()
{
    assert(m_testInfo1.compTree->OperIs(GT_EQ, GT_NE) && m_testInfo1.compTree->AsOp()->gtOp1 == m_c1);
    assert(m_testInfo2.compTree->OperIs(GT_EQ, GT_NE) && m_testInfo2.compTree->AsOp()->gtOp1 == m_c2);

    //
    // Leave out floats where the bit-representation is more complicated
    // - there are two representations for 0.
    //
    if (varTypeIsFloating(m_c1->TypeGet()) || varTypeIsFloating(m_c2->TypeGet()))
    {
        return false;
    }

    // Make sure the types involved are of the same sizes
    if (genTypeSize(m_c1->TypeGet()) != genTypeSize(m_c2->TypeGet()))
    {
        return false;
    }
    if (genTypeSize(m_testInfo1.compTree->TypeGet()) != genTypeSize(m_testInfo2.compTree->TypeGet()))
    {
        return false;
    }
#ifdef TARGET_ARMARCH
    // Skip the small operand which we cannot encode.
    if (varTypeIsSmall(m_c1->TypeGet()))
        return false;
#endif
    // The second condition must not contain side effects
    // (it will be evaluated unconditionally after folding).
    if (m_c2->gtFlags & GTF_GLOB_EFFECT)
    {
        return false;
    }

    // The second condition must not be too expensive

    m_comp->gtPrepareCost(m_c2);

    if (m_c2->GetCostEx() > 12)
    {
        return false;
    }

    return true;
}

//-----------------------------------------------------------------------------
// optOptimizeBoolsUpdateTrees: Fold the trees based on fold type and comparison type,
//                              update the edges, unlink removed blocks and update loop table
//
void OptBoolsDsc::optOptimizeBoolsUpdateTrees()
{
    assert(m_b1 != nullptr && m_b2 != nullptr);

    bool optReturnBlock = false;
    if (m_b3 != nullptr)
    {
        optReturnBlock = true;
    }

    assert(m_foldOp != NULL && m_foldType != NULL && m_c1 != nullptr && m_c2 != nullptr);

    // Build the folded comparand: (c1 <foldOp> c2).
    GenTree* cmpOp1 = m_comp->gtNewOperNode(m_foldOp, m_foldType, m_c1, m_c2);
    if (m_testInfo1.isBool && m_testInfo2.isBool)
    {
        // When we 'OR'/'AND' two booleans, the result is boolean as well
        cmpOp1->gtFlags |= GTF_BOOLEAN;
    }

    GenTree* t1Comp = m_testInfo1.compTree;
    t1Comp->SetOper(m_cmpOp);
    t1Comp->AsOp()->gtOp1         = cmpOp1;
    t1Comp->AsOp()->gtOp2->gtType = m_foldType; // Could have been varTypeIsGC()
    if (optReturnBlock)
    {
        // Update tree when m_b1 is BBJ_COND and m_b2 and m_b3 are GT_RETURN (BBJ_RETURN)
        t1Comp->AsOp()->gtOp2->AsIntCon()->gtIconVal = 0;
        m_testInfo1.testTree->gtOper                 = GT_RETURN;
        m_testInfo1.testTree->gtType                 = m_testInfo2.testTree->gtType;

        // Update the return count of flow graph
        assert(m_comp->fgReturnCount >= 2);
        --m_comp->fgReturnCount;
    }

#if FEATURE_SET_FLAGS
    // For comparisons against zero we will have the GTF_SET_FLAGS set
    // and this can cause an assert to fire in fgMoveOpsLeft(GenTree* tree)
    // during the CSE phase.
    //
    // So make sure to clear any GTF_SET_FLAGS bit on these operations
    // as they are no longer feeding directly into a comparisons against zero

    // Make sure that the GTF_SET_FLAGS bit is cleared.
    // Fix 388436 ARM JitStress WP7
    m_c1->gtFlags &= ~GTF_SET_FLAGS;
    m_c2->gtFlags &= ~GTF_SET_FLAGS;

    // The new top level node that we just created does feed directly into
    // a comparison against zero, so set the GTF_SET_FLAGS bit so that
    // we generate an instruction that sets the flags, which allows us
    // to omit the cmp with zero instruction.

    // Request that the codegen for cmpOp1 sets the condition flags
    // when it generates the code for cmpOp1.
    //
    cmpOp1->gtRequestSetFlags();
#endif

    if (!optReturnBlock)
    {
        // Update edges if m_b1: BBJ_COND and m_b2: BBJ_COND

        flowList* edge1 = m_comp->fgGetPredForBlock(m_b1->bbJumpDest, m_b1);
        flowList* edge2;

        if (m_sameTarget)
        {
            edge2 = m_comp->fgGetPredForBlock(m_b2->bbJumpDest, m_b2);
        }
        else
        {
            edge2 = m_comp->fgGetPredForBlock(m_b2->bbNext, m_b2);

            m_comp->fgRemoveRefPred(m_b1->bbJumpDest, m_b1);

            m_b1->bbJumpDest = m_b2->bbJumpDest;

            m_comp->fgAddRefPred(m_b2->bbJumpDest, m_b1);
        }

        assert(edge1 != nullptr);
        assert(edge2 != nullptr);

        // Sum the two edges' weights; fall back to the widest possible range on overflow.
        weight_t edgeSumMin = edge1->edgeWeightMin() + edge2->edgeWeightMin();
        weight_t edgeSumMax = edge1->edgeWeightMax() + edge2->edgeWeightMax();
        if ((edgeSumMax >= edge1->edgeWeightMax()) && (edgeSumMax >= edge2->edgeWeightMax()))
        {
            edge1->setEdgeWeights(edgeSumMin, edgeSumMax, m_b1->bbJumpDest);
        }
        else
        {
            edge1->setEdgeWeights(BB_ZERO_WEIGHT, BB_MAX_WEIGHT, m_b1->bbJumpDest);
        }
    }

    /* Modify the target of the conditional jump and update bbRefs and bbPreds */

    if (optReturnBlock)
    {
        m_b1->bbJumpDest = nullptr;
        m_b1->bbJumpKind = BBJ_RETURN;
#ifdef DEBUG
        m_b1->bbJumpSwt = m_b2->bbJumpSwt;
#endif
        assert(m_b2->bbJumpKind == BBJ_RETURN);
        assert(m_b1->bbNext == m_b2);
        assert(m_b3 != nullptr);
    }
    else
    {
        assert(m_b1->bbJumpKind == BBJ_COND);
        assert(m_b2->bbJumpKind == BBJ_COND);
        assert(m_b1->bbJumpDest == m_b2->bbJumpDest);
        assert(m_b1->bbNext == m_b2);
        assert(m_b2->bbNext != nullptr);
    }

    if (!optReturnBlock)
    {
        // Update bbRefs and bbPreds
        //
        // Replace pred 'm_b2' for 'm_b2->bbNext' with 'm_b1'
        // Remove pred 'm_b2' for 'm_b2->bbJumpDest'
        m_comp->fgReplacePred(m_b2->bbNext, m_b2, m_b1);
        m_comp->fgRemoveRefPred(m_b2->bbJumpDest, m_b2);
    }

    // Get rid of the second block

    m_comp->fgUnlinkBlock(m_b2);
    m_b2->bbFlags |= BBF_REMOVED;
    // If m_b2 was the last block of a try or handler, update the EH table.
    m_comp->ehUpdateForDeletedBlock(m_b2);

    if (optReturnBlock)
    {
        // Get rid of the third block
        m_comp->fgUnlinkBlock(m_b3);
        m_b3->bbFlags |= BBF_REMOVED;
        // If m_b3 was the last block of a try or handler, update the EH table.
        m_comp->ehUpdateForDeletedBlock(m_b3);
    }

    // Update loop table
    m_comp->fgUpdateLoopsAfterCompacting(m_b1, m_b2);
    if (optReturnBlock)
    {
        m_comp->fgUpdateLoopsAfterCompacting(m_b1, m_b3);
    }
}

//-----------------------------------------------------------------------------
// optOptimizeBoolsReturnBlock: Optimize boolean when m_b1 is BBJ_COND and m_b2 and m_b3 are BBJ_RETURN
//
// Arguments:
//      b3:    Pointer to basic block b3
//
// Returns:
//      true if boolean optimization is done and m_b1, m_b2 and m_b3 are folded into m_b1, else false.
//
// Notes:
//      m_b1, m_b2 and m_b3 of OptBoolsDsc are set on entry.
//
//      if B1.bbJumpDest == b3, it transforms
//          B1 : brtrue(t1, B3)
//          B2 : ret(t2)
//          B3 : ret(0)
//      to
//          B1 : ret((!t1) && t2)
//
//          For example, (x==0 && y==0) generates:
//              B1: GT_JTRUE (BBJ_COND), jumps to B3
//              B2: GT_RETURN (BBJ_RETURN)
//              B3: GT_RETURN (BBJ_RETURN),
//          and it is folded into
//              B1: GT_RETURN (BBJ_RETURN)
//
bool OptBoolsDsc::optOptimizeBoolsReturnBlock(BasicBlock* b3)
{
    assert(m_b1 != nullptr && m_b2 != nullptr);

    // m_b3 is set for cond/return/return case
    m_b3 = b3;

    m_sameTarget        = false;
    Statement* const s1 = optOptimizeBoolsChkBlkCond();
    if (s1 == nullptr)
    {
        return false;
    }

    // Find the branch conditions of m_b1 and m_b2

    m_c1 = optIsBoolComp(&m_testInfo1);
    if (m_c1 == nullptr)
    {
        return false;
    }

    m_c2 = optIsBoolComp(&m_testInfo2);
    if (m_c2 == nullptr)
    {
        return false;
    }

    // Find the type and cost conditions of m_testInfo1 and m_testInfo2

    if (!optOptimizeBoolsChkTypeCostCond())
    {
        return false;
    }

    // Get the fold operator (m_foldOp, e.g., GT_OR/GT_AND) and
    // the comparison operator (m_cmpOp, e.g., GT_EQ/GT_NE)

    var_types foldType = m_c1->TypeGet();
    if (varTypeIsGC(foldType))
    {
        // GC-typed comparands are folded as native-int bit patterns.
        foldType = TYP_I_IMPL;
    }
    m_foldType = foldType;

    m_foldOp = GT_NONE;
    m_cmpOp  = GT_NONE;

    genTreeOps foldOp;
    genTreeOps cmpOp;

    // The constants compared against in t1, t2 and returned by t3; these pick the fold pattern.
    ssize_t it1val = m_testInfo1.compTree->AsOp()->gtOp2->AsIntCon()->gtIconVal;
    ssize_t it2val = m_testInfo2.compTree->AsOp()->gtOp2->AsIntCon()->gtIconVal;
    ssize_t it3val = m_t3->AsOp()->gtOp1->AsIntCon()->gtIconVal;

    if ((m_testInfo1.compTree->gtOper == GT_NE && m_testInfo2.compTree->gtOper == GT_EQ) &&
        (it1val == 0 && it2val == 0 && it3val == 0))
    {
        // Case: x == 0 && y == 0
        //      t1:c1!=0 t2:c2==0 t3:c3==0
        //      ==> true if (c1|c2)==0

        foldOp = GT_OR;
        cmpOp  = GT_EQ;
    }
    else if ((m_testInfo1.compTree->gtOper == GT_EQ && m_testInfo2.compTree->gtOper == GT_NE) &&
             (it1val == 0 && it2val == 0 && it3val == 0))
    {
        // Case: x == 1 && y ==1
        //      t1:c1!=1 t2:c2==1 t3:c3==0 is reversed from optIsBoolComp() to: t1:c1==0 t2:c2!=0 t3:c3==0
        //      ==> true if (c1&c2)!=0

        foldOp = GT_AND;
        cmpOp  = GT_NE;
    }
    else if ((m_testInfo1.compTree->gtOper == GT_EQ && m_testInfo2.compTree->gtOper == GT_EQ) &&
             (it1val == 0 && it2val == 0 && it3val == 1))
    {
        // Case: x == 0 || y == 0
        //      t1:c1==0 t2:c2==0 t3:c3==1
        //      ==> true if (c1&c2)==0

        foldOp = GT_AND;
        cmpOp  = GT_EQ;
    }
    else if ((m_testInfo1.compTree->gtOper == GT_NE && m_testInfo2.compTree->gtOper == GT_NE) &&
             (it1val == 0 && it2val == 0 && it3val == 1))
    {
        // Case: x == 1 || y == 1
        //      t1:c1==1 t2:c2==1 t3:c3==1 is reversed from optIsBoolComp() to: t1:c1!=0 t2:c2!=0 t3:c3==1
        //      ==> true if (c1|c2)!=0

        foldOp = GT_OR;
        cmpOp  = GT_NE;
    }
    else
    {
        // Require NOT operation for operand(s). Do Not fold.

        return false;
    }

    if ((foldOp == GT_AND || cmpOp == GT_NE) && (!m_testInfo1.isBool || !m_testInfo2.isBool))
    {
        // x == 1 && y == 1: Skip cases where x or y is greater than 1, e.g., x=3, y=1
        // x == 0 || y == 0: Skip cases where x and y have opposite bits set, e.g., x=2, y=1
        // x == 1 || y == 1: Skip cases where either x or y is greater than 1, e.g., x=2, y=0

        return false;
    }

    m_foldOp = foldOp;
    m_cmpOp  = cmpOp;

    // Now update the trees

    optOptimizeBoolsUpdateTrees();

#ifdef DEBUG
    if (m_comp->verbose)
    {
        printf("Folded %sboolean conditions of " FMT_BB ", " FMT_BB " and " FMT_BB " to :\n",
               m_c2->OperIsLeaf() ? "" : "non-leaf ", m_b1->bbNum, m_b2->bbNum, m_b3->bbNum);
        m_comp->gtDispStmt(s1);
        printf("\n");
    }
#endif

    // Return true to continue the bool optimization for the rest of the BB chain
    return true;
}

//-----------------------------------------------------------------------------
// optOptimizeBoolsGcStress: Replace x==null with (x|x)==0 if x is a GC-type.
//                           This will stress code-gen and the emitter to make sure they support such trees.
// #ifdef DEBUG void OptBoolsDsc::optOptimizeBoolsGcStress() { if (!m_comp->compStressCompile(m_comp->STRESS_OPT_BOOLS_GC, 20)) { return; } assert(m_b1->bbJumpKind == BBJ_COND); GenTree* cond = m_b1->lastStmt()->GetRootNode(); assert(cond->gtOper == GT_JTRUE); OptTestInfo test; test.testTree = cond; GenTree* comparand = optIsBoolComp(&test); if (comparand == nullptr || !varTypeIsGC(comparand->TypeGet())) { return; } GenTree* relop = test.compTree; bool isBool = test.isBool; if (comparand->gtFlags & (GTF_ASG | GTF_CALL | GTF_ORDER_SIDEEFF)) { return; } GenTree* comparandClone = m_comp->gtCloneExpr(comparand); noway_assert(relop->AsOp()->gtOp1 == comparand); genTreeOps oper = m_comp->compStressCompile(m_comp->STRESS_OPT_BOOLS_GC, 50) ? GT_OR : GT_AND; relop->AsOp()->gtOp1 = m_comp->gtNewOperNode(oper, TYP_I_IMPL, comparand, comparandClone); // Comparand type is already checked, and we have const int, there is no harm // morphing it into a TYP_I_IMPL. noway_assert(relop->AsOp()->gtOp2->gtOper == GT_CNS_INT); relop->AsOp()->gtOp2->gtType = TYP_I_IMPL; } #endif //----------------------------------------------------------------------------- // optIsBoolComp: Function used by folding of boolean conditionals // // Arguments: // pOptTest The test info for the test tree // // Return: // On success, return the first operand (gtOp1) of compTree, else return nullptr. // // Notes: // On entry, testTree is set. // On success, compTree is set to the compare node (i.e. GT_EQ or GT_NE) of the testTree. // isBool is set to true if the comparand (i.e., operand 1 of compTree is boolean. Otherwise, false. // // Given a GT_JTRUE or GT_RETURN node, this method checks if it is a boolean comparison // of the form "if (boolVal ==/!= 0/1)".This is translated into // a GT_EQ/GT_NE node with "opr1" being a boolean lclVar and "opr2" the const 0/1. // // When isBool == true, if the comparison was against a 1 (i.e true) // then we morph the tree by reversing the GT_EQ/GT_NE and change the 1 to 0. 
//
GenTree* OptBoolsDsc::optIsBoolComp(OptTestInfo* pOptTest)
{
    pOptTest->isBool = false;

    assert(pOptTest->testTree->gtOper == GT_JTRUE || pOptTest->testTree->gtOper == GT_RETURN);
    GenTree* cond = pOptTest->testTree->AsOp()->gtOp1;

    // The condition must be "!= 0" or "== 0"

    if ((cond->gtOper != GT_EQ) && (cond->gtOper != GT_NE))
    {
        return nullptr;
    }

    // Return the compare node to the caller

    pOptTest->compTree = cond;

    // Get hold of the comparands

    GenTree* opr1 = cond->AsOp()->gtOp1;
    GenTree* opr2 = cond->AsOp()->gtOp2;

    if (opr2->gtOper != GT_CNS_INT)
    {
        return nullptr;
    }

    // Only comparisons against the constants 0 and 1 are candidates.
    if (!opr2->IsIntegralConst(0) && !opr2->IsIntegralConst(1))
    {
        return nullptr;
    }

    ssize_t ival2 = opr2->AsIntCon()->gtIconVal;

    // Is the value a boolean?
    // We can either have a boolean expression (marked GTF_BOOLEAN) or
    // a local variable that is marked as being boolean (lvIsBoolean)

    if (opr1->gtFlags & GTF_BOOLEAN)
    {
        pOptTest->isBool = true;
    }
    else if ((opr1->gtOper == GT_CNS_INT) && (opr1->IsIntegralConst(0) || opr1->IsIntegralConst(1)))
    {
        pOptTest->isBool = true;
    }
    else if (opr1->gtOper == GT_LCL_VAR)
    {
        // is it a boolean local variable?

        unsigned lclNum = opr1->AsLclVarCommon()->GetLclNum();
        noway_assert(lclNum < m_comp->lvaCount);

        if (m_comp->lvaTable[lclNum].lvIsBoolean)
        {
            pOptTest->isBool = true;
        }
    }

    // Was our comparison against the constant 1 (i.e. true)
    if (ival2 == 1)
    {
        // If this is a boolean expression tree we can reverse the relop
        // and change the true to false.
        // NOTE: this mutates the tree in place (gtReverseCond + constant rewrite).
        if (pOptTest->isBool)
        {
            m_comp->gtReverseCond(cond);
            opr2->AsIntCon()->gtIconVal = 0;
        }
        else
        {
            return nullptr;
        }
    }

    return opr1;
}

//-----------------------------------------------------------------------------
// optOptimizeBools:    Folds boolean conditionals for GT_JTRUE/GT_RETURN nodes
//
// Notes:
//      If the operand of GT_JTRUE/GT_RETURN node is GT_EQ/GT_NE of the form
//      "if (boolVal ==/!=  0/1)", the GT_EQ/GT_NE nodes are translated into a
//      GT_EQ/GT_NE node with
//          "op1" being a boolean GT_OR/GT_AND lclVar and
//          "op2" the const 0/1.
//      For example, the folded tree for the below boolean optimization is shown below:
//      Case 1:     (x == 0 && y ==0) => (x | y) == 0
//          *  RETURN   int
//          \--*  EQ        int
//             +--*  OR         int
//             |  +--*  LCL_VAR     int     V00 arg0
//             |  \--*  LCL_VAR     int     V01 arg1
//             \--*  CNS_INT    int     0
//
//      Case 2:     (x == null && y == null) ==> (x | y) == 0
//          *  RETURN    int
//          \-- * EQ        int
//              + -- * OR        long
//              |    +-- * LCL_VAR   ref    V00 arg0
//              |    \-- * LCL_VAR   ref    V01 arg1
//              \-- * CNS_INT   long   0
//
//      Case 3:     (x == 0 && y == 0 && z == 0) ==> ((x | y) | z) == 0
//          *  RETURN    int
//          \-- * EQ        int
//              + -- * OR        int
//              |    +-- * OR        int
//              |    |   +-- * LCL_VAR   int    V00 arg0
//              |    |   \-- * LCL_VAR   int    V01 arg1
//              |    \-- * LCL_VAR   int    V02 arg2
//              \-- * CNS_INT   int    0
//
//      Case 4:     (x == 0 && y == 0 && z == 0 && w == 0) ==> (((x | y) | z) | w) == 0
//          *  RETURN    int
//          \-- *  EQ        int
//              +  *  OR        int
//              |  +--*  OR        int
//              |  |  +--*  OR        int
//              |  |  |  +--*  LCL_VAR   int    V00 arg0
//              |  |  |  \--*  LCL_VAR   int    V01 arg1
//              |  |  \--*  LCL_VAR   int    V02 arg2
//              |  \--*  LCL_VAR   int    V03 arg3
//              \--*  CNS_INT   int    0
//
//      Patterns that are not optimized include (x == 1 && y == 1), (x == 1 || y == 1),
//      (x == 0 || y == 0) because currently their comptree is not marked as boolean expression.
//      When m_foldOp == GT_AND or m_cmpOp == GT_NE, both compTrees must be boolean expression
//      in order to skip below cases when compTree is not boolean expression:
//          - x == 1 && y == 1 ==> (x&y)!=0: Skip cases where x or y is greater than 1, e.g., x=3, y=1
//          - x == 1 || y == 1 ==> (x|y)!=0: Skip cases where either x or y is greater than 1, e.g., x=2, y=0
//          - x == 0 || y == 0 ==> (x&y)==0: Skip cases where x and y have opposite bits set, e.g., x=2, y=1
//
void Compiler::optOptimizeBools()
{
#ifdef DEBUG
    if (verbose)
    {
        printf("*************** In optOptimizeBools()\n");
        if (verboseTrees)
        {
            printf("Blocks/Trees before phase\n");
            fgDispBasicBlocks(true);
        }
    }
#endif
    bool change;

    // Iterate to a fixed point: a successful fold can expose another foldable pair.
    do
    {
        change = false;

        for (BasicBlock* const b1 : Blocks())
        {
            // We're only interested in conditional jumps here

            if (b1->bbJumpKind != BBJ_COND)
            {
                continue;
            }

            // If there is no next block, we're done

            BasicBlock* b2 = b1->bbNext;
            if (b2 == nullptr)
            {
                break;
            }

            // The next block must not be marked as BBF_DONT_REMOVE
            if (b2->bbFlags & BBF_DONT_REMOVE)
            {
                continue;
            }

            OptBoolsDsc optBoolsDsc(b1, b2, this);

            // The next block needs to be a condition or return block.
            if (b2->bbJumpKind == BBJ_COND)
            {
                if ((b1->bbJumpDest != b2->bbJumpDest) && (b1->bbJumpDest != b2->bbNext))
                {
                    continue;
                }

                // When it is conditional jumps

                if (optBoolsDsc.optOptimizeBoolsCondBlock())
                {
                    change = true;
                }
            }
            else if (b2->bbJumpKind == BBJ_RETURN)
            {
                // Set b3 to b1 jump destination
                BasicBlock* b3 = b1->bbJumpDest;

                // b3 must not be marked as BBF_DONT_REMOVE

                if (b3->bbFlags & BBF_DONT_REMOVE)
                {
                    continue;
                }

                // b3 must be RETURN type

                if (b3->bbJumpKind != BBJ_RETURN)
                {
                    continue;
                }

                if (optBoolsDsc.optOptimizeBoolsReturnBlock(b3))
                {
                    change = true;
                }
            }
            else
            {
#ifdef DEBUG
                optBoolsDsc.optOptimizeBoolsGcStress();
#endif
            }
        }
    } while (change);

#ifdef DEBUG
    fgDebugCheckBBlist();
#endif
}

// Maps a local variable number to a reference/def count within a scope.
typedef JitHashTable<unsigned, JitSmallPrimitiveKeyFuncs<unsigned>, unsigned> LclVarRefCounts;

//------------------------------------------------------------------------------------------
// optRemoveRedundantZeroInits: Remove redundant zero initializations.
//
// Notes:
//    This phase iterates over basic blocks starting with the first basic block until there is no unique
//    basic block successor or until it detects a loop. It keeps track of local nodes it encounters.
//    When it gets to an assignment to a local variable or a local field, it checks whether the assignment
//    is the first reference to the local (or to the parent of the local field), and, if so,
//    it may do one of two optimizations:
//      1. If the following conditions are true:
//            the local is untracked,
//            the rhs of the assignment is 0,
//            the local is guaranteed to be fully initialized in the prolog,
//         then the explicit zero initialization is removed.
//      2.
//         If the following conditions are true:
//            the assignment is to a local (and not a field),
//            the local is not lvLiveInOutOfHndlr or no exceptions can be thrown between the prolog and the assignment,
//            either the local has no gc pointers or there are no gc-safe points between the prolog and the assignment,
//         then the local is marked with lvHasExplicitInit which tells the codegen not to insert zero initialization
//         for this local in the prolog.
void Compiler::optRemoveRedundantZeroInits()
{
#ifdef DEBUG
    if (verbose)
    {
        printf("*************** In optRemoveRedundantZeroInits()\n");
    }
#endif // DEBUG

    CompAllocator   allocator(getAllocator(CMK_ZeroInit));
    LclVarRefCounts refCounts(allocator);
    BitVecTraits    bitVecTraits(lvaCount, this);
    BitVec          zeroInitLocals = BitVecOps::MakeEmpty(&bitVecTraits);
    bool            hasGCSafePoint = false;
    bool            canThrow       = false;

    assert(fgStmtListThreaded);

    // Walk the unique-successor chain from the first block; BBF_MARKED is used
    // to detect a cycle (stop when we revisit a marked block).
    for (BasicBlock* block = fgFirstBB; (block != nullptr) && ((block->bbFlags & BBF_MARKED) == 0);
         block             = block->GetUniqueSucc())
    {
        block->bbFlags |= BBF_MARKED;
        CompAllocator   allocator(getAllocator(CMK_ZeroInit));
        LclVarRefCounts defsInBlock(allocator);
        bool            removedTrackedDefs = false;
        for (Statement* stmt = block->FirstNonPhiDef(); stmt != nullptr;)
        {
            // Capture the next statement first: the current one may be removed below.
            Statement* next = stmt->GetNextStmt();
            for (GenTree* const tree : stmt->TreeList())
            {
                if (((tree->gtFlags & GTF_CALL) != 0))
                {
                    hasGCSafePoint = true;
                }

                if ((tree->gtFlags & GTF_EXCEPT) != 0)
                {
                    canThrow = true;
                }

                switch (tree->gtOper)
                {
                    case GT_LCL_VAR:
                    case GT_LCL_FLD:
                    case GT_LCL_VAR_ADDR:
                    case GT_LCL_FLD_ADDR:
                    {
                        unsigned  lclNum    = tree->AsLclVarCommon()->GetLclNum();
                        unsigned* pRefCount = refCounts.LookupPointer(lclNum);
                        if (pRefCount != nullptr)
                        {
                            *pRefCount = (*pRefCount) + 1;
                        }
                        else
                        {
                            refCounts.Set(lclNum, 1);
                        }

                        if ((tree->gtFlags & GTF_VAR_DEF) == 0)
                        {
                            break;
                        }

                        // We need to count the number of tracked var defs in the block
                        // so that we can update block->bbVarDef if we remove any tracked var defs.

                        LclVarDsc* const lclDsc = lvaGetDesc(lclNum);
                        if (lclDsc->lvTracked)
                        {
                            unsigned* pDefsCount = defsInBlock.LookupPointer(lclNum);
                            if (pDefsCount != nullptr)
                            {
                                *pDefsCount = (*pDefsCount) + 1;
                            }
                            else
                            {
                                defsInBlock.Set(lclNum, 1);
                            }
                        }
                        else if (varTypeIsStruct(lclDsc) && ((tree->gtFlags & GTF_VAR_USEASG) == 0) &&
                                 lvaGetPromotionType(lclDsc) != PROMOTION_TYPE_NONE)
                        {
                            // A whole-struct def of a promoted struct also defines each tracked field.
                            for (unsigned i = lclDsc->lvFieldLclStart; i < lclDsc->lvFieldLclStart + lclDsc->lvFieldCnt;
                                 ++i)
                            {
                                if (lvaGetDesc(i)->lvTracked)
                                {
                                    unsigned* pDefsCount = defsInBlock.LookupPointer(i);
                                    if (pDefsCount != nullptr)
                                    {
                                        *pDefsCount = (*pDefsCount) + 1;
                                    }
                                    else
                                    {
                                        defsInBlock.Set(i, 1);
                                    }
                                }
                            }
                        }
                        break;
                    }
                    case GT_ASG:
                    {
                        GenTreeOp* treeOp = tree->AsOp();

                        GenTreeLclVarCommon* lclVar;
                        bool                 isEntire;

                        if (!tree->DefinesLocal(this, &lclVar, &isEntire))
                        {
                            break;
                        }

                        const unsigned lclNum = lclVar->GetLclNum();

                        LclVarDsc* const lclDsc    = lvaGetDesc(lclNum);
                        unsigned*        pRefCount = refCounts.LookupPointer(lclNum);

                        // pRefCount can't be null because the local node on the lhs of the assignment
                        // must have already been seen.
                        assert(pRefCount != nullptr);
                        if (*pRefCount != 1)
                        {
                            break;
                        }

                        // If this is a promoted field, any prior reference to the parent
                        // struct also counts as a reference to this local.
                        unsigned parentRefCount = 0;
                        if (lclDsc->lvIsStructField && refCounts.Lookup(lclDsc->lvParentLcl, &parentRefCount) &&
                            (parentRefCount != 0))
                        {
                            break;
                        }

                        // Symmetrically, prior references to any promoted field count
                        // as references to the parent struct.
                        unsigned fieldRefCount = 0;
                        if (lclDsc->lvPromoted)
                        {
                            for (unsigned i = lclDsc->lvFieldLclStart;
                                 (fieldRefCount == 0) && (i < lclDsc->lvFieldLclStart + lclDsc->lvFieldCnt); ++i)
                            {
                                refCounts.Lookup(i, &fieldRefCount);
                            }
                        }

                        if (fieldRefCount != 0)
                        {
                            break;
                        }

                        // The local hasn't been referenced before this assignment.
                        bool removedExplicitZeroInit = false;

                        if (treeOp->gtGetOp2()->IsIntegralConst(0))
                        {
                            bool bbInALoop  = (block->bbFlags & BBF_BACKWARD_JUMP) != 0;
                            bool bbIsReturn = block->bbJumpKind == BBJ_RETURN;

                            if (!bbInALoop || bbIsReturn)
                            {
                                if (BitVecOps::IsMember(&bitVecTraits, zeroInitLocals, lclNum) ||
                                    (lclDsc->lvIsStructField &&
                                     BitVecOps::IsMember(&bitVecTraits, zeroInitLocals, lclDsc->lvParentLcl)) ||
                                    ((!lclDsc->lvTracked || !isEntire) &&
                                     !fgVarNeedsExplicitZeroInit(lclNum, bbInALoop, bbIsReturn)))
                                {
                                    // We are guaranteed to have a zero initialization in the prolog or a
                                    // dominating explicit zero initialization and the local hasn't been redefined
                                    // between the prolog and this explicit zero initialization so the assignment
                                    // can be safely removed.
                                    if (tree == stmt->GetRootNode())
                                    {
                                        fgRemoveStmt(block, stmt);
                                        removedExplicitZeroInit      = true;
                                        lclDsc->lvSuppressedZeroInit = 1;

                                        if (lclDsc->lvTracked)
                                        {
                                            removedTrackedDefs   = true;
                                            unsigned* pDefsCount = defsInBlock.LookupPointer(lclNum);
                                            *pDefsCount          = (*pDefsCount) - 1;
                                        }
                                    }
                                }

                                if (isEntire)
                                {
                                    BitVecOps::AddElemD(&bitVecTraits, zeroInitLocals, lclNum);
                                }
                                *pRefCount = 0;
                            }
                        }

                        if (!removedExplicitZeroInit && isEntire && (!canThrow || !lclDsc->lvLiveInOutOfHndlr))
                        {
                            // If compMethodRequiresPInvokeFrame() returns true, lower may later
                            // insert a call to CORINFO_HELP_INIT_PINVOKE_FRAME which is a gc-safe point.
                            if (!lclDsc->HasGCPtr() ||
                                (!GetInterruptible() && !hasGCSafePoint && !compMethodRequiresPInvokeFrame()))
                            {
                                // The local hasn't been used and won't be reported to the gc between
                                // the prolog and this explicit initialization. Therefore, it doesn't
                                // require zero initialization in the prolog.
                                lclDsc->lvHasExplicitInit = 1;
                                JITDUMP("Marking " FMT_LP " as having an explicit init\n", lclNum);
                            }
                        }
                        break;
                    }
                    default:
                        break;
                }
            }
            stmt = next;
        }

        if (removedTrackedDefs)
        {
            // Remove from bbVarDef any tracked local whose only def in this block was deleted.
            LclVarRefCounts::KeyIterator iter(defsInBlock.Begin());
            LclVarRefCounts::KeyIterator end(defsInBlock.End());
            for (; !iter.Equal(end); iter++)
            {
                unsigned int lclNum = iter.Get();
                if (defsInBlock[lclNum] == 0)
                {
                    VarSetOps::RemoveElemD(this, block->bbVarDef, lvaGetDesc(lclNum)->lvVarIndex);
                }
            }
        }
    }

    // Second pass: clear the BBF_MARKED flags set above.
    for (BasicBlock* block = fgFirstBB; (block != nullptr) && ((block->bbFlags & BBF_MARKED) != 0);
         block             = block->GetUniqueSucc())
    {
        block->bbFlags &= ~BBF_MARKED;
    }
}

#ifdef DEBUG

//------------------------------------------------------------------------
// optAnyChildNotRemoved: Recursively check the child loops of a loop to see if any of them
// are still live (that is, not marked as LPFLG_REMOVED). This check is done when we are
// removing a parent, just to notify that there is something odd about leaving a live child.
//
// Arguments:
//      loopNum - the loop number to check
//
bool Compiler::optAnyChildNotRemoved(unsigned loopNum)
{
    assert(loopNum < optLoopCount);

    // Now recursively mark the children.
    for (BasicBlock::loopNumber l = optLoopTable[loopNum].lpChild; //
         l != BasicBlock::NOT_IN_LOOP;                             //
         l = optLoopTable[l].lpSibling)
    {
        if ((optLoopTable[l].lpFlags & LPFLG_REMOVED) == 0)
        {
            return true;
        }

        if (optAnyChildNotRemoved(l))
        {
            return true;
        }
    }

    // All children were removed
    return false;
}

#endif // DEBUG

//------------------------------------------------------------------------
// optMarkLoopRemoved: Mark the specified loop as removed (some optimization, such as unrolling, has made the
// loop no longer exist). Note that only the given loop is marked as being removed; if it has any children,
// they are not touched (but a warning message is output to the JitDump).
// // Arguments: // loopNum - the loop number to remove // void Compiler::optMarkLoopRemoved(unsigned loopNum) { JITDUMP("Marking loop " FMT_LP " removed\n", loopNum); assert(loopNum < optLoopCount); LoopDsc& loop = optLoopTable[loopNum]; loop.lpFlags |= LPFLG_REMOVED; #ifdef DEBUG if (optAnyChildNotRemoved(loopNum)) { JITDUMP("Removed loop " FMT_LP " has one or more live children\n", loopNum); } // Note: we can't call `fgDebugCheckLoopTable()` here because if there are live children, it will assert. // Assume the caller is going to fix up the table and `bbNatLoopNum` block annotations before the next time // `fgDebugCheckLoopTable()` is called. #endif // DEBUG }
1
dotnet/runtime
66,234
Remove compiler warning suppression
Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
AaronRobinsonMSFT
2022-03-05T03:45:49Z
2022-03-09T00:57:12Z
220e67755ae6323a01011b83070dff6f84b02519
853e494abd1c1e12d28a76829b4680af7f694afa
Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
./src/coreclr/pal/inc/pal.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*++ Module Name: pal.h Abstract: CoreCLR Platform Adaptation Layer (PAL) header file. This file defines all types and API calls required by the CoreCLR when compiled for Unix-like systems. Defines which control the behavior of this include file: UNICODE - define it to set the Ansi/Unicode neutral names to be the ...W names. Otherwise the neutral names default to be the ...A names. PAL_IMPLEMENTATION - define it when implementing the PAL. Otherwise leave it undefined when consuming the PAL. Note: some fields in structs have been renamed from the original SDK documentation names, with _PAL_Undefined appended. This leaves the structure layout identical to its Win32 version, but prevents PAL consumers from inadvertently referencing undefined fields. If you want to add a PAL_ wrapper function to a native function in here, you also need to edit palinternal.h and win32pal.h. --*/ #ifndef __PAL_H__ #define __PAL_H__ #ifdef PAL_STDCPP_COMPAT #include <float.h> #include <limits.h> #include <stddef.h> #include <stdio.h> #include <stdlib.h> #include <stdarg.h> #include <stdint.h> #include <string.h> #include <errno.h> #include <ctype.h> #include <sys/stat.h> #include <sys/types.h> #include <unistd.h> #endif #ifdef __cplusplus extern "C" { #endif // This macro is used to standardize the wide character string literals between UNIX and Windows. // Unix L"" is UTF32, and on windows it's UTF16. Because of built-in assumptions on the size // of string literals, it's important to match behaviour between Unix and Windows. Unix will be defined // as u"" (char16_t) #define W(str) u##str // Undefine the QUOTE_MACRO_L helper and redefine it in terms of u. // The reason that we do this is that quote macro is defined in ndp\common\inc, // not inside of coreclr sources. 
#define QUOTE_MACRO_L(x) QUOTE_MACRO_u(x) #define QUOTE_MACRO_u_HELPER(x) u###x #define QUOTE_MACRO_u(x) QUOTE_MACRO_u_HELPER(x) #include <pal_error.h> #include <pal_mstypes.h> // Native system libray handle. // On Unix systems, NATIVE_LIBRARY_HANDLE type represents a library handle not registered with the PAL. typedef PVOID NATIVE_LIBRARY_HANDLE; /******************* Processor-specific glue *****************************/ #ifndef _MSC_VER #if defined(__i686__) && !defined(_M_IX86) #define _M_IX86 600 #elif defined(__i586__) && !defined(_M_IX86) #define _M_IX86 500 #elif defined(__i486__) && !defined(_M_IX86) #define _M_IX86 400 #elif defined(__i386__) && !defined(_M_IX86) #define _M_IX86 300 #elif defined(__x86_64__) && !defined(_M_AMD64) #define _M_AMD64 100 #elif defined(__arm__) && !defined(_M_ARM) #define _M_ARM 7 #elif defined(__aarch64__) && !defined(_M_ARM64) #define _M_ARM64 1 #elif defined(__loongarch64) && !defined(_M_LOONGARCH64) #define _M_LOONGARCH64 1 #elif defined(__s390x__) && !defined(_M_S390X) #define _M_S390X 1 #endif #if defined(_M_IX86) && !defined(HOST_X86) #define HOST_X86 #elif defined(_M_AMD64) && !defined(HOST_AMD64) #define HOST_AMD64 #elif defined(_M_ARM) && !defined(HOST_ARM) #define HOST_ARM #elif defined(_M_ARM64) && !defined(HOST_ARM64) #define HOST_ARM64 #elif defined(_M_LOONGARCH64) && !defined(HOST_LOONGARCH64) #define HOST_LOONGARCH64 #elif defined(_M_S390X) && !defined(HOST_S390X) #define HOST_S390X #endif #endif // !_MSC_VER /******************* ABI-specific glue *******************************/ #define MAX_PATH 260 #define _MAX_PATH 260 #define _MAX_DRIVE 3 /* max. length of drive component */ #define _MAX_DIR 256 /* max. length of path component */ #define _MAX_FNAME 256 /* max. length of file name component */ #define _MAX_EXT 256 /* max. 
length of extension component */ // In some Win32 APIs MAX_PATH is used for file names (even though 256 is the normal file system limit) // use _MAX_PATH_FNAME to indicate these cases #define MAX_PATH_FNAME MAX_PATH #define MAX_LONGPATH 1024 /* max. length of full pathname */ #define MAXSHORT 0x7fff #define MAXLONG 0x7fffffff #define MAXCHAR 0x7f #define MAXDWORD 0xffffffff // Sorting IDs. // // Note that the named locale APIs (eg CompareStringExEx) are recommended. // #define LANG_ENGLISH 0x09 /******************* Compiler-specific glue *******************************/ #ifndef THROW_DECL #if defined(_MSC_VER) || !defined(__cplusplus) #define THROW_DECL #else #define THROW_DECL throw() #endif // !_MSC_VER #endif // !THROW_DECL #ifdef __sun #define MATH_THROW_DECL #else #define MATH_THROW_DECL THROW_DECL #endif #if defined(_MSC_VER) #define DECLSPEC_ALIGN(x) __declspec(align(x)) #else #define DECLSPEC_ALIGN(x) __attribute__ ((aligned(x))) #endif #define DECLSPEC_NORETURN PAL_NORETURN #ifdef __clang_analyzer__ #define ANALYZER_NORETURN __attribute((analyzer_noreturn)) #else #define ANALYZER_NORETURN #endif #define EMPTY_BASES_DECL #if !defined(_MSC_VER) || defined(SOURCE_FORMATTING) #define __assume(x) (void)0 #define __annotation(x) #endif //!MSC_VER #define UNALIGNED #ifndef FORCEINLINE #if _MSC_VER < 1200 #define FORCEINLINE inline #else #define FORCEINLINE __forceinline #endif #endif #ifndef NOOPT_ATTRIBUTE #if defined(__llvm__) #define NOOPT_ATTRIBUTE optnone #elif defined(__GNUC__) #define NOOPT_ATTRIBUTE optimize("O0") #endif #endif #ifndef NODEBUG_ATTRIBUTE #if defined(__llvm__) #define NODEBUG_ATTRIBUTE __nodebug__ #elif defined(__GNUC__) #define NODEBUG_ATTRIBUTE __artificial__ #endif #endif #ifndef __has_cpp_attribute #define __has_cpp_attribute(x) (0) #endif #ifndef FALLTHROUGH #if __has_cpp_attribute(fallthrough) #define FALLTHROUGH [[fallthrough]] #else // __has_cpp_attribute(fallthrough) #define FALLTHROUGH #endif // __has_cpp_attribute(fallthrough) 
#endif // FALLTHROUGH #ifndef PAL_STDCPP_COMPAT #if __GNUC__ typedef __builtin_va_list va_list; /* We should consider if the va_arg definition here is actually necessary. Could we use the standard va_arg definition? */ #define va_start __builtin_va_start #define va_arg __builtin_va_arg #define va_copy __builtin_va_copy #define va_end __builtin_va_end #define VOID void #else // __GNUC__ typedef char * va_list; #define _INTSIZEOF(n) ( (sizeof(n) + sizeof(int) - 1) & ~(sizeof(int) - 1) ) #if _MSC_VER >= 1400 #ifdef __cplusplus #define _ADDRESSOF(v) ( &reinterpret_cast<const char &>(v) ) #else #define _ADDRESSOF(v) ( &(v) ) #endif #define _crt_va_start(ap,v) ( ap = (va_list)_ADDRESSOF(v) + _INTSIZEOF(v) ) #define _crt_va_arg(ap,t) ( *(t *)((ap += _INTSIZEOF(t)) - _INTSIZEOF(t)) ) #define _crt_va_end(ap) ( ap = (va_list)0 ) #define va_start _crt_va_start #define va_arg _crt_va_arg #define va_end _crt_va_end #else // _MSC_VER #define va_start(ap,v) (ap = (va_list) (&(v)) + _INTSIZEOF(v)) #define va_arg(ap,t) ( *(t *)((ap += _INTSIZEOF(t)) - _INTSIZEOF(t)) ) #define va_end(ap) #endif // _MSC_VER #define va_copy(dest,src) (dest = src) #endif // __GNUC__ #define CHAR_BIT 8 #define SCHAR_MIN (-128) #define SCHAR_MAX 127 #define UCHAR_MAX 0xff #define SHRT_MIN (-32768) #define SHRT_MAX 32767 #define USHRT_MAX 0xffff #define INT_MIN (-2147483647 - 1) #define INT_MAX 2147483647 #define UINT_MAX 0xffffffff // LONG_MIN, LONG_MAX, ULONG_MAX -- use INT32_MIN etc. instead. 
#define FLT_MAX 3.402823466e+38F #define DBL_MAX 1.7976931348623157e+308 #endif // !PAL_STDCPP_COMPAT /******************* PAL-Specific Entrypoints *****************************/ #define IsDebuggerPresent PAL_IsDebuggerPresent PALIMPORT BOOL PALAPI PAL_IsDebuggerPresent(); /* minimum signed 64 bit value */ #define _I64_MIN (I64(-9223372036854775807) - 1) /* maximum signed 64 bit value */ #define _I64_MAX I64(9223372036854775807) /* maximum unsigned 64 bit value */ #define _UI64_MAX UI64(0xffffffffffffffff) #define _I8_MAX SCHAR_MAX #define _I8_MIN SCHAR_MIN #define _I16_MAX SHRT_MAX #define _I16_MIN SHRT_MIN #define _I32_MAX INT_MAX #define _I32_MIN INT_MIN #define _UI8_MAX UCHAR_MAX #define _UI8_MIN UCHAR_MIN #define _UI16_MAX USHRT_MAX #define _UI16_MIN USHRT_MIN #define _UI32_MAX UINT_MAX #define _UI32_MIN UINT_MIN #undef NULL #if defined(__cplusplus) #define NULL 0 #else #define NULL ((PVOID)0) #endif #if defined(PAL_STDCPP_COMPAT) && !defined(__cplusplus) #define nullptr NULL #endif // defined(PAL_STDCPP_COMPAT) && !defined(__cplusplus) #ifndef PAL_STDCPP_COMPAT typedef __int64 time_t; #define _TIME_T_DEFINED #endif // !PAL_STDCPP_COMPAT #define DLL_PROCESS_ATTACH 1 #define DLL_THREAD_ATTACH 2 #define DLL_THREAD_DETACH 3 #define DLL_PROCESS_DETACH 0 #define PAL_INITIALIZE_NONE 0x00 #define PAL_INITIALIZE_SYNC_THREAD 0x01 #define PAL_INITIALIZE_EXEC_ALLOCATOR 0x02 #define PAL_INITIALIZE_STD_HANDLES 0x04 #define PAL_INITIALIZE_REGISTER_SIGTERM_HANDLER 0x08 #define PAL_INITIALIZE_DEBUGGER_EXCEPTIONS 0x10 #define PAL_INITIALIZE_ENSURE_STACK_SIZE 0x20 #define PAL_INITIALIZE_REGISTER_SIGNALS 0x40 #define PAL_INITIALIZE_REGISTER_ACTIVATION_SIGNAL 0x80 // PAL_Initialize() flags #define PAL_INITIALIZE (PAL_INITIALIZE_SYNC_THREAD | \ PAL_INITIALIZE_STD_HANDLES) // PAL_InitializeDLL() flags - don't start any of the helper threads or register any exceptions #define PAL_INITIALIZE_DLL PAL_INITIALIZE_NONE // PAL_InitializeCoreCLR() flags #define PAL_INITIALIZE_CORECLR 
(PAL_INITIALIZE | \ PAL_INITIALIZE_EXEC_ALLOCATOR | \ PAL_INITIALIZE_REGISTER_SIGTERM_HANDLER | \ PAL_INITIALIZE_DEBUGGER_EXCEPTIONS | \ PAL_INITIALIZE_ENSURE_STACK_SIZE | \ PAL_INITIALIZE_REGISTER_SIGNALS | \ PAL_INITIALIZE_REGISTER_ACTIVATION_SIGNAL) typedef DWORD (PALAPI_NOEXPORT *PTHREAD_START_ROUTINE)(LPVOID lpThreadParameter); typedef PTHREAD_START_ROUTINE LPTHREAD_START_ROUTINE; /******************* PAL-Specific Entrypoints *****************************/ PALIMPORT int PALAPI PAL_Initialize( int argc, char * const argv[]); PALIMPORT void PALAPI PAL_InitializeWithFlags( DWORD flags); PALIMPORT int PALAPI PAL_InitializeDLL(); PALIMPORT void PALAPI PAL_SetInitializeDLLFlags( DWORD flags); PALIMPORT DWORD PALAPI PAL_InitializeCoreCLR( const char *szExePath, BOOL runningInExe); /// <summary> /// This function shuts down PAL WITHOUT exiting the current process. /// </summary> PALIMPORT void PALAPI PAL_Shutdown( void); /// <summary> /// This function shuts down PAL and exits the current process. /// </summary> PALIMPORT void PALAPI PAL_Terminate( void); /// <summary> /// This function shuts down PAL and exits the current process with /// the specified exit code. 
/// </summary> PALIMPORT void PALAPI PAL_TerminateEx( int exitCode); typedef VOID (*PSHUTDOWN_CALLBACK)(bool isExecutingOnAltStack); PALIMPORT VOID PALAPI PAL_SetShutdownCallback( IN PSHUTDOWN_CALLBACK callback); // Must be the same as the copy in excep.h and the WriteDumpFlags enum in the diagnostics repo enum { GenerateDumpFlagsNone = 0x00, GenerateDumpFlagsLoggingEnabled = 0x01, GenerateDumpFlagsVerboseLoggingEnabled = 0x02, GenerateDumpFlagsCrashReportEnabled = 0x04 }; PALIMPORT BOOL PALAPI PAL_GenerateCoreDump( IN LPCSTR dumpName, IN INT dumpType, IN ULONG32 flags); typedef VOID (*PPAL_STARTUP_CALLBACK)( char *modulePath, HMODULE hModule, PVOID parameter); PALIMPORT DWORD PALAPI PAL_RegisterForRuntimeStartup( IN DWORD dwProcessId, IN LPCWSTR lpApplicationGroupId, IN PPAL_STARTUP_CALLBACK pfnCallback, IN PVOID parameter, OUT PVOID *ppUnregisterToken); PALIMPORT DWORD PALAPI PAL_UnregisterForRuntimeStartup( IN PVOID pUnregisterToken); PALIMPORT BOOL PALAPI PAL_NotifyRuntimeStarted(); PALIMPORT LPCSTR PALAPI PAL_GetApplicationGroupId(); static const unsigned int MAX_DEBUGGER_TRANSPORT_PIPE_NAME_LENGTH = MAX_PATH; PALIMPORT VOID PALAPI PAL_GetTransportName( const unsigned int MAX_TRANSPORT_NAME_LENGTH, OUT char *name, IN const char *prefix, IN DWORD id, IN const char *applicationGroupId, IN const char *suffix); PALIMPORT VOID PALAPI PAL_GetTransportPipeName( OUT char *name, IN DWORD id, IN const char *applicationGroupId, IN const char *suffix); PALIMPORT void PALAPI PAL_IgnoreProfileSignal(int signalNum); PALIMPORT HINSTANCE PALAPI PAL_RegisterModule( IN LPCSTR lpLibFileName); PALIMPORT VOID PALAPI PAL_UnregisterModule( IN HINSTANCE hInstance); PALIMPORT VOID PALAPI PAL_Random( IN OUT LPVOID lpBuffer, IN DWORD dwLength); PALIMPORT BOOL PALAPI PAL_OpenProcessMemory( IN DWORD processId, OUT DWORD* pHandle ); PALIMPORT VOID PALAPI PAL_CloseProcessMemory( IN DWORD handle ); PALIMPORT BOOL PALAPI PAL_ReadProcessMemory( IN DWORD handle, IN ULONG64 address, IN LPVOID 
buffer, IN SIZE_T size, OUT SIZE_T* numberOfBytesRead ); PALIMPORT BOOL PALAPI PAL_ProbeMemory( PVOID pBuffer, DWORD cbBuffer, BOOL fWriteAccess); PALIMPORT int PALAPI // Start the jitdump file PAL_PerfJitDump_Start(const char* path); PALIMPORT int PALAPI // Log a method to the jitdump file. PAL_PerfJitDump_LogMethod(void* pCode, size_t codeSize, const char* symbol, void* debugInfo, void* unwindInfo); PALIMPORT int PALAPI // Finish the jitdump file PAL_PerfJitDump_Finish(); /******************* winuser.h Entrypoints *******************************/ #define MB_OK 0x00000000L #define MB_OKCANCEL 0x00000001L #define MB_ABORTRETRYIGNORE 0x00000002L #define MB_YESNO 0x00000004L #define MB_RETRYCANCEL 0x00000005L #define MB_ICONHAND 0x00000010L #define MB_ICONQUESTION 0x00000020L #define MB_ICONEXCLAMATION 0x00000030L #define MB_ICONASTERISK 0x00000040L #define MB_ICONINFORMATION MB_ICONASTERISK #define MB_ICONSTOP MB_ICONHAND #define MB_ICONERROR MB_ICONHAND #define MB_DEFBUTTON1 0x00000000L #define MB_DEFBUTTON2 0x00000100L #define MB_DEFBUTTON3 0x00000200L #define MB_SYSTEMMODAL 0x00001000L #define MB_TASKMODAL 0x00002000L #define MB_SETFOREGROUND 0x00010000L #define MB_TOPMOST 0x00040000L #define MB_NOFOCUS 0x00008000L #define MB_DEFAULT_DESKTOP_ONLY 0x00020000L // Note: this is the NT 4.0 and greater value. 
#define MB_SERVICE_NOTIFICATION 0x00200000L #define MB_TYPEMASK 0x0000000FL #define MB_ICONMASK 0x000000F0L #define MB_DEFMASK 0x00000F00L #define IDOK 1 #define IDCANCEL 2 #define IDABORT 3 #define IDRETRY 4 #define IDIGNORE 5 #define IDYES 6 #define IDNO 7 PALIMPORT int PALAPI MessageBoxW( IN LPVOID hWnd, // NOTE: diff from winuser.h IN LPCWSTR lpText, IN LPCWSTR lpCaption, IN UINT uType); #ifdef UNICODE #define MessageBox MessageBoxW #else #define MessageBox MessageBoxA #endif // From win32.h #ifndef _CRTIMP #ifdef __GNUC__ #define _CRTIMP #else // __GNUC__ #define _CRTIMP __declspec(dllimport) #endif // __GNUC__ #endif // _CRTIMP /******************* winbase.h Entrypoints and defines ************************/ typedef struct _SECURITY_ATTRIBUTES { DWORD nLength; LPVOID lpSecurityDescriptor; BOOL bInheritHandle; } SECURITY_ATTRIBUTES, *PSECURITY_ATTRIBUTES, *LPSECURITY_ATTRIBUTES; #define _SH_DENYWR 0x20 /* deny write mode */ #define FILE_READ_DATA ( 0x0001 ) // file & pipe #define FILE_APPEND_DATA ( 0x0004 ) // file #define GENERIC_READ (0x80000000L) #define GENERIC_WRITE (0x40000000L) #define FILE_SHARE_READ 0x00000001 #define FILE_SHARE_WRITE 0x00000002 #define FILE_SHARE_DELETE 0x00000004 #define CREATE_NEW 1 #define CREATE_ALWAYS 2 #define OPEN_EXISTING 3 #define OPEN_ALWAYS 4 #define TRUNCATE_EXISTING 5 #define FILE_ATTRIBUTE_READONLY 0x00000001 #define FILE_ATTRIBUTE_HIDDEN 0x00000002 #define FILE_ATTRIBUTE_SYSTEM 0x00000004 #define FILE_ATTRIBUTE_DIRECTORY 0x00000010 #define FILE_ATTRIBUTE_ARCHIVE 0x00000020 #define FILE_ATTRIBUTE_DEVICE 0x00000040 #define FILE_ATTRIBUTE_NORMAL 0x00000080 #define FILE_FLAG_WRITE_THROUGH 0x80000000 #define FILE_FLAG_NO_BUFFERING 0x20000000 #define FILE_FLAG_RANDOM_ACCESS 0x10000000 #define FILE_FLAG_SEQUENTIAL_SCAN 0x08000000 #define FILE_FLAG_BACKUP_SEMANTICS 0x02000000 #define FILE_BEGIN 0 #define FILE_CURRENT 1 #define FILE_END 2 #define STILL_ACTIVE (0x00000103L) #define INVALID_SET_FILE_POINTER ((DWORD)-1) PALIMPORT 
HANDLE PALAPI CreateFileW( IN LPCWSTR lpFileName, IN DWORD dwDesiredAccess, IN DWORD dwShareMode, IN LPSECURITY_ATTRIBUTES lpSecurityAttributes, IN DWORD dwCreationDisposition, IN DWORD dwFlagsAndAttributes, IN HANDLE hTemplateFile); #ifdef UNICODE #define CreateFile CreateFileW #else #define CreateFile CreateFileA #endif PALIMPORT DWORD PALAPI SearchPathW( IN LPCWSTR lpPath, IN LPCWSTR lpFileName, IN LPCWSTR lpExtension, IN DWORD nBufferLength, OUT LPWSTR lpBuffer, OUT LPWSTR *lpFilePart ); #define SearchPath SearchPathW PALIMPORT BOOL PALAPI CopyFileW( IN LPCWSTR lpExistingFileName, IN LPCWSTR lpNewFileName, IN BOOL bFailIfExists); #ifdef UNICODE #define CopyFile CopyFileW #else #define CopyFile CopyFileA #endif PALIMPORT BOOL PALAPI DeleteFileW( IN LPCWSTR lpFileName); #ifdef UNICODE #define DeleteFile DeleteFileW #else #define DeleteFile DeleteFileA #endif #define MOVEFILE_REPLACE_EXISTING 0x00000001 #define MOVEFILE_COPY_ALLOWED 0x00000002 PALIMPORT BOOL PALAPI MoveFileExW( IN LPCWSTR lpExistingFileName, IN LPCWSTR lpNewFileName, IN DWORD dwFlags); #ifdef UNICODE #define MoveFileEx MoveFileExW #else #define MoveFileEx MoveFileExA #endif typedef struct _BY_HANDLE_FILE_INFORMATION { DWORD dwFileAttributes; FILETIME ftCreationTime; FILETIME ftLastAccessTime; FILETIME ftLastWriteTime; DWORD dwVolumeSerialNumber; DWORD nFileSizeHigh; DWORD nFileSizeLow; DWORD nNumberOfLinks; DWORD nFileIndexHigh; DWORD nFileIndexLow; } BY_HANDLE_FILE_INFORMATION, *PBY_HANDLE_FILE_INFORMATION, *LPBY_HANDLE_FILE_INFORMATION; typedef struct _WIN32_FIND_DATAA { DWORD dwFileAttributes; FILETIME ftCreationTime; FILETIME ftLastAccessTime; FILETIME ftLastWriteTime; DWORD nFileSizeHigh; DWORD nFileSizeLow; DWORD dwReserved0; DWORD dwReserved1; CHAR cFileName[ MAX_PATH_FNAME ]; CHAR cAlternateFileName[ 14 ]; } WIN32_FIND_DATAA, *PWIN32_FIND_DATAA, *LPWIN32_FIND_DATAA; typedef struct _WIN32_FIND_DATAW { DWORD dwFileAttributes; FILETIME ftCreationTime; FILETIME ftLastAccessTime; FILETIME 
ftLastWriteTime; DWORD nFileSizeHigh; DWORD nFileSizeLow; DWORD dwReserved0; DWORD dwReserved1; WCHAR cFileName[ MAX_PATH_FNAME ]; WCHAR cAlternateFileName[ 14 ]; } WIN32_FIND_DATAW, *PWIN32_FIND_DATAW, *LPWIN32_FIND_DATAW; #ifdef UNICODE typedef WIN32_FIND_DATAW WIN32_FIND_DATA; typedef PWIN32_FIND_DATAW PWIN32_FIND_DATA; typedef LPWIN32_FIND_DATAW LPWIN32_FIND_DATA; #else typedef WIN32_FIND_DATAA WIN32_FIND_DATA; typedef PWIN32_FIND_DATAA PWIN32_FIND_DATA; typedef LPWIN32_FIND_DATAA LPWIN32_FIND_DATA; #endif PALIMPORT HANDLE PALAPI FindFirstFileW( IN LPCWSTR lpFileName, OUT LPWIN32_FIND_DATAW lpFindFileData); #ifdef UNICODE #define FindFirstFile FindFirstFileW #else #define FindFirstFile FindFirstFileA #endif PALIMPORT BOOL PALAPI FindNextFileW( IN HANDLE hFindFile, OUT LPWIN32_FIND_DATAW lpFindFileData); #ifdef UNICODE #define FindNextFile FindNextFileW #else #define FindNextFile FindNextFileA #endif PALIMPORT BOOL PALAPI FindClose( IN OUT HANDLE hFindFile); PALIMPORT DWORD PALAPI GetFileAttributesW( IN LPCWSTR lpFileName); #ifdef UNICODE #define GetFileAttributes GetFileAttributesW #else #define GetFileAttributes GetFileAttributesA #endif typedef enum _GET_FILEEX_INFO_LEVELS { GetFileExInfoStandard } GET_FILEEX_INFO_LEVELS; typedef enum _FINDEX_INFO_LEVELS { FindExInfoStandard, FindExInfoBasic, FindExInfoMaxInfoLevel } FINDEX_INFO_LEVELS; typedef enum _FINDEX_SEARCH_OPS { FindExSearchNameMatch, FindExSearchLimitToDirectories, FindExSearchLimitToDevices, FindExSearchMaxSearchOp } FINDEX_SEARCH_OPS; typedef struct _WIN32_FILE_ATTRIBUTE_DATA { DWORD dwFileAttributes; FILETIME ftCreationTime; FILETIME ftLastAccessTime; FILETIME ftLastWriteTime; DWORD nFileSizeHigh; DWORD nFileSizeLow; } WIN32_FILE_ATTRIBUTE_DATA, *LPWIN32_FILE_ATTRIBUTE_DATA; PALIMPORT BOOL PALAPI GetFileAttributesExW( IN LPCWSTR lpFileName, IN GET_FILEEX_INFO_LEVELS fInfoLevelId, OUT LPVOID lpFileInformation); #ifdef UNICODE #define GetFileAttributesEx GetFileAttributesExW #endif typedef struct 
_OVERLAPPED { ULONG_PTR Internal; ULONG_PTR InternalHigh; DWORD Offset; DWORD OffsetHigh; HANDLE hEvent; } OVERLAPPED, *LPOVERLAPPED; PALIMPORT BOOL PALAPI WriteFile( IN HANDLE hFile, IN LPCVOID lpBuffer, IN DWORD nNumberOfBytesToWrite, OUT LPDWORD lpNumberOfBytesWritten, IN LPOVERLAPPED lpOverlapped); PALIMPORT BOOL PALAPI ReadFile( IN HANDLE hFile, OUT LPVOID lpBuffer, IN DWORD nNumberOfBytesToRead, OUT LPDWORD lpNumberOfBytesRead, IN LPOVERLAPPED lpOverlapped); #define STD_INPUT_HANDLE ((DWORD)-10) #define STD_OUTPUT_HANDLE ((DWORD)-11) #define STD_ERROR_HANDLE ((DWORD)-12) PALIMPORT HANDLE PALAPI GetStdHandle( IN DWORD nStdHandle); PALIMPORT BOOL PALAPI SetEndOfFile( IN HANDLE hFile); PALIMPORT DWORD PALAPI SetFilePointer( IN HANDLE hFile, IN LONG lDistanceToMove, IN PLONG lpDistanceToMoveHigh, IN DWORD dwMoveMethod); PALIMPORT BOOL PALAPI SetFilePointerEx( IN HANDLE hFile, IN LARGE_INTEGER liDistanceToMove, OUT PLARGE_INTEGER lpNewFilePointer, IN DWORD dwMoveMethod); PALIMPORT DWORD PALAPI GetFileSize( IN HANDLE hFile, OUT LPDWORD lpFileSizeHigh); PALIMPORT BOOL PALAPI GetFileSizeEx( IN HANDLE hFile, OUT PLARGE_INTEGER lpFileSize); PALIMPORT VOID PALAPI GetSystemTimeAsFileTime( OUT LPFILETIME lpSystemTimeAsFileTime); typedef struct _SYSTEMTIME { WORD wYear; WORD wMonth; WORD wDayOfWeek; WORD wDay; WORD wHour; WORD wMinute; WORD wSecond; WORD wMilliseconds; } SYSTEMTIME, *PSYSTEMTIME, *LPSYSTEMTIME; PALIMPORT VOID PALAPI GetSystemTime( OUT LPSYSTEMTIME lpSystemTime); PALIMPORT BOOL PALAPI FileTimeToSystemTime( IN CONST FILETIME *lpFileTime, OUT LPSYSTEMTIME lpSystemTime); PALIMPORT BOOL PALAPI FlushFileBuffers( IN HANDLE hFile); PALIMPORT UINT PALAPI GetConsoleOutputCP(); PALIMPORT DWORD PALAPI GetFullPathNameW( IN LPCWSTR lpFileName, IN DWORD nBufferLength, OUT LPWSTR lpBuffer, OUT LPWSTR *lpFilePart); #ifdef UNICODE #define GetFullPathName GetFullPathNameW #else #define GetFullPathName GetFullPathNameA #endif PALIMPORT UINT PALAPI GetTempFileNameW( IN LPCWSTR 
lpPathName, IN LPCWSTR lpPrefixString, IN UINT uUnique, OUT LPWSTR lpTempFileName); #ifdef UNICODE #define GetTempFileName GetTempFileNameW #else #define GetTempFileName GetTempFileNameA #endif PALIMPORT DWORD PALAPI GetTempPathW( IN DWORD nBufferLength, OUT LPWSTR lpBuffer); PALIMPORT DWORD PALAPI GetTempPathA( IN DWORD nBufferLength, OUT LPSTR lpBuffer); #ifdef UNICODE #define GetTempPath GetTempPathW #else #define GetTempPath GetTempPathA #endif PALIMPORT DWORD PALAPI GetCurrentDirectoryW( IN DWORD nBufferLength, OUT LPWSTR lpBuffer); #ifdef UNICODE #define GetCurrentDirectory GetCurrentDirectoryW #else #define GetCurrentDirectory GetCurrentDirectoryA #endif PALIMPORT HANDLE PALAPI CreateSemaphoreExW( IN LPSECURITY_ATTRIBUTES lpSemaphoreAttributes, IN LONG lInitialCount, IN LONG lMaximumCount, IN LPCWSTR lpName, IN /*_Reserved_*/ DWORD dwFlags, IN DWORD dwDesiredAccess); PALIMPORT HANDLE PALAPI OpenSemaphoreW( IN DWORD dwDesiredAccess, IN BOOL bInheritHandle, IN LPCWSTR lpName); #define CreateSemaphoreEx CreateSemaphoreExW PALIMPORT BOOL PALAPI ReleaseSemaphore( IN HANDLE hSemaphore, IN LONG lReleaseCount, OUT LPLONG lpPreviousCount); PALIMPORT HANDLE PALAPI CreateEventW( IN LPSECURITY_ATTRIBUTES lpEventAttributes, IN BOOL bManualReset, IN BOOL bInitialState, IN LPCWSTR lpName); PALIMPORT HANDLE PALAPI CreateEventExW( IN LPSECURITY_ATTRIBUTES lpEventAttributes, IN LPCWSTR lpName, IN DWORD dwFlags, IN DWORD dwDesiredAccess); // CreateEventExW: dwFlags #define CREATE_EVENT_MANUAL_RESET ((DWORD)0x1) #define CREATE_EVENT_INITIAL_SET ((DWORD)0x2) #define CreateEvent CreateEventW PALIMPORT BOOL PALAPI SetEvent( IN HANDLE hEvent); PALIMPORT BOOL PALAPI ResetEvent( IN HANDLE hEvent); PALIMPORT HANDLE PALAPI OpenEventW( IN DWORD dwDesiredAccess, IN BOOL bInheritHandle, IN LPCWSTR lpName); #ifdef UNICODE #define OpenEvent OpenEventW #endif PALIMPORT HANDLE PALAPI CreateMutexW( IN LPSECURITY_ATTRIBUTES lpMutexAttributes, IN BOOL bInitialOwner, IN LPCWSTR lpName); PALIMPORT 
HANDLE PALAPI CreateMutexExW( IN LPSECURITY_ATTRIBUTES lpMutexAttributes, IN LPCWSTR lpName, IN DWORD dwFlags, IN DWORD dwDesiredAccess); // CreateMutexExW: dwFlags #define CREATE_MUTEX_INITIAL_OWNER ((DWORD)0x1) #define CreateMutex CreateMutexW PALIMPORT HANDLE PALAPI OpenMutexW( IN DWORD dwDesiredAccess, IN BOOL bInheritHandle, IN LPCWSTR lpName); #ifdef UNICODE #define OpenMutex OpenMutexW #endif PALIMPORT BOOL PALAPI ReleaseMutex( IN HANDLE hMutex); PALIMPORT DWORD PALAPI GetCurrentProcessId(); PALIMPORT DWORD PALAPI GetCurrentSessionId(); PALIMPORT HANDLE PALAPI GetCurrentProcess(); PALIMPORT DWORD PALAPI GetCurrentThreadId(); PALIMPORT size_t PALAPI PAL_GetCurrentOSThreadId(); // To work around multiply-defined symbols in the Carbon framework. #define GetCurrentThread PAL_GetCurrentThread PALIMPORT HANDLE PALAPI GetCurrentThread(); #define STARTF_USESTDHANDLES 0x00000100 typedef struct _STARTUPINFOW { DWORD cb; LPWSTR lpReserved_PAL_Undefined; LPWSTR lpDesktop_PAL_Undefined; LPWSTR lpTitle_PAL_Undefined; DWORD dwX_PAL_Undefined; DWORD dwY_PAL_Undefined; DWORD dwXSize_PAL_Undefined; DWORD dwYSize_PAL_Undefined; DWORD dwXCountChars_PAL_Undefined; DWORD dwYCountChars_PAL_Undefined; DWORD dwFillAttribute_PAL_Undefined; DWORD dwFlags; WORD wShowWindow_PAL_Undefined; WORD cbReserved2_PAL_Undefined; LPBYTE lpReserved2_PAL_Undefined; HANDLE hStdInput; HANDLE hStdOutput; HANDLE hStdError; } STARTUPINFOW, *LPSTARTUPINFOW; typedef STARTUPINFOW STARTUPINFO; typedef LPSTARTUPINFOW LPSTARTUPINFO; #define CREATE_NEW_CONSOLE 0x00000010 #define NORMAL_PRIORITY_CLASS 0x00000020 typedef struct _PROCESS_INFORMATION { HANDLE hProcess; HANDLE hThread; DWORD dwProcessId; DWORD dwThreadId_PAL_Undefined; } PROCESS_INFORMATION, *PPROCESS_INFORMATION, *LPPROCESS_INFORMATION; PALIMPORT BOOL PALAPI CreateProcessW( IN LPCWSTR lpApplicationName, IN LPWSTR lpCommandLine, IN LPSECURITY_ATTRIBUTES lpProcessAttributes, IN LPSECURITY_ATTRIBUTES lpThreadAttributes, IN BOOL bInheritHandles, IN 
DWORD dwCreationFlags, IN LPVOID lpEnvironment, IN LPCWSTR lpCurrentDirectory, IN LPSTARTUPINFOW lpStartupInfo, OUT LPPROCESS_INFORMATION lpProcessInformation); #define CreateProcess CreateProcessW PALIMPORT PAL_NORETURN VOID PALAPI ExitProcess( IN UINT uExitCode); PALIMPORT BOOL PALAPI TerminateProcess( IN HANDLE hProcess, IN UINT uExitCode); PALIMPORT BOOL PALAPI GetExitCodeProcess( IN HANDLE hProcess, IN LPDWORD lpExitCode); PALIMPORT BOOL PALAPI GetProcessTimes( IN HANDLE hProcess, OUT LPFILETIME lpCreationTime, OUT LPFILETIME lpExitTime, OUT LPFILETIME lpKernelTime, OUT LPFILETIME lpUserTime); #define MAXIMUM_WAIT_OBJECTS 64 #define WAIT_OBJECT_0 0 #define WAIT_ABANDONED 0x00000080 #define WAIT_ABANDONED_0 0x00000080 #define WAIT_TIMEOUT 258 #define WAIT_FAILED ((DWORD)0xFFFFFFFF) #define INFINITE 0xFFFFFFFF // Infinite timeout PALIMPORT DWORD PALAPI WaitForSingleObject( IN HANDLE hHandle, IN DWORD dwMilliseconds); PALIMPORT DWORD PALAPI PAL_WaitForSingleObjectPrioritized( IN HANDLE hHandle, IN DWORD dwMilliseconds); PALIMPORT DWORD PALAPI WaitForSingleObjectEx( IN HANDLE hHandle, IN DWORD dwMilliseconds, IN BOOL bAlertable); PALIMPORT DWORD PALAPI WaitForMultipleObjects( IN DWORD nCount, IN CONST HANDLE *lpHandles, IN BOOL bWaitAll, IN DWORD dwMilliseconds); PALIMPORT DWORD PALAPI WaitForMultipleObjectsEx( IN DWORD nCount, IN CONST HANDLE *lpHandles, IN BOOL bWaitAll, IN DWORD dwMilliseconds, IN BOOL bAlertable); PALIMPORT DWORD PALAPI SignalObjectAndWait( IN HANDLE hObjectToSignal, IN HANDLE hObjectToWaitOn, IN DWORD dwMilliseconds, IN BOOL bAlertable); #define DUPLICATE_CLOSE_SOURCE 0x00000001 #define DUPLICATE_SAME_ACCESS 0x00000002 PALIMPORT BOOL PALAPI DuplicateHandle( IN HANDLE hSourceProcessHandle, IN HANDLE hSourceHandle, IN HANDLE hTargetProcessHandle, OUT LPHANDLE lpTargetHandle, IN DWORD dwDesiredAccess, IN BOOL bInheritHandle, IN DWORD dwOptions); PALIMPORT VOID PALAPI Sleep( IN DWORD dwMilliseconds); PALIMPORT DWORD PALAPI SleepEx( IN DWORD 
dwMilliseconds, IN BOOL bAlertable); PALIMPORT BOOL PALAPI SwitchToThread(); #define DEBUG_PROCESS 0x00000001 #define DEBUG_ONLY_THIS_PROCESS 0x00000002 #define CREATE_SUSPENDED 0x00000004 #define STACK_SIZE_PARAM_IS_A_RESERVATION 0x00010000 PALIMPORT HANDLE PALAPI CreateThread( IN LPSECURITY_ATTRIBUTES lpThreadAttributes, IN DWORD dwStackSize, IN LPTHREAD_START_ROUTINE lpStartAddress, IN LPVOID lpParameter, IN DWORD dwCreationFlags, OUT LPDWORD lpThreadId); PALIMPORT HANDLE PALAPI PAL_CreateThread64( IN LPSECURITY_ATTRIBUTES lpThreadAttributes, IN DWORD dwStackSize, IN LPTHREAD_START_ROUTINE lpStartAddress, IN LPVOID lpParameter, IN DWORD dwCreationFlags, OUT SIZE_T* pThreadId); PALIMPORT PAL_NORETURN VOID PALAPI ExitThread( IN DWORD dwExitCode); PALIMPORT DWORD PALAPI ResumeThread( IN HANDLE hThread); typedef VOID (PALAPI_NOEXPORT *PAPCFUNC)(ULONG_PTR dwParam); PALIMPORT DWORD PALAPI QueueUserAPC( IN PAPCFUNC pfnAPC, IN HANDLE hThread, IN ULONG_PTR dwData); #ifdef HOST_X86 // // *********************************************************************************** // // NOTE: These context definitions are replicated in ndp/clr/src/debug/inc/DbgTargetContext.h (for the // purposes manipulating contexts from different platforms during remote debugging). Be sure to keep those // definitions in sync if you make any changes here. 
// // *********************************************************************************** // #define SIZE_OF_80387_REGISTERS 80 #define CONTEXT_i386 0x00010000 #define CONTEXT_CONTROL (CONTEXT_i386 | 0x00000001L) // SS:SP, CS:IP, FLAGS, BP #define CONTEXT_INTEGER (CONTEXT_i386 | 0x00000002L) // AX, BX, CX, DX, SI, DI #define CONTEXT_SEGMENTS (CONTEXT_i386 | 0x00000004L) #define CONTEXT_FLOATING_POINT (CONTEXT_i386 | 0x00000008L) // 387 state #define CONTEXT_DEBUG_REGISTERS (CONTEXT_i386 | 0x00000010L) #define CONTEXT_FULL (CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_SEGMENTS) #define CONTEXT_EXTENDED_REGISTERS (CONTEXT_i386 | 0x00000020L) #define CONTEXT_ALL (CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_SEGMENTS | CONTEXT_FLOATING_POINT | CONTEXT_DEBUG_REGISTERS | CONTEXT_EXTENDED_REGISTERS) #define MAXIMUM_SUPPORTED_EXTENSION 512 #define CONTEXT_XSTATE (CONTEXT_i386 | 0x40L) #define CONTEXT_EXCEPTION_ACTIVE 0x8000000L #define CONTEXT_SERVICE_ACTIVE 0x10000000L #define CONTEXT_EXCEPTION_REQUEST 0x40000000L #define CONTEXT_EXCEPTION_REPORTING 0x80000000L // // This flag is set by the unwinder if it has unwound to a call // site, and cleared whenever it unwinds through a trap frame. // It is used by language-specific exception handlers to help // differentiate exception scopes during dispatching. 
// #define CONTEXT_UNWOUND_TO_CALL 0x20000000 typedef struct _FLOATING_SAVE_AREA { DWORD ControlWord; DWORD StatusWord; DWORD TagWord; DWORD ErrorOffset; DWORD ErrorSelector; DWORD DataOffset; DWORD DataSelector; BYTE RegisterArea[SIZE_OF_80387_REGISTERS]; DWORD Cr0NpxState; } FLOATING_SAVE_AREA; typedef FLOATING_SAVE_AREA *PFLOATING_SAVE_AREA; typedef struct _CONTEXT { ULONG ContextFlags; ULONG Dr0_PAL_Undefined; ULONG Dr1_PAL_Undefined; ULONG Dr2_PAL_Undefined; ULONG Dr3_PAL_Undefined; ULONG Dr6_PAL_Undefined; ULONG Dr7_PAL_Undefined; FLOATING_SAVE_AREA FloatSave; ULONG SegGs_PAL_Undefined; ULONG SegFs_PAL_Undefined; ULONG SegEs_PAL_Undefined; ULONG SegDs_PAL_Undefined; ULONG Edi; ULONG Esi; ULONG Ebx; ULONG Edx; ULONG Ecx; ULONG Eax; ULONG Ebp; ULONG Eip; ULONG SegCs; ULONG EFlags; ULONG Esp; ULONG SegSs; UCHAR ExtendedRegisters[MAXIMUM_SUPPORTED_EXTENSION]; } CONTEXT, *PCONTEXT, *LPCONTEXT; // To support saving and loading xmm register context we need to know the offset in the ExtendedRegisters // section at which they are stored. This has been determined experimentally since I have found no // documentation thus far but it corresponds to the offset we'd expect if a fxsave instruction was used to // store the regular FP state along with the XMM registers at the start of the extended registers section. // Technically the offset doesn't really matter if no code in the PAL or runtime knows what the offset should // be either (as long as we're consistent across GetThreadContext() and SetThreadContext() and we don't // support any other values in the ExtendedRegisters) but we might as well be as accurate as we can. 
#define CONTEXT_EXREG_XMM_OFFSET 160 typedef struct _KNONVOLATILE_CONTEXT { DWORD Edi; DWORD Esi; DWORD Ebx; DWORD Ebp; } KNONVOLATILE_CONTEXT, *PKNONVOLATILE_CONTEXT; typedef struct _KNONVOLATILE_CONTEXT_POINTERS { // The ordering of these fields should be aligned with that // of corresponding fields in CONTEXT // // (See FillRegDisplay in inc/regdisp.h for details) PDWORD Edi; PDWORD Esi; PDWORD Ebx; PDWORD Edx; PDWORD Ecx; PDWORD Eax; PDWORD Ebp; } KNONVOLATILE_CONTEXT_POINTERS, *PKNONVOLATILE_CONTEXT_POINTERS; #elif defined(HOST_AMD64) // copied from winnt.h #define CONTEXT_AMD64 0x100000 #define CONTEXT_CONTROL (CONTEXT_AMD64 | 0x1L) #define CONTEXT_INTEGER (CONTEXT_AMD64 | 0x2L) #define CONTEXT_SEGMENTS (CONTEXT_AMD64 | 0x4L) #define CONTEXT_FLOATING_POINT (CONTEXT_AMD64 | 0x8L) #define CONTEXT_DEBUG_REGISTERS (CONTEXT_AMD64 | 0x10L) #define CONTEXT_FULL (CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_FLOATING_POINT) #define CONTEXT_ALL (CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_SEGMENTS | CONTEXT_FLOATING_POINT | CONTEXT_DEBUG_REGISTERS) #define CONTEXT_XSTATE (CONTEXT_AMD64 | 0x40L) #define CONTEXT_EXCEPTION_ACTIVE 0x8000000 #define CONTEXT_SERVICE_ACTIVE 0x10000000 #define CONTEXT_EXCEPTION_REQUEST 0x40000000 #define CONTEXT_EXCEPTION_REPORTING 0x80000000 typedef struct DECLSPEC_ALIGN(16) _M128A { ULONGLONG Low; LONGLONG High; } M128A, *PM128A; typedef struct _XMM_SAVE_AREA32 { WORD ControlWord; WORD StatusWord; BYTE TagWord; BYTE Reserved1; WORD ErrorOpcode; DWORD ErrorOffset; WORD ErrorSelector; WORD Reserved2; DWORD DataOffset; WORD DataSelector; WORD Reserved3; DWORD MxCsr; DWORD MxCsr_Mask; M128A FloatRegisters[8]; M128A XmmRegisters[16]; BYTE Reserved4[96]; } XMM_SAVE_AREA32, *PXMM_SAVE_AREA32; #define LEGACY_SAVE_AREA_LENGTH sizeof(XMM_SAVE_AREA32) // // Context Frame // // This frame has several purposes: 1) it is used as an argument to // NtContinue, 2) it is used to construct a call frame for APC delivery, // and 3) it is used in the user level
thread creation routines. // // The flags field within this record controls the contents of a CONTEXT // record. // // If the context record is used as an input parameter, then for each // portion of the context record controlled by a flag whose value is // set, it is assumed that that portion of the context record contains // valid context. If the context record is being used to modify a thread's // context, then only that portion of the thread's context is modified. // // If the context record is used as an output parameter to capture the // context of a thread, then only those portions of the thread's context // corresponding to set flags will be returned. // // CONTEXT_CONTROL specifies SegSs, Rsp, SegCs, Rip, and EFlags. // // CONTEXT_INTEGER specifies Rax, Rcx, Rdx, Rbx, Rbp, Rsi, Rdi, and R8-R15. // // CONTEXT_SEGMENTS specifies SegDs, SegEs, SegFs, and SegGs. // // CONTEXT_DEBUG_REGISTERS specifies Dr0-Dr3 and Dr6-Dr7. // // CONTEXT_MMX_REGISTERS specifies the floating point and extended registers // Mm0/St0-Mm7/St7 and Xmm0-Xmm15). // typedef struct DECLSPEC_ALIGN(16) _CONTEXT { // // Register parameter home addresses. // // N.B. These fields are for convenience - they could be used to extend the // context record in the future. // DWORD64 P1Home; DWORD64 P2Home; DWORD64 P3Home; DWORD64 P4Home; DWORD64 P5Home; DWORD64 P6Home; // // Control flags. // DWORD ContextFlags; DWORD MxCsr; // // Segment Registers and processor flags. // WORD SegCs; WORD SegDs; WORD SegEs; WORD SegFs; WORD SegGs; WORD SegSs; DWORD EFlags; // // Debug registers // DWORD64 Dr0; DWORD64 Dr1; DWORD64 Dr2; DWORD64 Dr3; DWORD64 Dr6; DWORD64 Dr7; // // Integer registers. // DWORD64 Rax; DWORD64 Rcx; DWORD64 Rdx; DWORD64 Rbx; DWORD64 Rsp; DWORD64 Rbp; DWORD64 Rsi; DWORD64 Rdi; DWORD64 R8; DWORD64 R9; DWORD64 R10; DWORD64 R11; DWORD64 R12; DWORD64 R13; DWORD64 R14; DWORD64 R15; // // Program counter. // DWORD64 Rip; // // Floating point state.
// union { XMM_SAVE_AREA32 FltSave; struct { M128A Header[2]; M128A Legacy[8]; M128A Xmm0; M128A Xmm1; M128A Xmm2; M128A Xmm3; M128A Xmm4; M128A Xmm5; M128A Xmm6; M128A Xmm7; M128A Xmm8; M128A Xmm9; M128A Xmm10; M128A Xmm11; M128A Xmm12; M128A Xmm13; M128A Xmm14; M128A Xmm15; }; }; // // Vector registers. // M128A VectorRegister[26]; DWORD64 VectorControl; // // Special debug control registers. // DWORD64 DebugControl; DWORD64 LastBranchToRip; DWORD64 LastBranchFromRip; DWORD64 LastExceptionToRip; DWORD64 LastExceptionFromRip; } CONTEXT, *PCONTEXT, *LPCONTEXT; // // Nonvolatile context pointer record. // typedef struct _KNONVOLATILE_CONTEXT_POINTERS { union { PM128A FloatingContext[16]; struct { PM128A Xmm0; PM128A Xmm1; PM128A Xmm2; PM128A Xmm3; PM128A Xmm4; PM128A Xmm5; PM128A Xmm6; PM128A Xmm7; PM128A Xmm8; PM128A Xmm9; PM128A Xmm10; PM128A Xmm11; PM128A Xmm12; PM128A Xmm13; PM128A Xmm14; PM128A Xmm15; } ; } ; union { PDWORD64 IntegerContext[16]; struct { PDWORD64 Rax; PDWORD64 Rcx; PDWORD64 Rdx; PDWORD64 Rbx; PDWORD64 Rsp; PDWORD64 Rbp; PDWORD64 Rsi; PDWORD64 Rdi; PDWORD64 R8; PDWORD64 R9; PDWORD64 R10; PDWORD64 R11; PDWORD64 R12; PDWORD64 R13; PDWORD64 R14; PDWORD64 R15; } ; } ; } KNONVOLATILE_CONTEXT_POINTERS, *PKNONVOLATILE_CONTEXT_POINTERS; #elif defined(HOST_ARM) #define CONTEXT_ARM 0x00200000L // end_wx86 #define CONTEXT_CONTROL (CONTEXT_ARM | 0x1L) #define CONTEXT_INTEGER (CONTEXT_ARM | 0x2L) #define CONTEXT_FLOATING_POINT (CONTEXT_ARM | 0x4L) #define CONTEXT_DEBUG_REGISTERS (CONTEXT_ARM | 0x8L) #define CONTEXT_FULL (CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_FLOATING_POINT) #define CONTEXT_ALL (CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_FLOATING_POINT | CONTEXT_DEBUG_REGISTERS) #define CONTEXT_EXCEPTION_ACTIVE 0x8000000L #define CONTEXT_SERVICE_ACTIVE 0x10000000L #define CONTEXT_EXCEPTION_REQUEST 0x40000000L #define CONTEXT_EXCEPTION_REPORTING 0x80000000L // // This flag is set by the unwinder if it has unwound to a call // site, and cleared whenever 
it unwinds through a trap frame. // It is used by language-specific exception handlers to help // differentiate exception scopes during dispatching. // #define CONTEXT_UNWOUND_TO_CALL 0x20000000 // // Specify the number of breakpoints and watchpoints that the OS // will track. Architecturally, ARM supports up to 16. In practice, // however, almost no one implements more than 4 of each. // #define ARM_MAX_BREAKPOINTS 8 #define ARM_MAX_WATCHPOINTS 1 typedef struct _NEON128 { ULONGLONG Low; LONGLONG High; } NEON128, *PNEON128; // // Context Frame // // This frame has several purposes: 1) it is used as an argument to // NtContinue, 2) it is used to construct a call frame for APC delivery, // and 3) it is used in the user level thread creation routines. // // // The flags field within this record controls the contents of a CONTEXT // record. // // If the context record is used as an input parameter, then for each // portion of the context record controlled by a flag whose value is // set, it is assumed that that portion of the context record contains // valid context. If the context record is being used to modify a thread's // context, then only that portion of the thread's context is modified. // // If the context record is used as an output parameter to capture the // context of a thread, then only those portions of the thread's context // corresponding to set flags will be returned. // // CONTEXT_CONTROL specifies Sp, Lr, Pc, and Cpsr // // CONTEXT_INTEGER specifies R0-R12 // // CONTEXT_FLOATING_POINT specifies Q0-Q15 / D0-D31 / S0-S31 // // CONTEXT_DEBUG_REGISTERS specifies up to 16 of DBGBVR, DBGBCR, DBGWVR, // DBGWCR. // typedef struct DECLSPEC_ALIGN(8) _CONTEXT { // // Control flags.
// DWORD ContextFlags; // // Integer registers // DWORD R0; DWORD R1; DWORD R2; DWORD R3; DWORD R4; DWORD R5; DWORD R6; DWORD R7; DWORD R8; DWORD R9; DWORD R10; DWORD R11; DWORD R12; // // Control Registers // DWORD Sp; DWORD Lr; DWORD Pc; DWORD Cpsr; // // Floating Point/NEON Registers // DWORD Fpscr; DWORD Padding; union { NEON128 Q[16]; ULONGLONG D[32]; DWORD S[32]; }; // // Debug registers // DWORD Bvr[ARM_MAX_BREAKPOINTS]; DWORD Bcr[ARM_MAX_BREAKPOINTS]; DWORD Wvr[ARM_MAX_WATCHPOINTS]; DWORD Wcr[ARM_MAX_WATCHPOINTS]; DWORD Padding2[2]; } CONTEXT, *PCONTEXT, *LPCONTEXT; // // Nonvolatile context pointer record. // typedef struct _KNONVOLATILE_CONTEXT_POINTERS { PDWORD R4; PDWORD R5; PDWORD R6; PDWORD R7; PDWORD R8; PDWORD R9; PDWORD R10; PDWORD R11; PDWORD Lr; PULONGLONG D8; PULONGLONG D9; PULONGLONG D10; PULONGLONG D11; PULONGLONG D12; PULONGLONG D13; PULONGLONG D14; PULONGLONG D15; } KNONVOLATILE_CONTEXT_POINTERS, *PKNONVOLATILE_CONTEXT_POINTERS; typedef struct _IMAGE_ARM_RUNTIME_FUNCTION_ENTRY { DWORD BeginAddress; DWORD EndAddress; union { DWORD UnwindData; struct { DWORD Flag : 2; DWORD FunctionLength : 11; DWORD Ret : 2; DWORD H : 1; DWORD Reg : 3; DWORD R : 1; DWORD L : 1; DWORD C : 1; DWORD StackAdjust : 10; }; }; } IMAGE_ARM_RUNTIME_FUNCTION_ENTRY, * PIMAGE_ARM_RUNTIME_FUNCTION_ENTRY; #elif defined(HOST_ARM64) #define CONTEXT_ARM64 0x00400000L #define CONTEXT_CONTROL (CONTEXT_ARM64 | 0x1L) #define CONTEXT_INTEGER (CONTEXT_ARM64 | 0x2L) #define CONTEXT_FLOATING_POINT (CONTEXT_ARM64 | 0x4L) #define CONTEXT_DEBUG_REGISTERS (CONTEXT_ARM64 | 0x8L) #define CONTEXT_FULL (CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_FLOATING_POINT) #define CONTEXT_ALL (CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_FLOATING_POINT | CONTEXT_DEBUG_REGISTERS) #define CONTEXT_EXCEPTION_ACTIVE 0x8000000L #define CONTEXT_SERVICE_ACTIVE 0x10000000L #define CONTEXT_EXCEPTION_REQUEST 0x40000000L #define CONTEXT_EXCEPTION_REPORTING 0x80000000L // // This flag is set by the unwinder if it 
has unwound to a call // site, and cleared whenever it unwinds through a trap frame. // It is used by language-specific exception handlers to help // differentiate exception scopes during dispatching. // #define CONTEXT_UNWOUND_TO_CALL 0x20000000 // // Define initial Cpsr/Fpscr value // #define INITIAL_CPSR 0x10 #define INITIAL_FPSCR 0 // begin_ntoshvp // // Specify the number of breakpoints and watchpoints that the OS // will track. Architecturally, ARM64 supports up to 16. In practice, // however, almost no one implements more than 4 of each. // #define ARM64_MAX_BREAKPOINTS 8 #define ARM64_MAX_WATCHPOINTS 2 // // Context Frame // // This frame has several purposes: 1) it is used as an argument to // NtContinue, 2) it is used to construct a call frame for APC delivery, // and 3) it is used in the user level thread creation routines. // // // The flags field within this record controls the contents of a CONTEXT // record. // // If the context record is used as an input parameter, then for each // portion of the context record controlled by a flag whose value is // set, it is assumed that that portion of the context record contains // valid context. If the context record is being used to modify a thread's // context, then only that portion of the thread's context is modified. // // If the context record is used as an output parameter to capture the // context of a thread, then only those portions of the thread's context // corresponding to set flags will be returned. // // CONTEXT_CONTROL specifies Sp, Lr, Pc, and Cpsr // // CONTEXT_INTEGER specifies R0-R12 // // CONTEXT_FLOATING_POINT specifies Q0-Q15 / D0-D31 / S0-S31 // // CONTEXT_DEBUG_REGISTERS specifies up to 16 of DBGBVR, DBGBCR, DBGWVR, // DBGWCR. // typedef struct _NEON128 { ULONGLONG Low; LONGLONG High; } NEON128, *PNEON128; typedef struct DECLSPEC_ALIGN(16) _CONTEXT { // // Control flags.
// /* +0x000 */ DWORD ContextFlags; // // Integer registers // /* +0x004 */ DWORD Cpsr; // NZVF + DAIF + CurrentEL + SPSel /* +0x008 */ union { struct { DWORD64 X0; DWORD64 X1; DWORD64 X2; DWORD64 X3; DWORD64 X4; DWORD64 X5; DWORD64 X6; DWORD64 X7; DWORD64 X8; DWORD64 X9; DWORD64 X10; DWORD64 X11; DWORD64 X12; DWORD64 X13; DWORD64 X14; DWORD64 X15; DWORD64 X16; DWORD64 X17; DWORD64 X18; DWORD64 X19; DWORD64 X20; DWORD64 X21; DWORD64 X22; DWORD64 X23; DWORD64 X24; DWORD64 X25; DWORD64 X26; DWORD64 X27; DWORD64 X28; }; DWORD64 X[29]; }; /* +0x0f0 */ DWORD64 Fp; /* +0x0f8 */ DWORD64 Lr; /* +0x100 */ DWORD64 Sp; /* +0x108 */ DWORD64 Pc; // // Floating Point/NEON Registers // /* +0x110 */ NEON128 V[32]; /* +0x310 */ DWORD Fpcr; /* +0x314 */ DWORD Fpsr; // // Debug registers // /* +0x318 */ DWORD Bcr[ARM64_MAX_BREAKPOINTS]; /* +0x338 */ DWORD64 Bvr[ARM64_MAX_BREAKPOINTS]; /* +0x378 */ DWORD Wcr[ARM64_MAX_WATCHPOINTS]; /* +0x380 */ DWORD64 Wvr[ARM64_MAX_WATCHPOINTS]; /* +0x390 */ } CONTEXT, *PCONTEXT, *LPCONTEXT; // // Nonvolatile context pointer record. 
// typedef struct _KNONVOLATILE_CONTEXT_POINTERS { PDWORD64 X19; PDWORD64 X20; PDWORD64 X21; PDWORD64 X22; PDWORD64 X23; PDWORD64 X24; PDWORD64 X25; PDWORD64 X26; PDWORD64 X27; PDWORD64 X28; PDWORD64 Fp; PDWORD64 Lr; PDWORD64 D8; PDWORD64 D9; PDWORD64 D10; PDWORD64 D11; PDWORD64 D12; PDWORD64 D13; PDWORD64 D14; PDWORD64 D15; } KNONVOLATILE_CONTEXT_POINTERS, *PKNONVOLATILE_CONTEXT_POINTERS; #elif defined(HOST_LOONGARCH64) //Please reference "src/pal/src/arch/loongarch64/asmconstants.h" #define CONTEXT_LOONGARCH64 0x00800000 #define CONTEXT_CONTROL (CONTEXT_LOONGARCH64 | 0x1) #define CONTEXT_INTEGER (CONTEXT_LOONGARCH64 | 0x2) #define CONTEXT_FLOATING_POINT (CONTEXT_LOONGARCH64 | 0x4) #define CONTEXT_DEBUG_REGISTERS (CONTEXT_LOONGARCH64 | 0x8) #define CONTEXT_FULL (CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_FLOATING_POINT) #define CONTEXT_ALL (CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_FLOATING_POINT | CONTEXT_DEBUG_REGISTERS) #define CONTEXT_EXCEPTION_ACTIVE 0x8000000 #define CONTEXT_SERVICE_ACTIVE 0x10000000 #define CONTEXT_EXCEPTION_REQUEST 0x40000000 #define CONTEXT_EXCEPTION_REPORTING 0x80000000 // // This flag is set by the unwinder if it has unwound to a call // site, and cleared whenever it unwinds through a trap frame. // It is used by language-specific exception handlers to help // differentiate exception scopes during dispatching. // #define CONTEXT_UNWOUND_TO_CALL 0x20000000 // begin_ntoshvp // // Specify the number of breakpoints and watchpoints that the OS // will track. Architecturally, LOONGARCH64 supports up to 16. In practice, // however, almost no one implements more than 4 of each. // #define LOONGARCH64_MAX_BREAKPOINTS 8 #define LOONGARCH64_MAX_WATCHPOINTS 2 // // Context Frame // // This frame has several purposes: 1) it is used as an argument to // NtContinue, 2) it is used to construct a call frame for APC delivery, // and 3) it is used in the user level thread creation routines.
// // // The flags field within this record controls the contents of a CONTEXT // record. // // If the context record is used as an input parameter, then for each // portion of the context record controlled by a flag whose value is // set, it is assumed that that portion of the context record contains // valid context. If the context record is being used to modify a threads // context, then only that portion of the threads context is modified. // // If the context record is used as an output parameter to capture the // context of a thread, then only those portions of the thread's context // corresponding to set flags will be returned. // typedef struct DECLSPEC_ALIGN(16) _CONTEXT { // // Control flags. // /* +0x000 */ DWORD ContextFlags; // // Integer registers, abi=N64. // DWORD64 R0; DWORD64 Ra; DWORD64 Tp; DWORD64 Sp; DWORD64 A0;//DWORD64 V0; DWORD64 A1;//DWORD64 V1; DWORD64 A2; DWORD64 A3; DWORD64 A4; DWORD64 A5; DWORD64 A6; DWORD64 A7; DWORD64 T0; DWORD64 T1; DWORD64 T2; DWORD64 T3; DWORD64 T4; DWORD64 T5; DWORD64 T6; DWORD64 T7; DWORD64 T8; DWORD64 X0; DWORD64 Fp; DWORD64 S0; DWORD64 S1; DWORD64 S2; DWORD64 S3; DWORD64 S4; DWORD64 S5; DWORD64 S6; DWORD64 S7; DWORD64 S8; DWORD64 Pc; // // Floating Point Registers // //TODO: support the SIMD. DWORD64 F[32]; DWORD Fcsr; } CONTEXT, *PCONTEXT, *LPCONTEXT; // // Nonvolatile context pointer record. // typedef struct _KNONVOLATILE_CONTEXT_POINTERS { PDWORD64 S0; PDWORD64 S1; PDWORD64 S2; PDWORD64 S3; PDWORD64 S4; PDWORD64 S5; PDWORD64 S6; PDWORD64 S7; PDWORD64 S8; PDWORD64 Fp; PDWORD64 Tp; PDWORD64 Ra; PDWORD64 F24; PDWORD64 F25; PDWORD64 F26; PDWORD64 F27; PDWORD64 F28; PDWORD64 F29; PDWORD64 F30; PDWORD64 F31; } KNONVOLATILE_CONTEXT_POINTERS, *PKNONVOLATILE_CONTEXT_POINTERS; #elif defined(HOST_S390X) // There is no context for s390x defined in winnt.h, // so we re-use the amd64 values. 
#define CONTEXT_S390X 0x100000 #define CONTEXT_CONTROL (CONTEXT_S390X | 0x1L) #define CONTEXT_INTEGER (CONTEXT_S390X | 0x2L) #define CONTEXT_FLOATING_POINT (CONTEXT_S390X | 0x4L) #define CONTEXT_FULL (CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_FLOATING_POINT) #define CONTEXT_ALL (CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_FLOATING_POINT) #define CONTEXT_EXCEPTION_ACTIVE 0x8000000 #define CONTEXT_SERVICE_ACTIVE 0x10000000 #define CONTEXT_EXCEPTION_REQUEST 0x40000000 #define CONTEXT_EXCEPTION_REPORTING 0x80000000 typedef struct DECLSPEC_ALIGN(8) _CONTEXT { // // Control flags. // DWORD ContextFlags; // // Integer registers. // union { DWORD64 Gpr[16]; struct { DWORD64 R0; DWORD64 R1; DWORD64 R2; DWORD64 R3; DWORD64 R4; DWORD64 R5; DWORD64 R6; DWORD64 R7; DWORD64 R8; DWORD64 R9; DWORD64 R10; DWORD64 R11; DWORD64 R12; DWORD64 R13; DWORD64 R14; DWORD64 R15; }; }; // // Floating-point registers. // union { DWORD64 Fpr[16]; struct { DWORD64 F0; DWORD64 F1; DWORD64 F2; DWORD64 F3; DWORD64 F4; DWORD64 F5; DWORD64 F6; DWORD64 F7; DWORD64 F8; DWORD64 F9; DWORD64 F10; DWORD64 F11; DWORD64 F12; DWORD64 F13; DWORD64 F14; DWORD64 F15; }; }; // // Control registers. // DWORD64 PSWMask; DWORD64 PSWAddr; } CONTEXT, *PCONTEXT, *LPCONTEXT; // // Nonvolatile context pointer record. // typedef struct _KNONVOLATILE_CONTEXT_POINTERS { PDWORD64 R6; PDWORD64 R7; PDWORD64 R8; PDWORD64 R9; PDWORD64 R10; PDWORD64 R11; PDWORD64 R12; PDWORD64 R13; PDWORD64 R14; PDWORD64 R15; } KNONVOLATILE_CONTEXT_POINTERS, *PKNONVOLATILE_CONTEXT_POINTERS; #else #error Unknown architecture for defining CONTEXT. 
#endif PALIMPORT BOOL PALAPI GetThreadContext( IN HANDLE hThread, IN OUT LPCONTEXT lpContext); PALIMPORT BOOL PALAPI SetThreadContext( IN HANDLE hThread, IN CONST CONTEXT *lpContext); #define THREAD_BASE_PRIORITY_LOWRT 15 #define THREAD_BASE_PRIORITY_MAX 2 #define THREAD_BASE_PRIORITY_MIN (-2) #define THREAD_BASE_PRIORITY_IDLE (-15) #define THREAD_PRIORITY_LOWEST THREAD_BASE_PRIORITY_MIN #define THREAD_PRIORITY_BELOW_NORMAL (THREAD_PRIORITY_LOWEST+1) #define THREAD_PRIORITY_NORMAL 0 #define THREAD_PRIORITY_HIGHEST THREAD_BASE_PRIORITY_MAX #define THREAD_PRIORITY_ABOVE_NORMAL (THREAD_PRIORITY_HIGHEST-1) #define THREAD_PRIORITY_ERROR_RETURN (MAXLONG) #define THREAD_PRIORITY_TIME_CRITICAL THREAD_BASE_PRIORITY_LOWRT #define THREAD_PRIORITY_IDLE THREAD_BASE_PRIORITY_IDLE PALIMPORT int PALAPI GetThreadPriority( IN HANDLE hThread); PALIMPORT BOOL PALAPI SetThreadPriority( IN HANDLE hThread, IN int nPriority); PALIMPORT BOOL PALAPI GetThreadTimes( IN HANDLE hThread, OUT LPFILETIME lpCreationTime, OUT LPFILETIME lpExitTime, OUT LPFILETIME lpKernelTime, OUT LPFILETIME lpUserTime); PALIMPORT HRESULT PALAPI SetThreadDescription( IN HANDLE hThread, IN PCWSTR lpThreadDescription ); #define TLS_OUT_OF_INDEXES ((DWORD)0xFFFFFFFF) PALIMPORT PVOID PALAPI PAL_GetStackBase(); PALIMPORT PVOID PALAPI PAL_GetStackLimit(); PALIMPORT DWORD PALAPI PAL_GetLogicalCpuCountFromOS(); PALIMPORT DWORD PALAPI PAL_GetTotalCpuCount(); PALIMPORT size_t PALAPI PAL_GetRestrictedPhysicalMemoryLimit(); PALIMPORT BOOL PALAPI PAL_GetPhysicalMemoryUsed(size_t* val); PALIMPORT BOOL PALAPI PAL_GetCpuLimit(UINT* val); PALIMPORT size_t PALAPI PAL_GetLogicalProcessorCacheSizeFromOS(); typedef BOOL(*UnwindReadMemoryCallback)(PVOID address, PVOID buffer, SIZE_T size); PALIMPORT BOOL PALAPI PAL_VirtualUnwind(CONTEXT *context, KNONVOLATILE_CONTEXT_POINTERS *contextPointers); PALIMPORT BOOL PALAPI PAL_VirtualUnwindOutOfProc(CONTEXT *context, KNONVOLATILE_CONTEXT_POINTERS *contextPointers, PULONG64 functionStart, 
SIZE_T baseAddress, UnwindReadMemoryCallback readMemoryCallback); #define GetLogicalProcessorCacheSizeFromOS PAL_GetLogicalProcessorCacheSizeFromOS /* PAL_CS_NATIVE_DATA_SIZE is defined as sizeof(PAL_CRITICAL_SECTION_NATIVE_DATA) */ #if defined(__APPLE__) && defined(__i386__) #define PAL_CS_NATIVE_DATA_SIZE 76 #elif defined(__APPLE__) && defined(__x86_64__) #define PAL_CS_NATIVE_DATA_SIZE 120 #elif defined(__APPLE__) && defined(HOST_ARM64) #define PAL_CS_NATIVE_DATA_SIZE 120 #elif defined(__FreeBSD__) && defined(HOST_X86) #define PAL_CS_NATIVE_DATA_SIZE 12 #elif defined(__FreeBSD__) && defined(__x86_64__) #define PAL_CS_NATIVE_DATA_SIZE 24 #elif defined(__linux__) && defined(HOST_ARM) #define PAL_CS_NATIVE_DATA_SIZE 80 #elif defined(__linux__) && defined(HOST_ARM64) #define PAL_CS_NATIVE_DATA_SIZE 116 #elif defined(__linux__) && defined(__i386__) #define PAL_CS_NATIVE_DATA_SIZE 76 #elif defined(__linux__) && defined(__x86_64__) #define PAL_CS_NATIVE_DATA_SIZE 96 #elif defined(__linux__) && defined(HOST_S390X) #define PAL_CS_NATIVE_DATA_SIZE 96 #elif defined(__NetBSD__) && defined(__amd64__) #define PAL_CS_NATIVE_DATA_SIZE 96 #elif defined(__NetBSD__) && defined(__earm__) #define PAL_CS_NATIVE_DATA_SIZE 56 #elif defined(__NetBSD__) && defined(__i386__) #define PAL_CS_NATIVE_DATA_SIZE 56 #elif defined(__sun) && defined(__x86_64__) #define PAL_CS_NATIVE_DATA_SIZE 48 #elif defined(__linux__) && defined(__loongarch64) #define PAL_CS_NATIVE_DATA_SIZE 96 #else #warning #error PAL_CS_NATIVE_DATA_SIZE is not defined for this architecture #endif // typedef struct _CRITICAL_SECTION { PVOID DebugInfo; LONG LockCount; LONG RecursionCount; HANDLE OwningThread; ULONG_PTR SpinCount; #ifdef PAL_TRACK_CRITICAL_SECTIONS_DATA BOOL bInternal; #endif // PAL_TRACK_CRITICAL_SECTIONS_DATA volatile DWORD dwInitState; union CSNativeDataStorage { BYTE rgNativeDataStorage[PAL_CS_NATIVE_DATA_SIZE]; PVOID pvAlign; // make sure the storage is machine-pointer-size aligned } csnds; } 
CRITICAL_SECTION, *PCRITICAL_SECTION, *LPCRITICAL_SECTION; PALIMPORT VOID PALAPI EnterCriticalSection(IN OUT LPCRITICAL_SECTION lpCriticalSection); PALIMPORT VOID PALAPI LeaveCriticalSection(IN OUT LPCRITICAL_SECTION lpCriticalSection); PALIMPORT VOID PALAPI InitializeCriticalSection(OUT LPCRITICAL_SECTION lpCriticalSection); PALIMPORT BOOL PALAPI InitializeCriticalSectionEx(LPCRITICAL_SECTION lpCriticalSection, DWORD dwSpinCount, DWORD Flags); PALIMPORT VOID PALAPI DeleteCriticalSection(IN OUT LPCRITICAL_SECTION lpCriticalSection); PALIMPORT BOOL PALAPI TryEnterCriticalSection(IN OUT LPCRITICAL_SECTION lpCriticalSection); #define SEM_FAILCRITICALERRORS 0x0001 #define SEM_NOOPENFILEERRORBOX 0x8000 PALIMPORT UINT PALAPI SetErrorMode( IN UINT uMode); #define PAGE_NOACCESS 0x01 #define PAGE_READONLY 0x02 #define PAGE_READWRITE 0x04 #define PAGE_WRITECOPY 0x08 #define PAGE_EXECUTE 0x10 #define PAGE_EXECUTE_READ 0x20 #define PAGE_EXECUTE_READWRITE 0x40 #define PAGE_EXECUTE_WRITECOPY 0x80 #define MEM_COMMIT 0x1000 #define MEM_RESERVE 0x2000 #define MEM_DECOMMIT 0x4000 #define MEM_RELEASE 0x8000 #define MEM_RESET 0x80000 #define MEM_FREE 0x10000 #define MEM_PRIVATE 0x20000 #define MEM_MAPPED 0x40000 #define MEM_TOP_DOWN 0x100000 #define MEM_WRITE_WATCH 0x200000 #define MEM_LARGE_PAGES 0x20000000 #define MEM_RESERVE_EXECUTABLE 0x40000000 // reserve memory using executable memory allocator /* NOTE(review): parameter renamed from the misspelled 'dwMaxmimumSizeHigh' to 'dwMaximumSizeHigh' to match the Win32 CreateFileMappingW signature; prototype parameter names do not affect callers or the ABI. */ PALIMPORT HANDLE PALAPI CreateFileMappingW( IN HANDLE hFile, IN LPSECURITY_ATTRIBUTES lpFileMappingAttributes, IN DWORD flProtect, IN DWORD dwMaximumSizeHigh, IN DWORD dwMaximumSizeLow, IN LPCWSTR lpName); #define CreateFileMapping CreateFileMappingW #define SECTION_QUERY 0x0001 #define SECTION_MAP_WRITE 0x0002 #define SECTION_MAP_READ 0x0004 #define SECTION_ALL_ACCESS (SECTION_MAP_READ | SECTION_MAP_WRITE) // diff from winnt.h #define FILE_MAP_WRITE SECTION_MAP_WRITE #define FILE_MAP_READ SECTION_MAP_READ #define FILE_MAP_ALL_ACCESS SECTION_ALL_ACCESS #define FILE_MAP_COPY SECTION_QUERY
PALIMPORT HANDLE PALAPI OpenFileMappingW( IN DWORD dwDesiredAccess, IN BOOL bInheritHandle, IN LPCWSTR lpName); #define OpenFileMapping OpenFileMappingW typedef INT_PTR (PALAPI_NOEXPORT *FARPROC)(); PALIMPORT LPVOID PALAPI MapViewOfFile( IN HANDLE hFileMappingObject, IN DWORD dwDesiredAccess, IN DWORD dwFileOffsetHigh, IN DWORD dwFileOffsetLow, IN SIZE_T dwNumberOfBytesToMap); PALIMPORT LPVOID PALAPI MapViewOfFileEx( IN HANDLE hFileMappingObject, IN DWORD dwDesiredAccess, IN DWORD dwFileOffsetHigh, IN DWORD dwFileOffsetLow, IN SIZE_T dwNumberOfBytesToMap, IN LPVOID lpBaseAddress); PALIMPORT BOOL PALAPI UnmapViewOfFile( IN LPCVOID lpBaseAddress); PALIMPORT HMODULE PALAPI LoadLibraryW( IN LPCWSTR lpLibFileName); PALIMPORT HMODULE PALAPI LoadLibraryExW( IN LPCWSTR lpLibFileName, IN /*Reserved*/ HANDLE hFile, IN DWORD dwFlags); PALIMPORT NATIVE_LIBRARY_HANDLE PALAPI PAL_LoadLibraryDirect( IN LPCWSTR lpLibFileName); PALIMPORT BOOL PALAPI PAL_FreeLibraryDirect( IN NATIVE_LIBRARY_HANDLE dl_handle); PALIMPORT HMODULE PALAPI PAL_GetPalHostModule(); PALIMPORT FARPROC PALAPI PAL_GetProcAddressDirect( IN NATIVE_LIBRARY_HANDLE dl_handle, IN LPCSTR lpProcName); /*++ Function: PAL_LOADLoadPEFile Abstract Loads a PE file into memory. Properly maps all of the sections in the PE file. Returns a pointer to the loaded base. Parameters: IN hFile - The file to load IN offset - offset within hFile where the PE "file" is located Return value: A valid base address if successful. 0 if failure --*/ PALIMPORT PVOID PALAPI PAL_LOADLoadPEFile(HANDLE hFile, size_t offset); /*++ PAL_LOADUnloadPEFile Unload a PE file that was loaded by PAL_LOADLoadPEFile(). Parameters: IN ptr - the file pointer returned by PAL_LOADLoadPEFile() Return value: TRUE - success FALSE - failure (incorrect ptr, etc.) --*/ PALIMPORT BOOL PALAPI PAL_LOADUnloadPEFile(PVOID ptr); /*++ PAL_LOADMarkSectionAsNotNeeded Mark a section as NotNeeded that was loaded by PAL_LOADLoadPEFile(). 
Parameters: IN ptr - the section address mapped by PAL_LOADLoadPEFile() Return value: TRUE - success FALSE - failure (incorrect ptr, etc.) --*/ BOOL PALAPI PAL_LOADMarkSectionAsNotNeeded(void * ptr); #ifdef UNICODE #define LoadLibrary LoadLibraryW #define LoadLibraryEx LoadLibraryExW #else #define LoadLibrary LoadLibraryA #define LoadLibraryEx LoadLibraryExA #endif PALIMPORT FARPROC PALAPI GetProcAddress( IN HMODULE hModule, IN LPCSTR lpProcName); PALIMPORT BOOL PALAPI FreeLibrary( IN OUT HMODULE hLibModule); PALIMPORT PAL_NORETURN VOID PALAPI FreeLibraryAndExitThread( IN HMODULE hLibModule, IN DWORD dwExitCode); PALIMPORT BOOL PALAPI DisableThreadLibraryCalls( IN HMODULE hLibModule); PALIMPORT DWORD PALAPI GetModuleFileNameW( IN HMODULE hModule, OUT LPWSTR lpFileName, IN DWORD nSize); #ifdef UNICODE #define GetModuleFileName GetModuleFileNameW #else #define GetModuleFileName GetModuleFileNameA #endif PALIMPORT DWORD PALAPI GetModuleFileNameExW( IN HANDLE hProcess, IN HMODULE hModule, OUT LPWSTR lpFilename, IN DWORD nSize ); #ifdef UNICODE #define GetModuleFileNameEx GetModuleFileNameExW #endif // Get base address of the module containing a given symbol PALIMPORT LPCVOID PALAPI PAL_GetSymbolModuleBase(PVOID symbol); /* NOTE(review): removed a stray duplicate ';' after this prototype; it formed an empty top-level declaration, which is invalid in strict ISO C90 and warned by -pedantic. */ PALIMPORT int PALAPI PAL_CopyModuleData(PVOID moduleBase, PVOID destinationBufferStart, PVOID destinationBufferEnd); PALIMPORT LPCSTR PALAPI PAL_GetLoadLibraryError(); PALIMPORT LPVOID PALAPI PAL_VirtualReserveFromExecutableMemoryAllocatorWithinRange( IN LPCVOID lpBeginAddress, IN LPCVOID lpEndAddress, IN SIZE_T dwSize); PALIMPORT void PALAPI PAL_GetExecutableMemoryAllocatorPreferredRange( OUT PVOID *start, OUT PVOID *end); PALIMPORT LPVOID PALAPI VirtualAlloc( IN LPVOID lpAddress, IN SIZE_T dwSize, IN DWORD flAllocationType, IN DWORD flProtect); PALIMPORT BOOL PALAPI VirtualFree( IN LPVOID lpAddress, IN SIZE_T dwSize, IN DWORD dwFreeType); #if defined(HOST_OSX) && defined(HOST_ARM64) PALIMPORT VOID PALAPI PAL_JitWriteProtect(bool writeEnable); #endif
// defined(HOST_OSX) && defined(HOST_ARM64) PALIMPORT BOOL PALAPI VirtualProtect( IN LPVOID lpAddress, IN SIZE_T dwSize, IN DWORD flNewProtect, OUT PDWORD lpflOldProtect); typedef struct _MEMORYSTATUSEX { DWORD dwLength; DWORD dwMemoryLoad; DWORDLONG ullTotalPhys; DWORDLONG ullAvailPhys; DWORDLONG ullTotalPageFile; DWORDLONG ullAvailPageFile; DWORDLONG ullTotalVirtual; DWORDLONG ullAvailVirtual; DWORDLONG ullAvailExtendedVirtual; } MEMORYSTATUSEX, *LPMEMORYSTATUSEX; PALIMPORT BOOL PALAPI GlobalMemoryStatusEx( IN OUT LPMEMORYSTATUSEX lpBuffer); typedef struct _MEMORY_BASIC_INFORMATION { PVOID BaseAddress; PVOID AllocationBase_PAL_Undefined; DWORD AllocationProtect; SIZE_T RegionSize; DWORD State; DWORD Protect; DWORD Type; } MEMORY_BASIC_INFORMATION, *PMEMORY_BASIC_INFORMATION; PALIMPORT SIZE_T PALAPI VirtualQuery( IN LPCVOID lpAddress, OUT PMEMORY_BASIC_INFORMATION lpBuffer, IN SIZE_T dwLength); #define MoveMemory memmove #define CopyMemory memcpy #define FillMemory(Destination,Length,Fill) memset((Destination),(Fill),(Length)) #define ZeroMemory(Destination,Length) memset((Destination),0,(Length)) PALIMPORT BOOL PALAPI FlushInstructionCache( IN HANDLE hProcess, IN LPCVOID lpBaseAddress, IN SIZE_T dwSize); #define MAX_LEADBYTES 12 #define MAX_DEFAULTCHAR 2 PALIMPORT UINT PALAPI GetACP(void); typedef struct _cpinfo { UINT MaxCharSize; BYTE DefaultChar[MAX_DEFAULTCHAR]; BYTE LeadByte[MAX_LEADBYTES]; } CPINFO, *LPCPINFO; #define MB_PRECOMPOSED 0x00000001 #define MB_ERR_INVALID_CHARS 0x00000008 PALIMPORT int PALAPI MultiByteToWideChar( IN UINT CodePage, IN DWORD dwFlags, IN LPCSTR lpMultiByteStr, IN int cbMultiByte, OUT LPWSTR lpWideCharStr, IN int cchWideChar); #define WC_NO_BEST_FIT_CHARS 0x00000400 PALIMPORT int PALAPI WideCharToMultiByte( IN UINT CodePage, IN DWORD dwFlags, IN LPCWSTR lpWideCharStr, IN int cchWideChar, OUT LPSTR lpMultiByteStr, IN int cbMultyByte, IN LPCSTR lpDefaultChar, OUT LPBOOL lpUsedDefaultChar); #define EXCEPTION_NONCONTINUABLE 0x1 #define 
EXCEPTION_UNWINDING 0x2 #define EXCEPTION_EXIT_UNWIND 0x4 // Exit unwind is in progress (not used by PAL SEH) #define EXCEPTION_NESTED_CALL 0x10 // Nested exception handler call #define EXCEPTION_TARGET_UNWIND 0x20 // Target unwind in progress #define EXCEPTION_COLLIDED_UNWIND 0x40 // Collided exception handler call #define EXCEPTION_SKIP_VEH 0x200 #define EXCEPTION_UNWIND (EXCEPTION_UNWINDING | EXCEPTION_EXIT_UNWIND | \ EXCEPTION_TARGET_UNWIND | EXCEPTION_COLLIDED_UNWIND) #define IS_DISPATCHING(Flag) ((Flag & EXCEPTION_UNWIND) == 0) #define IS_UNWINDING(Flag) ((Flag & EXCEPTION_UNWIND) != 0) #define IS_TARGET_UNWIND(Flag) (Flag & EXCEPTION_TARGET_UNWIND) #define EXCEPTION_IS_SIGNAL 0x100 #define EXCEPTION_MAXIMUM_PARAMETERS 15 // Index in the ExceptionInformation array where we will keep the reference // to the native exception that needs to be deleted when dispatching // exception in managed code. #define NATIVE_EXCEPTION_ASYNC_SLOT (EXCEPTION_MAXIMUM_PARAMETERS-1) typedef struct _EXCEPTION_RECORD { DWORD ExceptionCode; DWORD ExceptionFlags; struct _EXCEPTION_RECORD *ExceptionRecord; PVOID ExceptionAddress; DWORD NumberParameters; ULONG_PTR ExceptionInformation[EXCEPTION_MAXIMUM_PARAMETERS]; } EXCEPTION_RECORD, *PEXCEPTION_RECORD; typedef struct _EXCEPTION_POINTERS { PEXCEPTION_RECORD ExceptionRecord; PCONTEXT ContextRecord; } EXCEPTION_POINTERS, *PEXCEPTION_POINTERS, *LPEXCEPTION_POINTERS; typedef LONG EXCEPTION_DISPOSITION; enum { ExceptionContinueExecution, ExceptionContinueSearch, ExceptionNestedException, ExceptionCollidedUnwind, }; // // A function table entry is generated for each frame function. 
// typedef struct _RUNTIME_FUNCTION { DWORD BeginAddress; #ifdef TARGET_AMD64 DWORD EndAddress; #endif DWORD UnwindData; } RUNTIME_FUNCTION, *PRUNTIME_FUNCTION; #define STANDARD_RIGHTS_REQUIRED (0x000F0000L) #define SYNCHRONIZE (0x00100000L) #define READ_CONTROL (0x00020000L) #define MAXIMUM_ALLOWED (0x02000000L) #define EVENT_MODIFY_STATE (0x0002) #define EVENT_ALL_ACCESS (STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0x3) #define MUTANT_QUERY_STATE (0x0001) #define MUTANT_ALL_ACCESS (STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | MUTANT_QUERY_STATE) #define MUTEX_ALL_ACCESS MUTANT_ALL_ACCESS #define SEMAPHORE_MODIFY_STATE (0x0002) #define SEMAPHORE_ALL_ACCESS (STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0x3) #define PROCESS_TERMINATE (0x0001) #define PROCESS_CREATE_THREAD (0x0002) #define PROCESS_SET_SESSIONID (0x0004) #define PROCESS_VM_OPERATION (0x0008) #define PROCESS_VM_READ (0x0010) #define PROCESS_VM_WRITE (0x0020) #define PROCESS_DUP_HANDLE (0x0040) #define PROCESS_CREATE_PROCESS (0x0080) #define PROCESS_SET_QUOTA (0x0100) #define PROCESS_SET_INFORMATION (0x0200) #define PROCESS_QUERY_INFORMATION (0x0400) #define PROCESS_SUSPEND_RESUME (0x0800) #define PROCESS_ALL_ACCESS (STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | \ 0xFFF) PALIMPORT HANDLE PALAPI OpenProcess( IN DWORD dwDesiredAccess, /* PROCESS_DUP_HANDLE or PROCESS_ALL_ACCESS */ IN BOOL bInheritHandle, IN DWORD dwProcessId ); PALIMPORT BOOL PALAPI EnumProcessModules( IN HANDLE hProcess, OUT HMODULE *lphModule, IN DWORD cb, OUT LPDWORD lpcbNeeded ); PALIMPORT VOID PALAPI OutputDebugStringA( IN LPCSTR lpOutputString); PALIMPORT VOID PALAPI OutputDebugStringW( IN LPCWSTR lpOutputStrig); #ifdef UNICODE #define OutputDebugString OutputDebugStringW #else #define OutputDebugString OutputDebugStringA #endif PALIMPORT VOID PALAPI DebugBreak(); PALIMPORT DWORD PALAPI GetEnvironmentVariableW( IN LPCWSTR lpName, OUT LPWSTR lpBuffer, IN DWORD nSize); #ifdef UNICODE #define GetEnvironmentVariable GetEnvironmentVariableW #else 
#define GetEnvironmentVariable GetEnvironmentVariableA #endif PALIMPORT BOOL PALAPI SetEnvironmentVariableW( IN LPCWSTR lpName, IN LPCWSTR lpValue); #ifdef UNICODE #define SetEnvironmentVariable SetEnvironmentVariableW #else #define SetEnvironmentVariable SetEnvironmentVariableA #endif PALIMPORT LPWSTR PALAPI GetEnvironmentStringsW(); #define GetEnvironmentStrings GetEnvironmentStringsW PALIMPORT BOOL PALAPI FreeEnvironmentStringsW( IN LPWSTR); #define FreeEnvironmentStrings FreeEnvironmentStringsW PALIMPORT BOOL PALAPI CloseHandle( IN OUT HANDLE hObject); PALIMPORT VOID PALAPI RaiseException( IN DWORD dwExceptionCode, IN DWORD dwExceptionFlags, IN DWORD nNumberOfArguments, IN CONST ULONG_PTR *lpArguments); PALIMPORT VOID PALAPI RaiseFailFastException( IN PEXCEPTION_RECORD pExceptionRecord, IN PCONTEXT pContextRecord, IN DWORD dwFlags); PALIMPORT DWORD PALAPI GetTickCount(); PALIMPORT ULONGLONG PALAPI GetTickCount64(); PALIMPORT BOOL PALAPI QueryPerformanceCounter( OUT LARGE_INTEGER *lpPerformanceCount ); PALIMPORT BOOL PALAPI QueryPerformanceFrequency( OUT LARGE_INTEGER *lpFrequency ); PALIMPORT BOOL PALAPI QueryThreadCycleTime( IN HANDLE ThreadHandle, OUT PULONG64 CycleTime); PALIMPORT INT PALAPI PAL_nanosleep( IN long timeInNs); typedef EXCEPTION_DISPOSITION (PALAPI_NOEXPORT *PVECTORED_EXCEPTION_HANDLER)( struct _EXCEPTION_POINTERS *ExceptionPointers); // Define BitScanForward64 and BitScanForward // Per MSDN, BitScanForward64 will search the mask data from LSB to MSB for a set bit. // If one is found, its bit position is stored in the out PDWORD argument and 1 is returned; // otherwise, an undefined value is stored in the out PDWORD argument and 0 is returned. // // On GCC, the equivalent function is __builtin_ffsll. It returns 1+index of the least // significant set bit, or 0 if if mask is zero. // // The same is true for BitScanForward, except that the GCC function is __builtin_ffs. 
EXTERN_C PALIMPORT inline unsigned char PALAPI BitScanForward( IN OUT PDWORD Index, IN UINT qwMask) { int iIndex = __builtin_ffs(qwMask); // Set the Index after deducting unity *Index = (DWORD)(iIndex - 1); // Both GCC and Clang generate better, smaller code if we check whether the // mask was/is zero rather than the equivalent check that iIndex is zero. return qwMask != 0 ? TRUE : FALSE; } EXTERN_C PALIMPORT inline unsigned char PALAPI BitScanForward64( IN OUT PDWORD Index, IN UINT64 qwMask) { int iIndex = __builtin_ffsll(qwMask); // Set the Index after deducting unity *Index = (DWORD)(iIndex - 1); // Both GCC and Clang generate better, smaller code if we check whether the // mask was/is zero rather than the equivalent check that iIndex is zero. return qwMask != 0 ? TRUE : FALSE; } // Define BitScanReverse64 and BitScanReverse // Per MSDN, BitScanReverse64 will search the mask data from MSB to LSB for a set bit. // If one is found, its bit position is stored in the out PDWORD argument and 1 is returned. // Otherwise, an undefined value is stored in the out PDWORD argument and 0 is returned. // // GCC/clang don't have a directly equivalent intrinsic; they do provide the __builtin_clzll // intrinsic, which returns the number of leading 0-bits in x starting at the most significant // bit position (the result is undefined when x = 0). // // The same is true for BitScanReverse, except that the GCC function is __builtin_clzl. EXTERN_C PALIMPORT inline unsigned char PALAPI BitScanReverse( IN OUT PDWORD Index, IN UINT qwMask) { // The result of __builtin_clzl is undefined when qwMask is zero, // but it's still OK to call the intrinsic in that case (just don't use the output). // Unconditionally calling the intrinsic in this way allows the compiler to // emit branchless code for this function when possible (depending on how the // intrinsic is implemented for the target platform). 
int lzcount = __builtin_clzl(qwMask); *Index = (DWORD)(31 - lzcount); return qwMask != 0; } EXTERN_C PALIMPORT inline unsigned char PALAPI BitScanReverse64( IN OUT PDWORD Index, IN UINT64 qwMask) { // The result of __builtin_clzll is undefined when qwMask is zero, // but it's still OK to call the intrinsic in that case (just don't use the output). // Unconditionally calling the intrinsic in this way allows the compiler to // emit branchless code for this function when possible (depending on how the // intrinsic is implemented for the target platform). int lzcount = __builtin_clzll(qwMask); *Index = (DWORD)(63 - lzcount); return qwMask != 0; } FORCEINLINE void PAL_ArmInterlockedOperationBarrier() { #ifdef HOST_ARM64 // On arm64, most of the __sync* functions generate a code sequence like: // loop: // ldaxr (load acquire exclusive) // ... // stlxr (store release exclusive) // cbnz loop // // It is possible for a load following the code sequence above to be reordered to occur prior to the store above due to the // release barrier, this is substantiated by https://github.com/dotnet/coreclr/pull/17508. Interlocked operations in the PAL // require the load to occur after the store. This memory barrier should be used following a call to a __sync* function to // prevent that reordering. Code generated for arm32 includes a 'dmb' after 'cbnz', so no issue there at the moment. __sync_synchronize(); #endif // HOST_ARM64 #ifdef HOST_LOONGARCH64 __sync_synchronize(); #endif } /*++ Function: InterlockedAdd The InterlockedAdd function adds the value of the specified variable with another specified value. The function prevents more than one thread from using the same variable simultaneously. Parameters lpAddend [in/out] Pointer to the variable to add. lpAddend [in] The value to add. Return Values The return value is the resulting added value. 
--*/ EXTERN_C PALIMPORT inline LONG PALAPI InterlockedAdd( IN OUT LONG volatile *lpAddend, IN LONG value) { LONG result = __sync_add_and_fetch(lpAddend, value); PAL_ArmInterlockedOperationBarrier(); return result; } EXTERN_C PALIMPORT inline LONGLONG PALAPI InterlockedAdd64( IN OUT LONGLONG volatile *lpAddend, IN LONGLONG value) { LONGLONG result = __sync_add_and_fetch(lpAddend, value); PAL_ArmInterlockedOperationBarrier(); return result; } /*++ Function: InterlockedIncrement The InterlockedIncrement function increments (increases by one) the value of the specified variable and checks the resulting value. The function prevents more than one thread from using the same variable simultaneously. Parameters lpAddend [in/out] Pointer to the variable to increment. Return Values The return value is the resulting incremented value. --*/ EXTERN_C PALIMPORT inline LONG PALAPI InterlockedIncrement( IN OUT LONG volatile *lpAddend) { LONG result = __sync_add_and_fetch(lpAddend, (LONG)1); PAL_ArmInterlockedOperationBarrier(); return result; } EXTERN_C PALIMPORT inline LONGLONG PALAPI InterlockedIncrement64( IN OUT LONGLONG volatile *lpAddend) { LONGLONG result = __sync_add_and_fetch(lpAddend, (LONGLONG)1); PAL_ArmInterlockedOperationBarrier(); return result; } /*++ Function: InterlockedDecrement The InterlockedDecrement function decrements (decreases by one) the value of the specified variable and checks the resulting value. The function prevents more than one thread from using the same variable simultaneously. Parameters lpAddend [in/out] Pointer to the variable to decrement. Return Values The return value is the resulting decremented value. 
--*/ EXTERN_C PALIMPORT inline LONG PALAPI InterlockedDecrement( IN OUT LONG volatile *lpAddend) { LONG result = __sync_sub_and_fetch(lpAddend, (LONG)1); PAL_ArmInterlockedOperationBarrier(); return result; } #define InterlockedDecrementAcquire InterlockedDecrement #define InterlockedDecrementRelease InterlockedDecrement EXTERN_C PALIMPORT inline LONGLONG PALAPI InterlockedDecrement64( IN OUT LONGLONG volatile *lpAddend) { LONGLONG result = __sync_sub_and_fetch(lpAddend, (LONGLONG)1); PAL_ArmInterlockedOperationBarrier(); return result; } /*++ Function: InterlockedExchange The InterlockedExchange function atomically exchanges a pair of values. The function prevents more than one thread from using the same variable simultaneously. Parameters Target [in/out] Pointer to the value to exchange. The function sets this variable to Value, and returns its prior value. Value [in] Specifies a new value for the variable pointed to by Target. Return Values The function returns the initial value pointed to by Target. --*/ EXTERN_C PALIMPORT inline LONG PALAPI InterlockedExchange( IN OUT LONG volatile *Target, IN LONG Value) { LONG result = __atomic_exchange_n(Target, Value, __ATOMIC_ACQ_REL); PAL_ArmInterlockedOperationBarrier(); return result; } EXTERN_C PALIMPORT inline LONGLONG PALAPI InterlockedExchange64( IN OUT LONGLONG volatile *Target, IN LONGLONG Value) { LONGLONG result = __atomic_exchange_n(Target, Value, __ATOMIC_ACQ_REL); PAL_ArmInterlockedOperationBarrier(); return result; } /*++ Function: InterlockedCompareExchange The InterlockedCompareExchange function performs an atomic comparison of the specified values and exchanges the values, based on the outcome of the comparison. The function prevents more than one thread from using the same variable simultaneously. If you are exchanging pointer values, this function has been superseded by the InterlockedCompareExchangePointer function. Parameters Destination [in/out] Specifies the address of the destination value. 
The sign is ignored. Exchange [in] Specifies the exchange value. The sign is ignored. Comperand [in] Specifies the value to compare to Destination. The sign is ignored. Return Values The return value is the initial value of the destination. --*/ EXTERN_C PALIMPORT inline LONG PALAPI InterlockedCompareExchange( IN OUT LONG volatile *Destination, IN LONG Exchange, IN LONG Comperand) { LONG result = __sync_val_compare_and_swap( Destination, /* The pointer to a variable whose value is to be compared with. */ Comperand, /* The value to be compared */ Exchange /* The value to be stored */); PAL_ArmInterlockedOperationBarrier(); return result; } #define InterlockedCompareExchangeAcquire InterlockedCompareExchange #define InterlockedCompareExchangeRelease InterlockedCompareExchange // See the 32-bit variant in interlock2.s EXTERN_C PALIMPORT inline LONGLONG PALAPI InterlockedCompareExchange64( IN OUT LONGLONG volatile *Destination, IN LONGLONG Exchange, IN LONGLONG Comperand) { LONGLONG result = __sync_val_compare_and_swap( Destination, /* The pointer to a variable whose value is to be compared with. */ Comperand, /* The value to be compared */ Exchange /* The value to be stored */); PAL_ArmInterlockedOperationBarrier(); return result; } /*++ Function: InterlockedExchangeAdd The InterlockedExchangeAdd function atomically adds the value of 'Value' to the variable that 'Addend' points to. Parameters lpAddend [in/out] Pointer to the variable to to added. Return Values The return value is the original value that 'Addend' pointed to. 
--*/ EXTERN_C PALIMPORT inline LONG PALAPI InterlockedExchangeAdd( IN OUT LONG volatile *Addend, IN LONG Value) { LONG result = __sync_fetch_and_add(Addend, Value); PAL_ArmInterlockedOperationBarrier(); return result; } EXTERN_C PALIMPORT inline LONGLONG PALAPI InterlockedExchangeAdd64( IN OUT LONGLONG volatile *Addend, IN LONGLONG Value) { LONGLONG result = __sync_fetch_and_add(Addend, Value); PAL_ArmInterlockedOperationBarrier(); return result; } EXTERN_C PALIMPORT inline LONG PALAPI InterlockedAnd( IN OUT LONG volatile *Destination, IN LONG Value) { LONG result = __sync_fetch_and_and(Destination, Value); PAL_ArmInterlockedOperationBarrier(); return result; } EXTERN_C PALIMPORT inline LONG PALAPI InterlockedOr( IN OUT LONG volatile *Destination, IN LONG Value) { LONG result = __sync_fetch_and_or(Destination, Value); PAL_ArmInterlockedOperationBarrier(); return result; } EXTERN_C PALIMPORT inline UCHAR PALAPI InterlockedBitTestAndReset( IN OUT LONG volatile *Base, IN LONG Bit) { return (InterlockedAnd(Base, ~(1 << Bit)) & (1 << Bit)) != 0; } EXTERN_C PALIMPORT inline UCHAR PALAPI InterlockedBitTestAndSet( IN OUT LONG volatile *Base, IN LONG Bit) { return (InterlockedOr(Base, (1 << Bit)) & (1 << Bit)) != 0; } #if defined(HOST_64BIT) #define InterlockedExchangePointer(Target, Value) \ ((PVOID)InterlockedExchange64((PLONG64)(Target), (LONGLONG)(Value))) #define InterlockedCompareExchangePointer(Destination, ExChange, Comperand) \ ((PVOID)InterlockedCompareExchange64((PLONG64)(Destination), (LONGLONG)(ExChange), (LONGLONG)(Comperand))) #else #define InterlockedExchangePointer(Target, Value) \ ((PVOID)(UINT_PTR)InterlockedExchange((PLONG)(UINT_PTR)(Target), (LONG)(UINT_PTR)(Value))) #define InterlockedCompareExchangePointer(Destination, ExChange, Comperand) \ ((PVOID)(UINT_PTR)InterlockedCompareExchange((PLONG)(UINT_PTR)(Destination), (LONG)(UINT_PTR)(ExChange), (LONG)(UINT_PTR)(Comperand))) #endif /*++ Function: MemoryBarrier The MemoryBarrier function creates a full 
memory barrier. --*/ EXTERN_C PALIMPORT inline VOID PALAPI MemoryBarrier() { __sync_synchronize(); } EXTERN_C PALIMPORT inline VOID PALAPI YieldProcessor() { #if defined(HOST_X86) || defined(HOST_AMD64) __asm__ __volatile__( "rep\n" "nop"); #elif defined(HOST_ARM) || defined(HOST_ARM64) __asm__ __volatile__( "yield"); #elif defined(HOST_LOONGARCH64) __asm__ volatile( "dbar 0; \n"); #else return; #endif } PALIMPORT DWORD PALAPI GetCurrentProcessorNumber(); /*++ Function: PAL_HasGetCurrentProcessorNumber Checks if GetCurrentProcessorNumber is available in the current environment --*/ PALIMPORT BOOL PALAPI PAL_HasGetCurrentProcessorNumber(); #define FORMAT_MESSAGE_ALLOCATE_BUFFER 0x00000100 #define FORMAT_MESSAGE_IGNORE_INSERTS 0x00000200 #define FORMAT_MESSAGE_FROM_STRING 0x00000400 #define FORMAT_MESSAGE_FROM_SYSTEM 0x00001000 #define FORMAT_MESSAGE_ARGUMENT_ARRAY 0x00002000 #define FORMAT_MESSAGE_MAX_WIDTH_MASK 0x000000FF PALIMPORT DWORD PALAPI FormatMessageW( IN DWORD dwFlags, IN LPCVOID lpSource, IN DWORD dwMessageId, IN DWORD dwLanguageId, OUT LPWSTR lpBffer, IN DWORD nSize, IN va_list *Arguments); #ifdef UNICODE #define FormatMessage FormatMessageW #endif PALIMPORT DWORD PALAPI GetLastError(); PALIMPORT VOID PALAPI SetLastError( IN DWORD dwErrCode); PALIMPORT LPWSTR PALAPI GetCommandLineW(); #ifdef UNICODE #define GetCommandLine GetCommandLineW #endif PALIMPORT VOID PALAPI RtlRestoreContext( IN PCONTEXT ContextRecord, IN PEXCEPTION_RECORD ExceptionRecord ); PALIMPORT VOID PALAPI RtlCaptureContext( OUT PCONTEXT ContextRecord ); PALIMPORT VOID PALAPI FlushProcessWriteBuffers(); typedef void (*PAL_ActivationFunction)(CONTEXT *context); typedef BOOL (*PAL_SafeActivationCheckFunction)(SIZE_T ip, BOOL checkingCurrentThread); PALIMPORT VOID PALAPI PAL_SetActivationFunction( IN PAL_ActivationFunction pActivationFunction, IN PAL_SafeActivationCheckFunction pSafeActivationCheckFunction); PALIMPORT BOOL PALAPI PAL_InjectActivation( IN HANDLE hThread ); #define 
VER_PLATFORM_WIN32_WINDOWS 1 #define VER_PLATFORM_WIN32_NT 2 #define VER_PLATFORM_UNIX 10 #define VER_PLATFORM_MACOSX 11 typedef struct _OSVERSIONINFOA { DWORD dwOSVersionInfoSize; DWORD dwMajorVersion; DWORD dwMinorVersion; DWORD dwBuildNumber; DWORD dwPlatformId; CHAR szCSDVersion[ 128 ]; } OSVERSIONINFOA, *POSVERSIONINFOA, *LPOSVERSIONINFOA; typedef struct _OSVERSIONINFOW { DWORD dwOSVersionInfoSize; DWORD dwMajorVersion; DWORD dwMinorVersion; DWORD dwBuildNumber; DWORD dwPlatformId; WCHAR szCSDVersion[ 128 ]; } OSVERSIONINFOW, *POSVERSIONINFOW, *LPOSVERSIONINFOW; #ifdef UNICODE typedef OSVERSIONINFOW OSVERSIONINFO; typedef POSVERSIONINFOW POSVERSIONINFO; typedef LPOSVERSIONINFOW LPOSVERSIONINFO; #else typedef OSVERSIONINFOA OSVERSIONINFO; typedef POSVERSIONINFOA POSVERSIONINFO; typedef LPOSVERSIONINFOA LPOSVERSIONINFO; #endif typedef struct _OSVERSIONINFOEXA { DWORD dwOSVersionInfoSize; DWORD dwMajorVersion; DWORD dwMinorVersion; DWORD dwBuildNumber; DWORD dwPlatformId; CHAR szCSDVersion[ 128 ]; WORD wServicePackMajor; WORD wServicePackMinor; WORD wSuiteMask; BYTE wProductType; BYTE wReserved; } OSVERSIONINFOEXA, *POSVERSIONINFOEXA, *LPOSVERSIONINFOEXA; typedef struct _OSVERSIONINFOEXW { DWORD dwOSVersionInfoSize; DWORD dwMajorVersion; DWORD dwMinorVersion; DWORD dwBuildNumber; DWORD dwPlatformId; WCHAR szCSDVersion[ 128 ]; WORD wServicePackMajor; WORD wServicePackMinor; WORD wSuiteMask; BYTE wProductType; BYTE wReserved; } OSVERSIONINFOEXW, *POSVERSIONINFOEXW, *LPOSVERSIONINFOEXW; #ifdef UNICODE typedef OSVERSIONINFOEXW OSVERSIONINFOEX; typedef POSVERSIONINFOEXW POSVERSIONINFOEX; typedef LPOSVERSIONINFOEXW LPOSVERSIONINFOEX; #else typedef OSVERSIONINFOEXA OSVERSIONINFOEX; typedef POSVERSIONINFOEXA POSVERSIONINFOEX; typedef LPOSVERSIONINFOEXA LPOSVERSIONINFOEX; #endif typedef struct _SYSTEM_INFO { WORD wProcessorArchitecture_PAL_Undefined; WORD wReserved_PAL_Undefined; // NOTE: diff from winbase.h - no obsolete dwOemId union DWORD dwPageSize; LPVOID 
lpMinimumApplicationAddress; LPVOID lpMaximumApplicationAddress; DWORD_PTR dwActiveProcessorMask_PAL_Undefined; DWORD dwNumberOfProcessors; DWORD dwProcessorType_PAL_Undefined; DWORD dwAllocationGranularity; WORD wProcessorLevel_PAL_Undefined; WORD wProcessorRevision_PAL_Undefined; } SYSTEM_INFO, *LPSYSTEM_INFO; PALIMPORT VOID PALAPI GetSystemInfo( OUT LPSYSTEM_INFO lpSystemInfo); PALIMPORT BOOL PALAPI CreatePipe( OUT PHANDLE hReadPipe, OUT PHANDLE hWritePipe, IN LPSECURITY_ATTRIBUTES lpPipeAttributes, IN DWORD nSize ); // // NUMA related APIs // PALIMPORT BOOL PALAPI GetNumaHighestNodeNumber( OUT PULONG HighestNodeNumber ); PALIMPORT BOOL PALAPI PAL_GetNumaProcessorNode(WORD procNo, WORD* node); PALIMPORT LPVOID PALAPI VirtualAllocExNuma( IN HANDLE hProcess, IN OPTIONAL LPVOID lpAddress, IN SIZE_T dwSize, IN DWORD flAllocationType, IN DWORD flProtect, IN DWORD nndPreferred ); PALIMPORT BOOL PALAPI PAL_SetCurrentThreadAffinity(WORD procNo); PALIMPORT BOOL PALAPI PAL_GetCurrentThreadAffinitySet(SIZE_T size, UINT_PTR* data); // // The types of events that can be logged. // #define EVENTLOG_SUCCESS 0x0000 #define EVENTLOG_ERROR_TYPE 0x0001 #define EVENTLOG_WARNING_TYPE 0x0002 #define EVENTLOG_INFORMATION_TYPE 0x0004 #define EVENTLOG_AUDIT_SUCCESS 0x0008 #define EVENTLOG_AUDIT_FAILURE 0x0010 #if defined FEATURE_PAL_ANSI #include "palprivate.h" #endif //FEATURE_PAL_ANSI /******************* C Runtime Entrypoints *******************************/ /* Some C runtime functions needs to be reimplemented by the PAL. 
To avoid name collisions, those functions have been renamed using defines */ #ifndef PAL_STDCPP_COMPAT #define exit PAL_exit #define printf PAL_printf #define vprintf PAL_vprintf #define wprintf PAL_wprintf #define wcstod PAL_wcstod #define wcstoul PAL_wcstoul #define wcscat PAL_wcscat #define wcscpy PAL_wcscpy #define wcslen PAL_wcslen #define wcsncmp PAL_wcsncmp #define wcschr PAL_wcschr #define wcsrchr PAL_wcsrchr #define wcsstr PAL_wcsstr #define swscanf PAL_swscanf #define wcspbrk PAL_wcspbrk #define wcscmp PAL_wcscmp #define wcsncpy PAL_wcsncpy #define realloc PAL_realloc #define fopen PAL_fopen #define strtok PAL_strtok #define strtoul PAL_strtoul #define strtoull PAL_strtoull #define fprintf PAL_fprintf #define fwprintf PAL_fwprintf #define vfprintf PAL_vfprintf #define vfwprintf PAL_vfwprintf #define rand PAL_rand #define time PAL_time #define getenv PAL_getenv #define fgets PAL_fgets #define qsort PAL_qsort #define bsearch PAL_bsearch #define ferror PAL_ferror #define fread PAL_fread #define fwrite PAL_fwrite #define ftell PAL_ftell #define fclose PAL_fclose #define fflush PAL_fflush #define fputs PAL_fputs #define fseek PAL_fseek #define fgetpos PAL_fgetpos #define fsetpos PAL_fsetpos #define setvbuf PAL_setvbuf #define acos PAL_acos #define asin PAL_asin #define atan2 PAL_atan2 #define exp PAL_exp #define ilogb PAL_ilogb #define log PAL_log #define log10 PAL_log10 #define pow PAL_pow #define sincos PAL_sincos #define acosf PAL_acosf #define asinf PAL_asinf #define atan2f PAL_atan2f #define expf PAL_expf #define ilogbf PAL_ilogbf #define logf PAL_logf #define log10f PAL_log10f #define powf PAL_powf #define sincosf PAL_sincosf #define malloc PAL_malloc #define free PAL_free #define _strdup PAL__strdup #define _open PAL__open #define _pread PAL__pread #define _close PAL__close #define _wcstoui64 PAL__wcstoui64 #define _flushall PAL__flushall #define strnlen PAL_strnlen #define wcsnlen PAL_wcsnlen #ifdef HOST_AMD64 #define _mm_getcsr PAL__mm_getcsr #define 
_mm_setcsr PAL__mm_setcsr #endif // HOST_AMD64 #endif // !PAL_STDCPP_COMPAT #ifndef _CONST_RETURN #ifdef __cplusplus #define _CONST_RETURN const #define _CRT_CONST_CORRECT_OVERLOADS #else #define _CONST_RETURN #endif #endif /* For backwards compatibility */ #define _WConst_return _CONST_RETURN #define EOF (-1) typedef int errno_t; #if defined(__WINT_TYPE__) typedef __WINT_TYPE__ wint_t; #else typedef unsigned int wint_t; #endif #ifndef PAL_STDCPP_COMPAT #if defined(_DEBUG) /*++ Function: PAL_memcpy Overlapping buffer-safe version of memcpy. See MSDN doc for memcpy --*/ EXTERN_C PALIMPORT DLLEXPORT void *PAL_memcpy (void *dest, const void * src, size_t count); PALIMPORT void * __cdecl memcpy(void *, const void *, size_t) THROW_DECL; #define memcpy PAL_memcpy #define IS_PAL_memcpy 1 #define TEST_PAL_DEFERRED(def) IS_##def #define IS_REDEFINED_IN_PAL(def) TEST_PAL_DEFERRED(def) #else //defined(_DEBUG) PALIMPORT void * __cdecl memcpy(void *, const void *, size_t); #endif //defined(_DEBUG) PALIMPORT int __cdecl memcmp(const void *, const void *, size_t); PALIMPORT void * __cdecl memset(void *, int, size_t); PALIMPORT void * __cdecl memmove(void *, const void *, size_t); PALIMPORT void * __cdecl memchr(const void *, int, size_t); PALIMPORT long long int __cdecl atoll(const char *) MATH_THROW_DECL; PALIMPORT size_t __cdecl strlen(const char *); PALIMPORT int __cdecl strcmp(const char*, const char *); PALIMPORT int __cdecl strncmp(const char*, const char *, size_t); PALIMPORT int __cdecl _strnicmp(const char *, const char *, size_t); PALIMPORT char * __cdecl strcat(char *, const char *); PALIMPORT char * __cdecl strncat(char *, const char *, size_t); PALIMPORT char * __cdecl strcpy(char *, const char *); PALIMPORT char * __cdecl strncpy(char *, const char *, size_t); PALIMPORT char * __cdecl strchr(const char *, int); PALIMPORT char * __cdecl strrchr(const char *, int); PALIMPORT char * __cdecl strpbrk(const char *, const char *); PALIMPORT char * __cdecl strstr(const char 
*, const char *); PALIMPORT char * __cdecl strtok(char *, const char *); PALIMPORT size_t __cdecl strspn(const char *, const char *); PALIMPORT size_t __cdecl strcspn(const char *, const char *); PALIMPORT int __cdecl atoi(const char *); PALIMPORT ULONG __cdecl strtoul(const char *, char **, int); PALIMPORT ULONGLONG __cdecl strtoull(const char *, char **, int); PALIMPORT double __cdecl atof(const char *); PALIMPORT double __cdecl strtod(const char *, char **); PALIMPORT int __cdecl isprint(int); PALIMPORT int __cdecl isspace(int); PALIMPORT int __cdecl isalpha(int); PALIMPORT int __cdecl isalnum(int); PALIMPORT int __cdecl isdigit(int); PALIMPORT int __cdecl isxdigit(int); PALIMPORT int __cdecl isupper(int); PALIMPORT int __cdecl islower(int); PALIMPORT int __cdecl tolower(int); PALIMPORT int __cdecl toupper(int); PALIMPORT int __cdecl iswalpha(wint_t); PALIMPORT int __cdecl iswdigit(wint_t); PALIMPORT int __cdecl iswupper(wint_t); PALIMPORT int __cdecl iswprint(wint_t); PALIMPORT int __cdecl iswspace(wint_t); PALIMPORT int __cdecl iswxdigit(wint_t); PALIMPORT wint_t __cdecl towupper(wint_t); PALIMPORT wint_t __cdecl towlower(wint_t); #endif // PAL_STDCPP_COMPAT /* _TRUNCATE */ #if !defined(_TRUNCATE) #define _TRUNCATE ((size_t)-1) #endif PALIMPORT DLLEXPORT errno_t __cdecl memcpy_s(void *, size_t, const void *, size_t) THROW_DECL; PALIMPORT errno_t __cdecl memmove_s(void *, size_t, const void *, size_t); PALIMPORT DLLEXPORT int __cdecl _stricmp(const char *, const char *); PALIMPORT DLLEXPORT int __cdecl vsprintf_s(char *, size_t, const char *, va_list); PALIMPORT char * __cdecl _gcvt_s(char *, int, double, int); PALIMPORT int __cdecl __iscsym(int); PALIMPORT DLLEXPORT int __cdecl _wcsicmp(const WCHAR *, const WCHAR*); PALIMPORT int __cdecl _wcsnicmp(const WCHAR *, const WCHAR *, size_t); PALIMPORT int __cdecl _vsnprintf(char *, size_t, const char *, va_list); PALIMPORT DLLEXPORT int __cdecl _vsnprintf_s(char *, size_t, size_t, const char *, va_list); PALIMPORT 
DLLEXPORT int __cdecl _vsnwprintf_s(WCHAR *, size_t, size_t, const WCHAR *, va_list); PALIMPORT DLLEXPORT int __cdecl _snwprintf_s(WCHAR *, size_t, size_t, const WCHAR *, ...); PALIMPORT DLLEXPORT int __cdecl _snprintf_s(char *, size_t, size_t, const char *, ...); PALIMPORT DLLEXPORT int __cdecl sprintf_s(char *, size_t, const char *, ... ); PALIMPORT DLLEXPORT int __cdecl swprintf_s(WCHAR *, size_t, const WCHAR *, ... ); PALIMPORT int __cdecl _snwprintf_s(WCHAR *, size_t, size_t, const WCHAR *, ...); PALIMPORT int __cdecl vswprintf_s( WCHAR *, size_t, const WCHAR *, va_list); PALIMPORT DLLEXPORT int __cdecl sscanf_s(const char *, const char *, ...); PALIMPORT DLLEXPORT errno_t __cdecl _itow_s(int, WCHAR *, size_t, int); PALIMPORT DLLEXPORT size_t __cdecl PAL_wcslen(const WCHAR *); PALIMPORT DLLEXPORT int __cdecl PAL_wcscmp(const WCHAR*, const WCHAR*); PALIMPORT DLLEXPORT int __cdecl PAL_wcsncmp(const WCHAR *, const WCHAR *, size_t); PALIMPORT DLLEXPORT WCHAR * __cdecl PAL_wcscat(WCHAR *, const WCHAR *); PALIMPORT WCHAR * __cdecl PAL_wcscpy(WCHAR *, const WCHAR *); PALIMPORT WCHAR * __cdecl PAL_wcsncpy(WCHAR *, const WCHAR *, size_t); PALIMPORT DLLEXPORT const WCHAR * __cdecl PAL_wcschr(const WCHAR *, WCHAR); PALIMPORT DLLEXPORT const WCHAR * __cdecl PAL_wcsrchr(const WCHAR *, WCHAR); PALIMPORT WCHAR _WConst_return * __cdecl PAL_wcspbrk(const WCHAR *, const WCHAR *); PALIMPORT DLLEXPORT WCHAR _WConst_return * __cdecl PAL_wcsstr(const WCHAR *, const WCHAR *); PALIMPORT int __cdecl PAL_swprintf(WCHAR *, const WCHAR *, ...); PALIMPORT int __cdecl PAL_vswprintf(WCHAR *, const WCHAR *, va_list); PALIMPORT int __cdecl PAL_swscanf(const WCHAR *, const WCHAR *, ...); PALIMPORT DLLEXPORT ULONG __cdecl PAL_wcstoul(const WCHAR *, WCHAR **, int); PALIMPORT double __cdecl PAL_wcstod(const WCHAR *, WCHAR **); PALIMPORT errno_t __cdecl _wcslwr_s(WCHAR *, size_t sz); PALIMPORT DLLEXPORT ULONGLONG _wcstoui64(const WCHAR *, WCHAR **, int); PALIMPORT DLLEXPORT errno_t __cdecl 
_i64tow_s(long long, WCHAR *, size_t, int); PALIMPORT int __cdecl _wtoi(const WCHAR *); #ifdef __cplusplus extern "C++" { inline WCHAR *PAL_wcschr(WCHAR* S, WCHAR C) {return ((WCHAR *)PAL_wcschr((const WCHAR *)S, C)); } inline WCHAR *PAL_wcsrchr(WCHAR* S, WCHAR C) {return ((WCHAR *)PAL_wcsrchr((const WCHAR *)S, C)); } inline WCHAR *PAL_wcspbrk(WCHAR* S, const WCHAR* P) {return ((WCHAR *)PAL_wcspbrk((const WCHAR *)S, P)); } inline WCHAR *PAL_wcsstr(WCHAR* S, const WCHAR* P) {return ((WCHAR *)PAL_wcsstr((const WCHAR *)S, P)); } } #endif #if defined(__llvm__) #define HAS_ROTL __has_builtin(_rotl) #define HAS_ROTR __has_builtin(_rotr) #else #define HAS_ROTL 0 #define HAS_ROTR 0 #endif #if !HAS_ROTL /*++ Function: _rotl See MSDN doc. --*/ EXTERN_C PALIMPORT inline unsigned int __cdecl _rotl(unsigned int value, int shift) { unsigned int retval = 0; shift &= 0x1f; retval = (value << shift) | (value >> (sizeof(int) * CHAR_BIT - shift)); return retval; } #endif // !HAS_ROTL // On 64 bit unix, make the long an int. #ifdef HOST_64BIT #define _lrotl _rotl #endif // HOST_64BIT #if !HAS_ROTR /*++ Function: _rotr See MSDN doc. 
--*/ EXTERN_C PALIMPORT inline unsigned int __cdecl _rotr(unsigned int value, int shift) { unsigned int retval; shift &= 0x1f; retval = (value >> shift) | (value << (sizeof(int) * CHAR_BIT - shift)); return retval; } #endif // !HAS_ROTR PALIMPORT int __cdecl abs(int); // clang complains if this is declared with __int64 PALIMPORT long long __cdecl llabs(long long); #ifndef PAL_STDCPP_COMPAT PALIMPORT int __cdecl _finite(double); PALIMPORT int __cdecl _isnan(double); PALIMPORT double __cdecl _copysign(double, double); PALIMPORT double __cdecl acos(double); PALIMPORT double __cdecl acosh(double) MATH_THROW_DECL; PALIMPORT double __cdecl asin(double); PALIMPORT double __cdecl asinh(double) MATH_THROW_DECL; PALIMPORT double __cdecl atan(double) MATH_THROW_DECL; PALIMPORT double __cdecl atanh(double) MATH_THROW_DECL; PALIMPORT double __cdecl atan2(double, double); PALIMPORT double __cdecl cbrt(double) MATH_THROW_DECL; PALIMPORT double __cdecl ceil(double); PALIMPORT double __cdecl cos(double); PALIMPORT double __cdecl cosh(double); PALIMPORT double __cdecl exp(double); PALIMPORT double __cdecl fabs(double); PALIMPORT double __cdecl floor(double); PALIMPORT double __cdecl fmod(double, double); PALIMPORT double __cdecl fma(double, double, double) MATH_THROW_DECL; PALIMPORT int __cdecl ilogb(double); PALIMPORT double __cdecl log(double); PALIMPORT double __cdecl log2(double) MATH_THROW_DECL; PALIMPORT double __cdecl log10(double); PALIMPORT double __cdecl modf(double, double*); PALIMPORT double __cdecl pow(double, double); PALIMPORT double __cdecl sin(double); PALIMPORT void __cdecl sincos(double, double*, double*); PALIMPORT double __cdecl sinh(double); PALIMPORT double __cdecl sqrt(double); PALIMPORT double __cdecl tan(double); PALIMPORT double __cdecl tanh(double); PALIMPORT double __cdecl trunc(double); PALIMPORT int __cdecl _finitef(float); PALIMPORT int __cdecl _isnanf(float); PALIMPORT float __cdecl _copysignf(float, float); PALIMPORT float __cdecl acosf(float); 
PALIMPORT float __cdecl acoshf(float) MATH_THROW_DECL; PALIMPORT float __cdecl asinf(float); PALIMPORT float __cdecl asinhf(float) MATH_THROW_DECL; PALIMPORT float __cdecl atanf(float) MATH_THROW_DECL; PALIMPORT float __cdecl atanhf(float) MATH_THROW_DECL; PALIMPORT float __cdecl atan2f(float, float); PALIMPORT float __cdecl cbrtf(float) MATH_THROW_DECL; PALIMPORT float __cdecl ceilf(float); PALIMPORT float __cdecl cosf(float); PALIMPORT float __cdecl coshf(float); PALIMPORT float __cdecl expf(float); PALIMPORT float __cdecl fabsf(float); PALIMPORT float __cdecl floorf(float); PALIMPORT float __cdecl fmodf(float, float); PALIMPORT float __cdecl fmaf(float, float, float) MATH_THROW_DECL; PALIMPORT int __cdecl ilogbf(float); PALIMPORT float __cdecl logf(float); PALIMPORT float __cdecl log2f(float) MATH_THROW_DECL; PALIMPORT float __cdecl log10f(float); PALIMPORT float __cdecl modff(float, float*); PALIMPORT float __cdecl powf(float, float); PALIMPORT float __cdecl sinf(float); PALIMPORT void __cdecl sincosf(float, float*, float*); PALIMPORT float __cdecl sinhf(float); PALIMPORT float __cdecl sqrtf(float); PALIMPORT float __cdecl tanf(float); PALIMPORT float __cdecl tanhf(float); PALIMPORT float __cdecl truncf(float); #endif // !PAL_STDCPP_COMPAT #ifndef PAL_STDCPP_COMPAT #ifdef __cplusplus extern "C++" { inline __int64 abs(__int64 _X) { return llabs(_X); } } #endif PALIMPORT DLLEXPORT void * __cdecl malloc(size_t); PALIMPORT DLLEXPORT void __cdecl free(void *); PALIMPORT DLLEXPORT void * __cdecl realloc(void *, size_t); PALIMPORT char * __cdecl _strdup(const char *); #if defined(_MSC_VER) #define alloca _alloca #else #define _alloca alloca #endif //_MSC_VER #define alloca __builtin_alloca #define max(a, b) (((a) > (b)) ? (a) : (b)) #define min(a, b) (((a) < (b)) ? 
(a) : (b)) #endif // !PAL_STDCPP_COMPAT PALIMPORT PAL_NORETURN void __cdecl exit(int); #ifndef PAL_STDCPP_COMPAT PALIMPORT DLLEXPORT void __cdecl qsort(void *, size_t, size_t, int(__cdecl *)(const void *, const void *)); PALIMPORT DLLEXPORT void * __cdecl bsearch(const void *, const void *, size_t, size_t, int(__cdecl *)(const void *, const void *)); PALIMPORT time_t __cdecl time(time_t *); #endif // !PAL_STDCPP_COMPAT PALIMPORT DLLEXPORT int __cdecl _open(const char *szPath, int nFlags, ...); PALIMPORT DLLEXPORT size_t __cdecl _pread(int fd, void *buf, size_t nbytes, ULONG64 offset); PALIMPORT DLLEXPORT int __cdecl _close(int); PALIMPORT DLLEXPORT int __cdecl _flushall(); #ifdef PAL_STDCPP_COMPAT struct _PAL_FILE; typedef struct _PAL_FILE PAL_FILE; #else // PAL_STDCPP_COMPAT struct _FILE; typedef struct _FILE FILE; typedef struct _FILE PAL_FILE; #define SEEK_SET 0 #define SEEK_CUR 1 #define SEEK_END 2 /* Locale categories */ #define LC_ALL 0 #define LC_COLLATE 1 #define LC_CTYPE 2 #define LC_MONETARY 3 #define LC_NUMERIC 4 #define LC_TIME 5 #define _IOFBF 0 /* setvbuf should set fully buffered */ #define _IOLBF 1 /* setvbuf should set line buffered */ #define _IONBF 2 /* setvbuf should set unbuffered */ #endif // PAL_STDCPP_COMPAT PALIMPORT int __cdecl PAL_fclose(PAL_FILE *); PALIMPORT DLLEXPORT int __cdecl PAL_fflush(PAL_FILE *); PALIMPORT size_t __cdecl PAL_fwrite(const void *, size_t, size_t, PAL_FILE *); PALIMPORT size_t __cdecl PAL_fread(void *, size_t, size_t, PAL_FILE *); PALIMPORT char * __cdecl PAL_fgets(char *, int, PAL_FILE *); PALIMPORT int __cdecl PAL_fputs(const char *, PAL_FILE *); PALIMPORT DLLEXPORT int __cdecl PAL_fprintf(PAL_FILE *, const char *, ...); PALIMPORT int __cdecl PAL_vfprintf(PAL_FILE *, const char *, va_list); PALIMPORT int __cdecl PAL_fseek(PAL_FILE *, LONG, int); PALIMPORT LONG __cdecl PAL_ftell(PAL_FILE *); PALIMPORT int __cdecl PAL_ferror(PAL_FILE *); PALIMPORT PAL_FILE * __cdecl PAL_fopen(const char *, const char *); PALIMPORT 
int __cdecl PAL_setvbuf(PAL_FILE *stream, char *, int, size_t); PALIMPORT DLLEXPORT int __cdecl PAL_fwprintf(PAL_FILE *, const WCHAR *, ...); PALIMPORT int __cdecl PAL_vfwprintf(PAL_FILE *, const WCHAR *, va_list); PALIMPORT int __cdecl PAL_wprintf(const WCHAR*, ...); PALIMPORT int __cdecl _getw(PAL_FILE *); PALIMPORT int __cdecl _putw(int, PAL_FILE *); PALIMPORT PAL_FILE * __cdecl _fdopen(int, const char *); PALIMPORT PAL_FILE * __cdecl _wfopen(const WCHAR *, const WCHAR *); /* Maximum value that can be returned by the rand function. */ #ifndef PAL_STDCPP_COMPAT #define RAND_MAX 0x7fff #endif // !PAL_STDCPP_COMPAT PALIMPORT int __cdecl rand(void); PALIMPORT void __cdecl srand(unsigned int); PALIMPORT DLLEXPORT int __cdecl printf(const char *, ...); PALIMPORT int __cdecl vprintf(const char *, va_list); #ifdef _MSC_VER #define PAL_get_caller _MSC_VER #else #define PAL_get_caller 0 #endif PALIMPORT DLLEXPORT PAL_FILE * __cdecl PAL_get_stdout(int caller); PALIMPORT PAL_FILE * __cdecl PAL_get_stdin(int caller); PALIMPORT DLLEXPORT PAL_FILE * __cdecl PAL_get_stderr(int caller); PALIMPORT DLLEXPORT int * __cdecl PAL_errno(int caller); #ifdef PAL_STDCPP_COMPAT #define PAL_stdout (PAL_get_stdout(PAL_get_caller)) #define PAL_stdin (PAL_get_stdin(PAL_get_caller)) #define PAL_stderr (PAL_get_stderr(PAL_get_caller)) #define PAL_errno (*PAL_errno(PAL_get_caller)) #else // PAL_STDCPP_COMPAT #define stdout (PAL_get_stdout(PAL_get_caller)) #define stdin (PAL_get_stdin(PAL_get_caller)) #define stderr (PAL_get_stderr(PAL_get_caller)) #define errno (*PAL_errno(PAL_get_caller)) #endif // PAL_STDCPP_COMPAT PALIMPORT DLLEXPORT char * __cdecl getenv(const char *); PALIMPORT DLLEXPORT int __cdecl _putenv(const char *); #define ERANGE 34 PALIMPORT WCHAR __cdecl PAL_ToUpperInvariant(WCHAR); PALIMPORT WCHAR __cdecl PAL_ToLowerInvariant(WCHAR); /******************* PAL-specific I/O completion port *****************/ typedef struct _PAL_IOCP_CPU_INFORMATION { union { FILETIME 
ftLastRecordedIdleTime;
        FILETIME ftLastRecordedCurrentTime;
    } LastRecordedTime;
    FILETIME ftLastRecordedKernelTime;
    FILETIME ftLastRecordedUserTime;
} PAL_IOCP_CPU_INFORMATION;

PALIMPORT
INT
PALAPI
PAL_GetCPUBusyTime(
    IN OUT PAL_IOCP_CPU_INFORMATION *lpPrevCPUInfo);

/****************PAL Perf functions for PInvoke*********************/

#if PAL_PERF
PALIMPORT
VOID
PALAPI
PAL_EnableProcessProfile();

PALIMPORT
VOID
PALAPI
PAL_DisableProcessProfile();

PALIMPORT
BOOL
PALAPI
PAL_IsProcessProfileEnabled();

PALIMPORT
INT64
PALAPI
PAL_GetCpuTickCount();
#endif // PAL_PERF

/******************* PAL functions for SIMD extensions *****************/
PALIMPORT
unsigned int _mm_getcsr(void);

PALIMPORT
void _mm_setcsr(unsigned int i);

/******************* PAL functions for CPU capability detection *******/
#ifdef __cplusplus
class CORJIT_FLAGS;

PALIMPORT
VOID
PALAPI
PAL_GetJitCpuCapabilityFlags(CORJIT_FLAGS *flags);
#endif

#ifdef __cplusplus
PALIMPORT
VOID
PALAPI
PAL_FreeExceptionRecords(
    IN EXCEPTION_RECORD *exceptionRecord,
    IN CONTEXT *contextRecord);

#define EXCEPTION_CONTINUE_SEARCH 0
#define EXCEPTION_EXECUTE_HANDLER 1
#define EXCEPTION_CONTINUE_EXECUTION -1

// Move-only wrapper that carries an EXCEPTION_RECORD / CONTEXT pair through
// C++ exception dispatch (the PAL_TRY / PAL_EXCEPT machinery below throws and
// catches instances of this type). The single live instance owns the records
// and frees them on destruction unless they live on the stack.
struct PAL_SEHException
{
private:
    // Sentinel for TargetFrameSp meaning "no unwind target set yet";
    // IsFirstPass() compares against this.
    static const SIZE_T NoTargetFrameSp = (SIZE_T)SIZE_MAX;

    // Transfers ownership of all state from 'ex' into this instance and then
    // clears 'ex', so the records are freed exactly once.
    void Move(PAL_SEHException& ex)
    {
        ExceptionPointers.ExceptionRecord = ex.ExceptionPointers.ExceptionRecord;
        ExceptionPointers.ContextRecord = ex.ExceptionPointers.ContextRecord;
        TargetFrameSp = ex.TargetFrameSp;
        RecordsOnStack = ex.RecordsOnStack;
        ManagedToNativeExceptionCallback = ex.ManagedToNativeExceptionCallback;
        ManagedToNativeExceptionCallbackContext = ex.ManagedToNativeExceptionCallbackContext;

        ex.Clear();
    }

    // Releases the owned records via PAL_FreeExceptionRecords, but only when
    // they are not stack-allocated (RecordsOnStack).
    void FreeRecords()
    {
        if (ExceptionPointers.ExceptionRecord != NULL && !RecordsOnStack )
        {
            PAL_FreeExceptionRecords(ExceptionPointers.ExceptionRecord, ExceptionPointers.ContextRecord);
            ExceptionPointers.ExceptionRecord = NULL;
            ExceptionPointers.ContextRecord = NULL;
        }
    }

public:
    EXCEPTION_POINTERS ExceptionPointers;
    // Target frame stack pointer, set before the 2nd (unwinding) pass;
    // NoTargetFrameSp while still in the first pass.
    SIZE_T TargetFrameSp;
    // True when the records are stack-allocated and must not be freed.
    bool RecordsOnStack;

    // Optional callback (plus its context) invoked when the exception
    // propagates from managed code into native code.
    void(*ManagedToNativeExceptionCallback)(void* context);
    void* ManagedToNativeExceptionCallbackContext;

    PAL_SEHException(EXCEPTION_RECORD *pExceptionRecord, CONTEXT *pContextRecord, bool onStack = false)
    {
        ExceptionPointers.ExceptionRecord = pExceptionRecord;
        ExceptionPointers.ContextRecord = pContextRecord;
        TargetFrameSp = NoTargetFrameSp;
        RecordsOnStack = onStack;
        ManagedToNativeExceptionCallback = NULL;
        ManagedToNativeExceptionCallbackContext = NULL;
    }

    PAL_SEHException()
    {
        Clear();
    }

    // The copy constructor and copy assignment operators are deleted so that the PAL_SEHException
    // can never be copied, only moved. This enables simple lifetime management of the exception and
    // context records, since there is always just one PAL_SEHException instance referring to the same records.
    PAL_SEHException(const PAL_SEHException& ex) = delete;
    PAL_SEHException& operator=(const PAL_SEHException& ex) = delete;

    PAL_SEHException(PAL_SEHException&& ex)
    {
        Move(ex);
    }

    // Move assignment frees any records this instance already owns before
    // taking over those of 'ex'.
    PAL_SEHException& operator=(PAL_SEHException&& ex)
    {
        FreeRecords();
        Move(ex);
        return *this;
    }

    ~PAL_SEHException()
    {
        FreeRecords();
    }

    // Resets to the empty state without freeing anything (used after a move).
    void Clear()
    {
        ExceptionPointers.ExceptionRecord = NULL;
        ExceptionPointers.ContextRecord = NULL;
        TargetFrameSp = NoTargetFrameSp;
        RecordsOnStack = false;
        ManagedToNativeExceptionCallback = NULL;
        ManagedToNativeExceptionCallbackContext = NULL;
    }

    CONTEXT* GetContextRecord()
    {
        return ExceptionPointers.ContextRecord;
    }

    EXCEPTION_RECORD* GetExceptionRecord()
    {
        return ExceptionPointers.ExceptionRecord;
    }

    // True until a target frame is set, i.e. during the first dispatch pass.
    bool IsFirstPass()
    {
        return (TargetFrameSp == NoTargetFrameSp);
    }

    // Clears the unwind target after the second pass completes.
    void SecondPassDone()
    {
        TargetFrameSp = NoTargetFrameSp;
    }

    bool HasPropagateExceptionCallback()
    {
        return ManagedToNativeExceptionCallback != NULL;
    }

    void SetPropagateExceptionCallback(
        void(*callback)(void*),
        void* context)
    {
        ManagedToNativeExceptionCallback = callback;
        ManagedToNativeExceptionCallbackContext = context;
    }
};

typedef BOOL
(*PHARDWARE_EXCEPTION_HANDLER)(PAL_SEHException* ex); typedef BOOL (*PHARDWARE_EXCEPTION_SAFETY_CHECK_FUNCTION)(PCONTEXT contextRecord, PEXCEPTION_RECORD exceptionRecord); typedef VOID (*PTERMINATION_REQUEST_HANDLER)(int terminationExitCode); typedef DWORD (*PGET_GCMARKER_EXCEPTION_CODE)(LPVOID ip); PALIMPORT VOID PALAPI PAL_SetHardwareExceptionHandler( IN PHARDWARE_EXCEPTION_HANDLER exceptionHandler, IN PHARDWARE_EXCEPTION_SAFETY_CHECK_FUNCTION exceptionCheckFunction); PALIMPORT VOID PALAPI PAL_SetGetGcMarkerExceptionCode( IN PGET_GCMARKER_EXCEPTION_CODE getGcMarkerExceptionCode); PALIMPORT VOID PALAPI PAL_ThrowExceptionFromContext( IN CONTEXT* context, IN PAL_SEHException* ex); PALIMPORT VOID PALAPI PAL_SetTerminationRequestHandler( IN PTERMINATION_REQUEST_HANDLER terminationRequestHandler); PALIMPORT VOID PALAPI PAL_CatchHardwareExceptionHolderEnter(); PALIMPORT VOID PALAPI PAL_CatchHardwareExceptionHolderExit(); // // This holder is used to indicate that a hardware // exception should be raised as a C++ exception // to better emulate SEH on the xplat platforms. // class CatchHardwareExceptionHolder { public: CatchHardwareExceptionHolder() { PAL_CatchHardwareExceptionHolderEnter(); } ~CatchHardwareExceptionHolder() { PAL_CatchHardwareExceptionHolderExit(); } static bool IsEnabled(); }; // // NOTE: This is only defined in one PAL test. // #ifdef FEATURE_ENABLE_HARDWARE_EXCEPTIONS #define HardwareExceptionHolder CatchHardwareExceptionHolder __catchHardwareException; #else #define HardwareExceptionHolder #endif // FEATURE_ENABLE_HARDWARE_EXCEPTIONS class NativeExceptionHolderBase; PALIMPORT PALAPI NativeExceptionHolderBase ** PAL_GetNativeExceptionHolderHead(); extern "C++" { // // This is the base class of native exception holder used to provide // the filter function to the exception dispatcher. This allows the // filter to be called during the first pass to better emulate SEH // the xplat platforms that only have C++ exception support. 
// class NativeExceptionHolderBase { // Save the address of the holder head so the destructor // doesn't have access the slow (on Linux) TLS value again. NativeExceptionHolderBase **m_head; // The next holder on the stack NativeExceptionHolderBase *m_next; protected: NativeExceptionHolderBase() { m_head = nullptr; m_next = nullptr; } ~NativeExceptionHolderBase() { // Only destroy if Push was called if (m_head != nullptr) { *m_head = m_next; m_head = nullptr; m_next = nullptr; } } public: // Calls the holder's filter handler. virtual EXCEPTION_DISPOSITION InvokeFilter(PAL_SEHException& ex) = 0; // Adds the holder to the "stack" of holders. This is done explicitly instead // of in the constructor was to avoid the mess of move constructors combined // with return value optimization (in CreateHolder). void Push() { NativeExceptionHolderBase **head = PAL_GetNativeExceptionHolderHead(); m_head = head; m_next = *head; *head = this; } // Given the currentHolder and locals stack range find the next holder starting with this one // To find the first holder, pass nullptr as the currentHolder. static NativeExceptionHolderBase *FindNextHolder(NativeExceptionHolderBase *currentHolder, PVOID frameLowAddress, PVOID frameHighAddress); }; // // This is the second part of the native exception filter holder. It is // templated because the lambda used to wrap the exception filter is a // unknown type. // template<class FilterType> class NativeExceptionHolder : public NativeExceptionHolderBase { FilterType* m_exceptionFilter; public: NativeExceptionHolder(FilterType* exceptionFilter) : NativeExceptionHolderBase() { m_exceptionFilter = exceptionFilter; } virtual EXCEPTION_DISPOSITION InvokeFilter(PAL_SEHException& ex) { return (*m_exceptionFilter)(ex); } }; // // This is a native exception holder that is used when the catch catches // all exceptions. 
// class NativeExceptionHolderCatchAll : public NativeExceptionHolderBase { public: NativeExceptionHolderCatchAll() : NativeExceptionHolderBase() { } virtual EXCEPTION_DISPOSITION InvokeFilter(PAL_SEHException& ex) { return EXCEPTION_EXECUTE_HANDLER; } }; // This is a native exception holder that doesn't catch any exceptions. class NativeExceptionHolderNoCatch : public NativeExceptionHolderBase { public: NativeExceptionHolderNoCatch() : NativeExceptionHolderBase() { } virtual EXCEPTION_DISPOSITION InvokeFilter(PAL_SEHException& ex) { return EXCEPTION_CONTINUE_SEARCH; } }; // // This factory class for the native exception holder is necessary because // templated functions don't need the explicit type parameter and can infer // the template type from the parameter. // class NativeExceptionHolderFactory { public: template<class FilterType> static NativeExceptionHolder<FilterType> CreateHolder(FilterType* exceptionFilter) { return NativeExceptionHolder<FilterType>(exceptionFilter); } }; // Start of a try block for exceptions raised by RaiseException #define PAL_TRY(__ParamType, __paramDef, __paramRef) \ { \ __ParamType __param = __paramRef; \ auto tryBlock = [](__ParamType __paramDef) \ { // Start of an exception handler. If an exception raised by the RaiseException // occurs in the try block and the disposition is EXCEPTION_EXECUTE_HANDLER, // the handler code is executed. If the disposition is EXCEPTION_CONTINUE_SEARCH, // the exception is rethrown. The EXCEPTION_CONTINUE_EXECUTION disposition is // not supported. 
#define PAL_EXCEPT(dispositionExpression) \ }; \ const bool isFinally = false; \ auto finallyBlock = []() {}; \ EXCEPTION_DISPOSITION disposition = EXCEPTION_CONTINUE_EXECUTION; \ auto exceptionFilter = [&disposition, &__param](PAL_SEHException& ex) \ { \ (void)__param; \ disposition = dispositionExpression; \ _ASSERTE(disposition != EXCEPTION_CONTINUE_EXECUTION); \ return disposition; \ }; \ try \ { \ HardwareExceptionHolder \ auto __exceptionHolder = NativeExceptionHolderFactory::CreateHolder(&exceptionFilter); \ __exceptionHolder.Push(); \ tryBlock(__param); \ } \ catch (PAL_SEHException& ex) \ { \ if (disposition == EXCEPTION_CONTINUE_EXECUTION) \ { \ exceptionFilter(ex); \ } \ if (disposition == EXCEPTION_CONTINUE_SEARCH) \ { \ throw; \ } \ ex.SecondPassDone(); // Start of an exception handler. It works the same way as the PAL_EXCEPT except // that the disposition is obtained by calling the specified filter. #define PAL_EXCEPT_FILTER(filter) PAL_EXCEPT(filter(&ex.ExceptionPointers, __param)) // Start of a finally block. The finally block is executed both when the try block // finishes or when an exception is raised using the RaiseException in it. #define PAL_FINALLY \ }; \ const bool isFinally = true; \ auto finallyBlock = [&]() \ { // End of an except or a finally block. #define PAL_ENDTRY \ }; \ if (isFinally) \ { \ try \ { \ tryBlock(__param); \ } \ catch (...) \ { \ finallyBlock(); \ throw; \ } \ finallyBlock(); \ } \ } } // extern "C++" #define PAL_CPP_THROW(type, obj) { throw obj; } #define PAL_CPP_RETHROW { throw; } #define PAL_CPP_TRY try { HardwareExceptionHolder #define PAL_CPP_CATCH_EXCEPTION(ident) } catch (Exception *ident) { #define PAL_CPP_CATCH_EXCEPTION_NOARG } catch (Exception *) { #define PAL_CPP_CATCH_DERIVED(type, ident) } catch (type *ident) { #define PAL_CPP_CATCH_ALL } catch (...) { \ try { throw; } \ catch (PAL_SEHException& ex) { ex.SecondPassDone(); } \ catch (...) 
{} #define PAL_CPP_ENDTRY } #ifdef _MSC_VER #pragma warning(disable:4611) // interaction between '_setjmp' and C++ object destruction is non-portable #endif #define PAL_TRY_FOR_DLLMAIN(ParamType, paramDef, paramRef, _reason) PAL_TRY(ParamType, paramDef, paramRef) #endif // __cplusplus // Platform-specific library naming // #ifdef __APPLE__ #define MAKEDLLNAME_W(name) u"lib" name u".dylib" #define MAKEDLLNAME_A(name) "lib" name ".dylib" #else #define MAKEDLLNAME_W(name) u"lib" name u".so" #define MAKEDLLNAME_A(name) "lib" name ".so" #endif #ifdef UNICODE #define MAKEDLLNAME(x) MAKEDLLNAME_W(x) #else #define MAKEDLLNAME(x) MAKEDLLNAME_A(x) #endif #define PAL_SHLIB_PREFIX "lib" #define PAL_SHLIB_PREFIX_W u"lib" #if __APPLE__ #define PAL_SHLIB_SUFFIX ".dylib" #define PAL_SHLIB_SUFFIX_W u".dylib" #else #define PAL_SHLIB_SUFFIX ".so" #define PAL_SHLIB_SUFFIX_W u".so" #endif #define DBG_EXCEPTION_HANDLED ((DWORD )0x00010001L) #define DBG_CONTINUE ((DWORD )0x00010002L) #define DBG_EXCEPTION_NOT_HANDLED ((DWORD )0x80010001L) #define DBG_TERMINATE_THREAD ((DWORD )0x40010003L) #define DBG_TERMINATE_PROCESS ((DWORD )0x40010004L) #define DBG_CONTROL_C ((DWORD )0x40010005L) #define DBG_RIPEXCEPTION ((DWORD )0x40010007L) #define DBG_CONTROL_BREAK ((DWORD )0x40010008L) #define DBG_COMMAND_EXCEPTION ((DWORD )0x40010009L) #define STATUS_USER_APC ((DWORD )0x000000C0L) #define STATUS_GUARD_PAGE_VIOLATION ((DWORD )0x80000001L) #define STATUS_DATATYPE_MISALIGNMENT ((DWORD )0x80000002L) #define STATUS_BREAKPOINT ((DWORD )0x80000003L) #define STATUS_SINGLE_STEP ((DWORD )0x80000004L) #define STATUS_LONGJUMP ((DWORD )0x80000026L) #define STATUS_UNWIND_CONSOLIDATE ((DWORD )0x80000029L) #define STATUS_ACCESS_VIOLATION ((DWORD )0xC0000005L) #define STATUS_IN_PAGE_ERROR ((DWORD )0xC0000006L) #define STATUS_INVALID_HANDLE ((DWORD )0xC0000008L) #define STATUS_NO_MEMORY ((DWORD )0xC0000017L) #define STATUS_ILLEGAL_INSTRUCTION ((DWORD )0xC000001DL) #define STATUS_NONCONTINUABLE_EXCEPTION ((DWORD 
)0xC0000025L) #define STATUS_INVALID_DISPOSITION ((DWORD )0xC0000026L) #define STATUS_ARRAY_BOUNDS_EXCEEDED ((DWORD )0xC000008CL) #define STATUS_FLOAT_DENORMAL_OPERAND ((DWORD )0xC000008DL) #define STATUS_FLOAT_DIVIDE_BY_ZERO ((DWORD )0xC000008EL) #define STATUS_FLOAT_INEXACT_RESULT ((DWORD )0xC000008FL) #define STATUS_FLOAT_INVALID_OPERATION ((DWORD )0xC0000090L) #define STATUS_FLOAT_OVERFLOW ((DWORD )0xC0000091L) #define STATUS_FLOAT_STACK_CHECK ((DWORD )0xC0000092L) #define STATUS_FLOAT_UNDERFLOW ((DWORD )0xC0000093L) #define STATUS_INTEGER_DIVIDE_BY_ZERO ((DWORD )0xC0000094L) #define STATUS_INTEGER_OVERFLOW ((DWORD )0xC0000095L) #define STATUS_PRIVILEGED_INSTRUCTION ((DWORD )0xC0000096L) #define STATUS_STACK_OVERFLOW ((DWORD )0xC00000FDL) #define STATUS_CONTROL_C_EXIT ((DWORD )0xC000013AL) #define WAIT_IO_COMPLETION STATUS_USER_APC #define EXCEPTION_ACCESS_VIOLATION STATUS_ACCESS_VIOLATION #define EXCEPTION_DATATYPE_MISALIGNMENT STATUS_DATATYPE_MISALIGNMENT #define EXCEPTION_BREAKPOINT STATUS_BREAKPOINT #define EXCEPTION_SINGLE_STEP STATUS_SINGLE_STEP #define EXCEPTION_ARRAY_BOUNDS_EXCEEDED STATUS_ARRAY_BOUNDS_EXCEEDED #define EXCEPTION_FLT_DENORMAL_OPERAND STATUS_FLOAT_DENORMAL_OPERAND #define EXCEPTION_FLT_DIVIDE_BY_ZERO STATUS_FLOAT_DIVIDE_BY_ZERO #define EXCEPTION_FLT_INEXACT_RESULT STATUS_FLOAT_INEXACT_RESULT #define EXCEPTION_FLT_INVALID_OPERATION STATUS_FLOAT_INVALID_OPERATION #define EXCEPTION_FLT_OVERFLOW STATUS_FLOAT_OVERFLOW #define EXCEPTION_FLT_STACK_CHECK STATUS_FLOAT_STACK_CHECK #define EXCEPTION_FLT_UNDERFLOW STATUS_FLOAT_UNDERFLOW #define EXCEPTION_INT_DIVIDE_BY_ZERO STATUS_INTEGER_DIVIDE_BY_ZERO #define EXCEPTION_INT_OVERFLOW STATUS_INTEGER_OVERFLOW #define EXCEPTION_PRIV_INSTRUCTION STATUS_PRIVILEGED_INSTRUCTION #define EXCEPTION_IN_PAGE_ERROR STATUS_IN_PAGE_ERROR #define EXCEPTION_ILLEGAL_INSTRUCTION STATUS_ILLEGAL_INSTRUCTION #define EXCEPTION_NONCONTINUABLE_EXCEPTION STATUS_NONCONTINUABLE_EXCEPTION #define EXCEPTION_STACK_OVERFLOW 
STATUS_STACK_OVERFLOW #define EXCEPTION_INVALID_DISPOSITION STATUS_INVALID_DISPOSITION #define EXCEPTION_GUARD_PAGE STATUS_GUARD_PAGE_VIOLATION #define EXCEPTION_INVALID_HANDLE STATUS_INVALID_HANDLE #define CONTROL_C_EXIT STATUS_CONTROL_C_EXIT /******************* HRESULT types ****************************************/ #define FACILITY_WINDOWS 8 #define FACILITY_URT 19 #define FACILITY_UMI 22 #define FACILITY_SXS 23 #define FACILITY_STORAGE 3 #define FACILITY_SSPI 9 #define FACILITY_SCARD 16 #define FACILITY_SETUPAPI 15 #define FACILITY_SECURITY 9 #define FACILITY_RPC 1 #define FACILITY_WIN32 7 #define FACILITY_CONTROL 10 #define FACILITY_NULL 0 #define FACILITY_MSMQ 14 #define FACILITY_MEDIASERVER 13 #define FACILITY_INTERNET 12 #define FACILITY_ITF 4 #define FACILITY_DPLAY 21 #define FACILITY_DISPATCH 2 #define FACILITY_COMPLUS 17 #define FACILITY_CERT 11 #define FACILITY_ACS 20 #define FACILITY_AAF 18 #define NO_ERROR 0L #define SEVERITY_SUCCESS 0 #define SEVERITY_ERROR 1 #define SUCCEEDED(Status) ((HRESULT)(Status) >= 0) #define FAILED(Status) ((HRESULT)(Status)<0) #define IS_ERROR(Status) ((ULONG)(Status) >> 31 == SEVERITY_ERROR) // diff from win32 #define HRESULT_CODE(hr) ((hr) & 0xFFFF) #define SCODE_CODE(sc) ((sc) & 0xFFFF) #define HRESULT_FACILITY(hr) (((hr) >> 16) & 0x1fff) #define SCODE_FACILITY(sc) (((sc) >> 16) & 0x1fff) #define HRESULT_SEVERITY(hr) (((hr) >> 31) & 0x1) #define SCODE_SEVERITY(sc) (((sc) >> 31) & 0x1) // both macros diff from Win32 #define MAKE_HRESULT(sev,fac,code) \ ((HRESULT) (((ULONG)(sev)<<31) | ((ULONG)(fac)<<16) | ((ULONG)(code))) ) #define MAKE_SCODE(sev,fac,code) \ ((SCODE) (((ULONG)(sev)<<31) | ((ULONG)(fac)<<16) | ((LONG)(code))) ) #define FACILITY_NT_BIT 0x10000000 #define HRESULT_FROM_WIN32(x) ((HRESULT)(x) <= 0 ? 
((HRESULT)(x)) : ((HRESULT) (((x) & 0x0000FFFF) | (FACILITY_WIN32 << 16) | 0x80000000))) #define __HRESULT_FROM_WIN32(x) HRESULT_FROM_WIN32(x) #define HRESULT_FROM_NT(x) ((HRESULT) ((x) | FACILITY_NT_BIT)) #ifdef __cplusplus } #endif #endif // __PAL_H__
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*++ Module Name: pal.h Abstract: CoreCLR Platform Adaptation Layer (PAL) header file. This file defines all types and API calls required by the CoreCLR when compiled for Unix-like systems. Defines which control the behavior of this include file: UNICODE - define it to set the Ansi/Unicode neutral names to be the ...W names. Otherwise the neutral names default to be the ...A names. PAL_IMPLEMENTATION - define it when implementing the PAL. Otherwise leave it undefined when consuming the PAL. Note: some fields in structs have been renamed from the original SDK documentation names, with _PAL_Undefined appended. This leaves the structure layout identical to its Win32 version, but prevents PAL consumers from inadvertently referencing undefined fields. If you want to add a PAL_ wrapper function to a native function in here, you also need to edit palinternal.h and win32pal.h. --*/ #ifndef __PAL_H__ #define __PAL_H__ #ifdef PAL_STDCPP_COMPAT #include <float.h> #include <limits.h> #include <stddef.h> #include <stdio.h> #include <stdlib.h> #include <stdarg.h> #include <stdint.h> #include <string.h> #include <errno.h> #include <ctype.h> #include <sys/stat.h> #include <sys/types.h> #include <unistd.h> #endif #ifdef __cplusplus extern "C" { #endif // This macro is used to standardize the wide character string literals between UNIX and Windows. // Unix L"" is UTF32, and on windows it's UTF16. Because of built-in assumptions on the size // of string literals, it's important to match behaviour between Unix and Windows. Unix will be defined // as u"" (char16_t) #define W(str) u##str // Undefine the QUOTE_MACRO_L helper and redefine it in terms of u. // The reason that we do this is that quote macro is defined in ndp\common\inc, // not inside of coreclr sources. 
#define QUOTE_MACRO_L(x) QUOTE_MACRO_u(x) #define QUOTE_MACRO_u_HELPER(x) u###x #define QUOTE_MACRO_u(x) QUOTE_MACRO_u_HELPER(x) #include <pal_error.h> #include <pal_mstypes.h> // Native system libray handle. // On Unix systems, NATIVE_LIBRARY_HANDLE type represents a library handle not registered with the PAL. typedef PVOID NATIVE_LIBRARY_HANDLE; /******************* Processor-specific glue *****************************/ #ifndef _MSC_VER #if defined(__i686__) && !defined(_M_IX86) #define _M_IX86 600 #elif defined(__i586__) && !defined(_M_IX86) #define _M_IX86 500 #elif defined(__i486__) && !defined(_M_IX86) #define _M_IX86 400 #elif defined(__i386__) && !defined(_M_IX86) #define _M_IX86 300 #elif defined(__x86_64__) && !defined(_M_AMD64) #define _M_AMD64 100 #elif defined(__arm__) && !defined(_M_ARM) #define _M_ARM 7 #elif defined(__aarch64__) && !defined(_M_ARM64) #define _M_ARM64 1 #elif defined(__loongarch64) && !defined(_M_LOONGARCH64) #define _M_LOONGARCH64 1 #elif defined(__s390x__) && !defined(_M_S390X) #define _M_S390X 1 #endif #if defined(_M_IX86) && !defined(HOST_X86) #define HOST_X86 #elif defined(_M_AMD64) && !defined(HOST_AMD64) #define HOST_AMD64 #elif defined(_M_ARM) && !defined(HOST_ARM) #define HOST_ARM #elif defined(_M_ARM64) && !defined(HOST_ARM64) #define HOST_ARM64 #elif defined(_M_LOONGARCH64) && !defined(HOST_LOONGARCH64) #define HOST_LOONGARCH64 #elif defined(_M_S390X) && !defined(HOST_S390X) #define HOST_S390X #endif #endif // !_MSC_VER /******************* ABI-specific glue *******************************/ #define MAX_PATH 260 #define _MAX_PATH 260 #define _MAX_DRIVE 3 /* max. length of drive component */ #define _MAX_DIR 256 /* max. length of path component */ #define _MAX_FNAME 256 /* max. length of file name component */ #define _MAX_EXT 256 /* max. 
length of extension component */ // In some Win32 APIs MAX_PATH is used for file names (even though 256 is the normal file system limit) // use _MAX_PATH_FNAME to indicate these cases #define MAX_PATH_FNAME MAX_PATH #define MAX_LONGPATH 1024 /* max. length of full pathname */ #define MAXSHORT 0x7fff #define MAXLONG 0x7fffffff #define MAXCHAR 0x7f #define MAXDWORD 0xffffffff // Sorting IDs. // // Note that the named locale APIs (eg CompareStringExEx) are recommended. // #define LANG_ENGLISH 0x09 /******************* Compiler-specific glue *******************************/ #ifndef THROW_DECL #if defined(_MSC_VER) || !defined(__cplusplus) #define THROW_DECL #else #define THROW_DECL throw() #endif // !_MSC_VER #endif // !THROW_DECL #ifdef __sun #define MATH_THROW_DECL #else #define MATH_THROW_DECL THROW_DECL #endif #if defined(_MSC_VER) #define DECLSPEC_ALIGN(x) __declspec(align(x)) #else #define DECLSPEC_ALIGN(x) __attribute__ ((aligned(x))) #endif #define DECLSPEC_NORETURN PAL_NORETURN #ifdef __clang_analyzer__ #define ANALYZER_NORETURN __attribute((analyzer_noreturn)) #else #define ANALYZER_NORETURN #endif #define EMPTY_BASES_DECL #if !defined(_MSC_VER) || defined(SOURCE_FORMATTING) #define __assume(x) (void)0 #define __annotation(x) #endif //!MSC_VER #define UNALIGNED #ifndef FORCEINLINE #if _MSC_VER < 1200 #define FORCEINLINE inline #else #define FORCEINLINE __forceinline #endif #endif #ifndef NOOPT_ATTRIBUTE #if defined(__llvm__) #define NOOPT_ATTRIBUTE optnone #elif defined(__GNUC__) #define NOOPT_ATTRIBUTE optimize("O0") #endif #endif #ifndef NODEBUG_ATTRIBUTE #if defined(__llvm__) #define NODEBUG_ATTRIBUTE __nodebug__ #elif defined(__GNUC__) #define NODEBUG_ATTRIBUTE __artificial__ #endif #endif #ifndef __has_cpp_attribute #define __has_cpp_attribute(x) (0) #endif #ifndef FALLTHROUGH #if __has_cpp_attribute(fallthrough) #define FALLTHROUGH [[fallthrough]] #else // __has_cpp_attribute(fallthrough) #define FALLTHROUGH #endif // __has_cpp_attribute(fallthrough) 
#endif // FALLTHROUGH #ifndef PAL_STDCPP_COMPAT #if __GNUC__ typedef __builtin_va_list va_list; /* We should consider if the va_arg definition here is actually necessary. Could we use the standard va_arg definition? */ #define va_start __builtin_va_start #define va_arg __builtin_va_arg #define va_copy __builtin_va_copy #define va_end __builtin_va_end #define VOID void #else // __GNUC__ typedef char * va_list; #define _INTSIZEOF(n) ( (sizeof(n) + sizeof(int) - 1) & ~(sizeof(int) - 1) ) #if _MSC_VER >= 1400 #ifdef __cplusplus #define _ADDRESSOF(v) ( &reinterpret_cast<const char &>(v) ) #else #define _ADDRESSOF(v) ( &(v) ) #endif #define _crt_va_start(ap,v) ( ap = (va_list)_ADDRESSOF(v) + _INTSIZEOF(v) ) #define _crt_va_arg(ap,t) ( *(t *)((ap += _INTSIZEOF(t)) - _INTSIZEOF(t)) ) #define _crt_va_end(ap) ( ap = (va_list)0 ) #define va_start _crt_va_start #define va_arg _crt_va_arg #define va_end _crt_va_end #else // _MSC_VER #define va_start(ap,v) (ap = (va_list) (&(v)) + _INTSIZEOF(v)) #define va_arg(ap,t) ( *(t *)((ap += _INTSIZEOF(t)) - _INTSIZEOF(t)) ) #define va_end(ap) #endif // _MSC_VER #define va_copy(dest,src) (dest = src) #endif // __GNUC__ #define CHAR_BIT 8 #define SCHAR_MIN (-128) #define SCHAR_MAX 127 #define UCHAR_MAX 0xff #define SHRT_MIN (-32768) #define SHRT_MAX 32767 #define USHRT_MAX 0xffff #define INT_MIN (-2147483647 - 1) #define INT_MAX 2147483647 #define UINT_MAX 0xffffffff // LONG_MIN, LONG_MAX, ULONG_MAX -- use INT32_MIN etc. instead. 
// Floating-point limit constants, mirroring <float.h>.
#define FLT_MAX 3.402823466e+38F
#define DBL_MAX 1.7976931348623157e+308

#endif // !PAL_STDCPP_COMPAT

/******************* PAL-Specific Entrypoints *****************************/

#define IsDebuggerPresent PAL_IsDebuggerPresent

PALIMPORT
BOOL
PALAPI
PAL_IsDebuggerPresent();

/* minimum signed 64 bit value */
#define _I64_MIN (I64(-9223372036854775807) - 1)
/* maximum signed 64 bit value */
#define _I64_MAX I64(9223372036854775807)
/* maximum unsigned 64 bit value */
#define _UI64_MAX UI64(0xffffffffffffffff)

// Fixed-width limit aliases in the MSVC CRT naming convention.
#define _I8_MAX SCHAR_MAX
#define _I8_MIN SCHAR_MIN
#define _I16_MAX SHRT_MAX
#define _I16_MIN SHRT_MIN
#define _I32_MAX INT_MAX
#define _I32_MIN INT_MIN
#define _UI8_MAX UCHAR_MAX
#define _UI8_MIN UCHAR_MIN
#define _UI16_MAX USHRT_MAX
#define _UI16_MIN USHRT_MIN
#define _UI32_MAX UINT_MAX
#define _UI32_MIN UINT_MIN

#undef NULL

#if defined(__cplusplus)
#define NULL 0
#else
#define NULL ((PVOID)0)
#endif

#if defined(PAL_STDCPP_COMPAT) && !defined(__cplusplus)
#define nullptr NULL
#endif // defined(PAL_STDCPP_COMPAT) && !defined(__cplusplus)

#ifndef PAL_STDCPP_COMPAT
// 64-bit time_t regardless of platform, to match the Windows CRT.
typedef __int64 time_t;
#define _TIME_T_DEFINED
#endif // !PAL_STDCPP_COMPAT

// DllMain reason codes.
#define DLL_PROCESS_ATTACH 1
#define DLL_THREAD_ATTACH 2
#define DLL_THREAD_DETACH 3
#define DLL_PROCESS_DETACH 0

// Individual PAL initialization feature flags; combined into the
// PAL_INITIALIZE* composites below.
#define PAL_INITIALIZE_NONE 0x00
#define PAL_INITIALIZE_SYNC_THREAD 0x01
#define PAL_INITIALIZE_EXEC_ALLOCATOR 0x02
#define PAL_INITIALIZE_STD_HANDLES 0x04
#define PAL_INITIALIZE_REGISTER_SIGTERM_HANDLER 0x08
#define PAL_INITIALIZE_DEBUGGER_EXCEPTIONS 0x10
#define PAL_INITIALIZE_ENSURE_STACK_SIZE 0x20
#define PAL_INITIALIZE_REGISTER_SIGNALS 0x40
#define PAL_INITIALIZE_REGISTER_ACTIVATION_SIGNAL 0x80

// PAL_Initialize() flags
#define PAL_INITIALIZE (PAL_INITIALIZE_SYNC_THREAD | \
                        PAL_INITIALIZE_STD_HANDLES)

// PAL_InitializeDLL() flags - don't start any of the helper threads or register any exceptions
#define PAL_INITIALIZE_DLL PAL_INITIALIZE_NONE

// PAL_InitializeCoreCLR() flags
#define PAL_INITIALIZE_CORECLR (PAL_INITIALIZE | \
                                PAL_INITIALIZE_EXEC_ALLOCATOR | \
                                PAL_INITIALIZE_REGISTER_SIGTERM_HANDLER | \
                                PAL_INITIALIZE_DEBUGGER_EXCEPTIONS | \
                                PAL_INITIALIZE_ENSURE_STACK_SIZE | \
                                PAL_INITIALIZE_REGISTER_SIGNALS | \
                                PAL_INITIALIZE_REGISTER_ACTIVATION_SIGNAL)

// Signature of a thread entry point, as passed to CreateThread.
typedef DWORD (PALAPI_NOEXPORT *PTHREAD_START_ROUTINE)(LPVOID lpThreadParameter);
typedef PTHREAD_START_ROUTINE LPTHREAD_START_ROUTINE;

/******************* PAL-Specific Entrypoints *****************************/

PALIMPORT
int
PALAPI
PAL_Initialize(
    int argc,
    char * const argv[]);

PALIMPORT
void
PALAPI
PAL_InitializeWithFlags(
    DWORD flags);

PALIMPORT
int
PALAPI
PAL_InitializeDLL();

PALIMPORT
void
PALAPI
PAL_SetInitializeDLLFlags(
    DWORD flags);

PALIMPORT
DWORD
PALAPI
PAL_InitializeCoreCLR(
    const char *szExePath, BOOL runningInExe);

/// <summary>
/// This function shuts down PAL WITHOUT exiting the current process.
/// </summary>
PALIMPORT
void
PALAPI
PAL_Shutdown(
    void);

/// <summary>
/// This function shuts down PAL and exits the current process.
/// </summary>
PALIMPORT
void
PALAPI
PAL_Terminate(
    void);

/// <summary>
/// This function shuts down PAL and exits the current process with
/// the specified exit code.
/// </summary>
PALIMPORT
void
PALAPI
PAL_TerminateEx(
    int exitCode);

// Callback invoked during shutdown; isExecutingOnAltStack reports whether the
// callback runs on an alternate (signal) stack.
typedef VOID (*PSHUTDOWN_CALLBACK)(bool isExecutingOnAltStack);

PALIMPORT
VOID
PALAPI
PAL_SetShutdownCallback(
    IN PSHUTDOWN_CALLBACK callback);

// Must be the same as the copy in excep.h and the WriteDumpFlags enum in the diagnostics repo
enum
{
    GenerateDumpFlagsNone = 0x00,
    GenerateDumpFlagsLoggingEnabled = 0x01,
    GenerateDumpFlagsVerboseLoggingEnabled = 0x02,
    GenerateDumpFlagsCrashReportEnabled = 0x04
};

PALIMPORT
BOOL
PALAPI
PAL_GenerateCoreDump(
    IN LPCSTR dumpName,
    IN INT dumpType,
    IN ULONG32 flags);

// Callback fired when a runtime has started in the target process (see
// PAL_RegisterForRuntimeStartup below).
typedef VOID (*PPAL_STARTUP_CALLBACK)(
    char *modulePath,
    HMODULE hModule,
    PVOID parameter);

PALIMPORT
DWORD
PALAPI
PAL_RegisterForRuntimeStartup(
    IN DWORD dwProcessId,
    IN LPCWSTR lpApplicationGroupId,
    IN PPAL_STARTUP_CALLBACK pfnCallback,
    IN PVOID parameter,
    OUT PVOID *ppUnregisterToken);

PALIMPORT
DWORD
PALAPI
PAL_UnregisterForRuntimeStartup(
    IN PVOID pUnregisterToken);

PALIMPORT
BOOL
PALAPI
PAL_NotifyRuntimeStarted();

PALIMPORT
LPCSTR
PALAPI
PAL_GetApplicationGroupId();

// Upper bound for debugger transport pipe names built by PAL_GetTransportPipeName.
static const unsigned int MAX_DEBUGGER_TRANSPORT_PIPE_NAME_LENGTH = MAX_PATH;

PALIMPORT
VOID
PALAPI
PAL_GetTransportName(
    const unsigned int MAX_TRANSPORT_NAME_LENGTH,
    OUT char *name,
    IN const char *prefix,
    IN DWORD id,
    IN const char *applicationGroupId,
    IN const char *suffix);

PALIMPORT
VOID
PALAPI
PAL_GetTransportPipeName(
    OUT char *name,
    IN DWORD id,
    IN const char *applicationGroupId,
    IN const char *suffix);

PALIMPORT
void
PALAPI
PAL_IgnoreProfileSignal(int signalNum);

PALIMPORT
HINSTANCE
PALAPI
PAL_RegisterModule(
    IN LPCSTR lpLibFileName);

PALIMPORT
VOID
PALAPI
PAL_UnregisterModule(
    IN HINSTANCE hInstance);

// Fills lpBuffer with dwLength random bytes.
PALIMPORT
VOID
PALAPI
PAL_Random(
    IN OUT LPVOID lpBuffer,
    IN DWORD dwLength);

// Out-of-process memory access: open/close a read handle on another process
// and read its memory. The handle is a PAL-specific DWORD, not a Win32 HANDLE.
PALIMPORT
BOOL
PALAPI
PAL_OpenProcessMemory(
    IN DWORD processId,
    OUT DWORD* pHandle
);

PALIMPORT
VOID
PALAPI
PAL_CloseProcessMemory(
    IN DWORD handle
);

PALIMPORT
BOOL
PALAPI
PAL_ReadProcessMemory(
    IN DWORD handle,
    IN ULONG64 address,
    IN LPVOID buffer,
    IN SIZE_T size,
    OUT SIZE_T* numberOfBytesRead
);

PALIMPORT
BOOL
PALAPI
PAL_ProbeMemory(
    PVOID pBuffer,
    DWORD cbBuffer,
    BOOL fWriteAccess);

PALIMPORT
int
PALAPI
// Start the jitdump file
PAL_PerfJitDump_Start(const char* path);

PALIMPORT
int
PALAPI
// Log a method to the jitdump file.
PAL_PerfJitDump_LogMethod(void* pCode, size_t codeSize, const char* symbol, void* debugInfo, void* unwindInfo);

PALIMPORT
int
PALAPI
// Finish the jitdump file
PAL_PerfJitDump_Finish();

/******************* winuser.h Entrypoints *******************************/

// MessageBox button-combination styles.
#define MB_OK 0x00000000L
#define MB_OKCANCEL 0x00000001L
#define MB_ABORTRETRYIGNORE 0x00000002L
#define MB_YESNO 0x00000004L
#define MB_RETRYCANCEL 0x00000005L

// MessageBox icon styles.
#define MB_ICONHAND 0x00000010L
#define MB_ICONQUESTION 0x00000020L
#define MB_ICONEXCLAMATION 0x00000030L
#define MB_ICONASTERISK 0x00000040L

#define MB_ICONINFORMATION MB_ICONASTERISK
#define MB_ICONSTOP MB_ICONHAND
#define MB_ICONERROR MB_ICONHAND

// MessageBox default-button selection.
#define MB_DEFBUTTON1 0x00000000L
#define MB_DEFBUTTON2 0x00000100L
#define MB_DEFBUTTON3 0x00000200L

// MessageBox modality and miscellaneous styles.
#define MB_SYSTEMMODAL 0x00001000L
#define MB_TASKMODAL 0x00002000L
#define MB_SETFOREGROUND 0x00010000L
#define MB_TOPMOST 0x00040000L

#define MB_NOFOCUS 0x00008000L
#define MB_DEFAULT_DESKTOP_ONLY 0x00020000L

// Note: this is the NT 4.0 and greater value.
#define MB_SERVICE_NOTIFICATION 0x00200000L #define MB_TYPEMASK 0x0000000FL #define MB_ICONMASK 0x000000F0L #define MB_DEFMASK 0x00000F00L #define IDOK 1 #define IDCANCEL 2 #define IDABORT 3 #define IDRETRY 4 #define IDIGNORE 5 #define IDYES 6 #define IDNO 7 PALIMPORT int PALAPI MessageBoxW( IN LPVOID hWnd, // NOTE: diff from winuser.h IN LPCWSTR lpText, IN LPCWSTR lpCaption, IN UINT uType); #ifdef UNICODE #define MessageBox MessageBoxW #else #define MessageBox MessageBoxA #endif // From win32.h #ifndef _CRTIMP #ifdef __GNUC__ #define _CRTIMP #else // __GNUC__ #define _CRTIMP __declspec(dllimport) #endif // __GNUC__ #endif // _CRTIMP /******************* winbase.h Entrypoints and defines ************************/ typedef struct _SECURITY_ATTRIBUTES { DWORD nLength; LPVOID lpSecurityDescriptor; BOOL bInheritHandle; } SECURITY_ATTRIBUTES, *PSECURITY_ATTRIBUTES, *LPSECURITY_ATTRIBUTES; #define _SH_DENYWR 0x20 /* deny write mode */ #define FILE_READ_DATA ( 0x0001 ) // file & pipe #define FILE_APPEND_DATA ( 0x0004 ) // file #define GENERIC_READ (0x80000000L) #define GENERIC_WRITE (0x40000000L) #define FILE_SHARE_READ 0x00000001 #define FILE_SHARE_WRITE 0x00000002 #define FILE_SHARE_DELETE 0x00000004 #define CREATE_NEW 1 #define CREATE_ALWAYS 2 #define OPEN_EXISTING 3 #define OPEN_ALWAYS 4 #define TRUNCATE_EXISTING 5 #define FILE_ATTRIBUTE_READONLY 0x00000001 #define FILE_ATTRIBUTE_HIDDEN 0x00000002 #define FILE_ATTRIBUTE_SYSTEM 0x00000004 #define FILE_ATTRIBUTE_DIRECTORY 0x00000010 #define FILE_ATTRIBUTE_ARCHIVE 0x00000020 #define FILE_ATTRIBUTE_DEVICE 0x00000040 #define FILE_ATTRIBUTE_NORMAL 0x00000080 #define FILE_FLAG_WRITE_THROUGH 0x80000000 #define FILE_FLAG_NO_BUFFERING 0x20000000 #define FILE_FLAG_RANDOM_ACCESS 0x10000000 #define FILE_FLAG_SEQUENTIAL_SCAN 0x08000000 #define FILE_FLAG_BACKUP_SEMANTICS 0x02000000 #define FILE_BEGIN 0 #define FILE_CURRENT 1 #define FILE_END 2 #define STILL_ACTIVE (0x00000103L) #define INVALID_SET_FILE_POINTER ((DWORD)-1) PALIMPORT 
HANDLE PALAPI CreateFileW( IN LPCWSTR lpFileName, IN DWORD dwDesiredAccess, IN DWORD dwShareMode, IN LPSECURITY_ATTRIBUTES lpSecurityAttributes, IN DWORD dwCreationDisposition, IN DWORD dwFlagsAndAttributes, IN HANDLE hTemplateFile); #ifdef UNICODE #define CreateFile CreateFileW #else #define CreateFile CreateFileA #endif PALIMPORT DWORD PALAPI SearchPathW( IN LPCWSTR lpPath, IN LPCWSTR lpFileName, IN LPCWSTR lpExtension, IN DWORD nBufferLength, OUT LPWSTR lpBuffer, OUT LPWSTR *lpFilePart ); #define SearchPath SearchPathW PALIMPORT BOOL PALAPI CopyFileW( IN LPCWSTR lpExistingFileName, IN LPCWSTR lpNewFileName, IN BOOL bFailIfExists); #ifdef UNICODE #define CopyFile CopyFileW #else #define CopyFile CopyFileA #endif PALIMPORT BOOL PALAPI DeleteFileW( IN LPCWSTR lpFileName); #ifdef UNICODE #define DeleteFile DeleteFileW #else #define DeleteFile DeleteFileA #endif #define MOVEFILE_REPLACE_EXISTING 0x00000001 #define MOVEFILE_COPY_ALLOWED 0x00000002 PALIMPORT BOOL PALAPI MoveFileExW( IN LPCWSTR lpExistingFileName, IN LPCWSTR lpNewFileName, IN DWORD dwFlags); #ifdef UNICODE #define MoveFileEx MoveFileExW #else #define MoveFileEx MoveFileExA #endif typedef struct _BY_HANDLE_FILE_INFORMATION { DWORD dwFileAttributes; FILETIME ftCreationTime; FILETIME ftLastAccessTime; FILETIME ftLastWriteTime; DWORD dwVolumeSerialNumber; DWORD nFileSizeHigh; DWORD nFileSizeLow; DWORD nNumberOfLinks; DWORD nFileIndexHigh; DWORD nFileIndexLow; } BY_HANDLE_FILE_INFORMATION, *PBY_HANDLE_FILE_INFORMATION, *LPBY_HANDLE_FILE_INFORMATION; typedef struct _WIN32_FIND_DATAA { DWORD dwFileAttributes; FILETIME ftCreationTime; FILETIME ftLastAccessTime; FILETIME ftLastWriteTime; DWORD nFileSizeHigh; DWORD nFileSizeLow; DWORD dwReserved0; DWORD dwReserved1; CHAR cFileName[ MAX_PATH_FNAME ]; CHAR cAlternateFileName[ 14 ]; } WIN32_FIND_DATAA, *PWIN32_FIND_DATAA, *LPWIN32_FIND_DATAA; typedef struct _WIN32_FIND_DATAW { DWORD dwFileAttributes; FILETIME ftCreationTime; FILETIME ftLastAccessTime; FILETIME 
ftLastWriteTime; DWORD nFileSizeHigh; DWORD nFileSizeLow; DWORD dwReserved0; DWORD dwReserved1; WCHAR cFileName[ MAX_PATH_FNAME ]; WCHAR cAlternateFileName[ 14 ]; } WIN32_FIND_DATAW, *PWIN32_FIND_DATAW, *LPWIN32_FIND_DATAW; #ifdef UNICODE typedef WIN32_FIND_DATAW WIN32_FIND_DATA; typedef PWIN32_FIND_DATAW PWIN32_FIND_DATA; typedef LPWIN32_FIND_DATAW LPWIN32_FIND_DATA; #else typedef WIN32_FIND_DATAA WIN32_FIND_DATA; typedef PWIN32_FIND_DATAA PWIN32_FIND_DATA; typedef LPWIN32_FIND_DATAA LPWIN32_FIND_DATA; #endif PALIMPORT HANDLE PALAPI FindFirstFileW( IN LPCWSTR lpFileName, OUT LPWIN32_FIND_DATAW lpFindFileData); #ifdef UNICODE #define FindFirstFile FindFirstFileW #else #define FindFirstFile FindFirstFileA #endif PALIMPORT BOOL PALAPI FindNextFileW( IN HANDLE hFindFile, OUT LPWIN32_FIND_DATAW lpFindFileData); #ifdef UNICODE #define FindNextFile FindNextFileW #else #define FindNextFile FindNextFileA #endif PALIMPORT BOOL PALAPI FindClose( IN OUT HANDLE hFindFile); PALIMPORT DWORD PALAPI GetFileAttributesW( IN LPCWSTR lpFileName); #ifdef UNICODE #define GetFileAttributes GetFileAttributesW #else #define GetFileAttributes GetFileAttributesA #endif typedef enum _GET_FILEEX_INFO_LEVELS { GetFileExInfoStandard } GET_FILEEX_INFO_LEVELS; typedef enum _FINDEX_INFO_LEVELS { FindExInfoStandard, FindExInfoBasic, FindExInfoMaxInfoLevel } FINDEX_INFO_LEVELS; typedef enum _FINDEX_SEARCH_OPS { FindExSearchNameMatch, FindExSearchLimitToDirectories, FindExSearchLimitToDevices, FindExSearchMaxSearchOp } FINDEX_SEARCH_OPS; typedef struct _WIN32_FILE_ATTRIBUTE_DATA { DWORD dwFileAttributes; FILETIME ftCreationTime; FILETIME ftLastAccessTime; FILETIME ftLastWriteTime; DWORD nFileSizeHigh; DWORD nFileSizeLow; } WIN32_FILE_ATTRIBUTE_DATA, *LPWIN32_FILE_ATTRIBUTE_DATA; PALIMPORT BOOL PALAPI GetFileAttributesExW( IN LPCWSTR lpFileName, IN GET_FILEEX_INFO_LEVELS fInfoLevelId, OUT LPVOID lpFileInformation); #ifdef UNICODE #define GetFileAttributesEx GetFileAttributesExW #endif typedef struct 
_OVERLAPPED { ULONG_PTR Internal; ULONG_PTR InternalHigh; DWORD Offset; DWORD OffsetHigh; HANDLE hEvent; } OVERLAPPED, *LPOVERLAPPED; PALIMPORT BOOL PALAPI WriteFile( IN HANDLE hFile, IN LPCVOID lpBuffer, IN DWORD nNumberOfBytesToWrite, OUT LPDWORD lpNumberOfBytesWritten, IN LPOVERLAPPED lpOverlapped); PALIMPORT BOOL PALAPI ReadFile( IN HANDLE hFile, OUT LPVOID lpBuffer, IN DWORD nNumberOfBytesToRead, OUT LPDWORD lpNumberOfBytesRead, IN LPOVERLAPPED lpOverlapped); #define STD_INPUT_HANDLE ((DWORD)-10) #define STD_OUTPUT_HANDLE ((DWORD)-11) #define STD_ERROR_HANDLE ((DWORD)-12) PALIMPORT HANDLE PALAPI GetStdHandle( IN DWORD nStdHandle); PALIMPORT BOOL PALAPI SetEndOfFile( IN HANDLE hFile); PALIMPORT DWORD PALAPI SetFilePointer( IN HANDLE hFile, IN LONG lDistanceToMove, IN PLONG lpDistanceToMoveHigh, IN DWORD dwMoveMethod); PALIMPORT BOOL PALAPI SetFilePointerEx( IN HANDLE hFile, IN LARGE_INTEGER liDistanceToMove, OUT PLARGE_INTEGER lpNewFilePointer, IN DWORD dwMoveMethod); PALIMPORT DWORD PALAPI GetFileSize( IN HANDLE hFile, OUT LPDWORD lpFileSizeHigh); PALIMPORT BOOL PALAPI GetFileSizeEx( IN HANDLE hFile, OUT PLARGE_INTEGER lpFileSize); PALIMPORT VOID PALAPI GetSystemTimeAsFileTime( OUT LPFILETIME lpSystemTimeAsFileTime); typedef struct _SYSTEMTIME { WORD wYear; WORD wMonth; WORD wDayOfWeek; WORD wDay; WORD wHour; WORD wMinute; WORD wSecond; WORD wMilliseconds; } SYSTEMTIME, *PSYSTEMTIME, *LPSYSTEMTIME; PALIMPORT VOID PALAPI GetSystemTime( OUT LPSYSTEMTIME lpSystemTime); PALIMPORT BOOL PALAPI FileTimeToSystemTime( IN CONST FILETIME *lpFileTime, OUT LPSYSTEMTIME lpSystemTime); PALIMPORT BOOL PALAPI FlushFileBuffers( IN HANDLE hFile); PALIMPORT UINT PALAPI GetConsoleOutputCP(); PALIMPORT DWORD PALAPI GetFullPathNameW( IN LPCWSTR lpFileName, IN DWORD nBufferLength, OUT LPWSTR lpBuffer, OUT LPWSTR *lpFilePart); #ifdef UNICODE #define GetFullPathName GetFullPathNameW #else #define GetFullPathName GetFullPathNameA #endif PALIMPORT UINT PALAPI GetTempFileNameW( IN LPCWSTR 
lpPathName, IN LPCWSTR lpPrefixString, IN UINT uUnique, OUT LPWSTR lpTempFileName); #ifdef UNICODE #define GetTempFileName GetTempFileNameW #else #define GetTempFileName GetTempFileNameA #endif PALIMPORT DWORD PALAPI GetTempPathW( IN DWORD nBufferLength, OUT LPWSTR lpBuffer); PALIMPORT DWORD PALAPI GetTempPathA( IN DWORD nBufferLength, OUT LPSTR lpBuffer); #ifdef UNICODE #define GetTempPath GetTempPathW #else #define GetTempPath GetTempPathA #endif PALIMPORT DWORD PALAPI GetCurrentDirectoryW( IN DWORD nBufferLength, OUT LPWSTR lpBuffer); #ifdef UNICODE #define GetCurrentDirectory GetCurrentDirectoryW #else #define GetCurrentDirectory GetCurrentDirectoryA #endif PALIMPORT HANDLE PALAPI CreateSemaphoreExW( IN LPSECURITY_ATTRIBUTES lpSemaphoreAttributes, IN LONG lInitialCount, IN LONG lMaximumCount, IN LPCWSTR lpName, IN /*_Reserved_*/ DWORD dwFlags, IN DWORD dwDesiredAccess); PALIMPORT HANDLE PALAPI OpenSemaphoreW( IN DWORD dwDesiredAccess, IN BOOL bInheritHandle, IN LPCWSTR lpName); #define CreateSemaphoreEx CreateSemaphoreExW PALIMPORT BOOL PALAPI ReleaseSemaphore( IN HANDLE hSemaphore, IN LONG lReleaseCount, OUT LPLONG lpPreviousCount); PALIMPORT HANDLE PALAPI CreateEventW( IN LPSECURITY_ATTRIBUTES lpEventAttributes, IN BOOL bManualReset, IN BOOL bInitialState, IN LPCWSTR lpName); PALIMPORT HANDLE PALAPI CreateEventExW( IN LPSECURITY_ATTRIBUTES lpEventAttributes, IN LPCWSTR lpName, IN DWORD dwFlags, IN DWORD dwDesiredAccess); // CreateEventExW: dwFlags #define CREATE_EVENT_MANUAL_RESET ((DWORD)0x1) #define CREATE_EVENT_INITIAL_SET ((DWORD)0x2) #define CreateEvent CreateEventW PALIMPORT BOOL PALAPI SetEvent( IN HANDLE hEvent); PALIMPORT BOOL PALAPI ResetEvent( IN HANDLE hEvent); PALIMPORT HANDLE PALAPI OpenEventW( IN DWORD dwDesiredAccess, IN BOOL bInheritHandle, IN LPCWSTR lpName); #ifdef UNICODE #define OpenEvent OpenEventW #endif PALIMPORT HANDLE PALAPI CreateMutexW( IN LPSECURITY_ATTRIBUTES lpMutexAttributes, IN BOOL bInitialOwner, IN LPCWSTR lpName); PALIMPORT 
HANDLE PALAPI CreateMutexExW( IN LPSECURITY_ATTRIBUTES lpMutexAttributes, IN LPCWSTR lpName, IN DWORD dwFlags, IN DWORD dwDesiredAccess); // CreateMutexExW: dwFlags #define CREATE_MUTEX_INITIAL_OWNER ((DWORD)0x1) #define CreateMutex CreateMutexW PALIMPORT HANDLE PALAPI OpenMutexW( IN DWORD dwDesiredAccess, IN BOOL bInheritHandle, IN LPCWSTR lpName); #ifdef UNICODE #define OpenMutex OpenMutexW #endif PALIMPORT BOOL PALAPI ReleaseMutex( IN HANDLE hMutex); PALIMPORT DWORD PALAPI GetCurrentProcessId(); PALIMPORT DWORD PALAPI GetCurrentSessionId(); PALIMPORT HANDLE PALAPI GetCurrentProcess(); PALIMPORT DWORD PALAPI GetCurrentThreadId(); PALIMPORT size_t PALAPI PAL_GetCurrentOSThreadId(); // To work around multiply-defined symbols in the Carbon framework. #define GetCurrentThread PAL_GetCurrentThread PALIMPORT HANDLE PALAPI GetCurrentThread(); #define STARTF_USESTDHANDLES 0x00000100 typedef struct _STARTUPINFOW { DWORD cb; LPWSTR lpReserved_PAL_Undefined; LPWSTR lpDesktop_PAL_Undefined; LPWSTR lpTitle_PAL_Undefined; DWORD dwX_PAL_Undefined; DWORD dwY_PAL_Undefined; DWORD dwXSize_PAL_Undefined; DWORD dwYSize_PAL_Undefined; DWORD dwXCountChars_PAL_Undefined; DWORD dwYCountChars_PAL_Undefined; DWORD dwFillAttribute_PAL_Undefined; DWORD dwFlags; WORD wShowWindow_PAL_Undefined; WORD cbReserved2_PAL_Undefined; LPBYTE lpReserved2_PAL_Undefined; HANDLE hStdInput; HANDLE hStdOutput; HANDLE hStdError; } STARTUPINFOW, *LPSTARTUPINFOW; typedef STARTUPINFOW STARTUPINFO; typedef LPSTARTUPINFOW LPSTARTUPINFO; #define CREATE_NEW_CONSOLE 0x00000010 #define NORMAL_PRIORITY_CLASS 0x00000020 typedef struct _PROCESS_INFORMATION { HANDLE hProcess; HANDLE hThread; DWORD dwProcessId; DWORD dwThreadId_PAL_Undefined; } PROCESS_INFORMATION, *PPROCESS_INFORMATION, *LPPROCESS_INFORMATION; PALIMPORT BOOL PALAPI CreateProcessW( IN LPCWSTR lpApplicationName, IN LPWSTR lpCommandLine, IN LPSECURITY_ATTRIBUTES lpProcessAttributes, IN LPSECURITY_ATTRIBUTES lpThreadAttributes, IN BOOL bInheritHandles, IN 
DWORD dwCreationFlags, IN LPVOID lpEnvironment, IN LPCWSTR lpCurrentDirectory, IN LPSTARTUPINFOW lpStartupInfo, OUT LPPROCESS_INFORMATION lpProcessInformation); #define CreateProcess CreateProcessW PALIMPORT PAL_NORETURN VOID PALAPI ExitProcess( IN UINT uExitCode); PALIMPORT BOOL PALAPI TerminateProcess( IN HANDLE hProcess, IN UINT uExitCode); PALIMPORT BOOL PALAPI GetExitCodeProcess( IN HANDLE hProcess, IN LPDWORD lpExitCode); PALIMPORT BOOL PALAPI GetProcessTimes( IN HANDLE hProcess, OUT LPFILETIME lpCreationTime, OUT LPFILETIME lpExitTime, OUT LPFILETIME lpKernelTime, OUT LPFILETIME lpUserTime); #define MAXIMUM_WAIT_OBJECTS 64 #define WAIT_OBJECT_0 0 #define WAIT_ABANDONED 0x00000080 #define WAIT_ABANDONED_0 0x00000080 #define WAIT_TIMEOUT 258 #define WAIT_FAILED ((DWORD)0xFFFFFFFF) #define INFINITE 0xFFFFFFFF // Infinite timeout PALIMPORT DWORD PALAPI WaitForSingleObject( IN HANDLE hHandle, IN DWORD dwMilliseconds); PALIMPORT DWORD PALAPI PAL_WaitForSingleObjectPrioritized( IN HANDLE hHandle, IN DWORD dwMilliseconds); PALIMPORT DWORD PALAPI WaitForSingleObjectEx( IN HANDLE hHandle, IN DWORD dwMilliseconds, IN BOOL bAlertable); PALIMPORT DWORD PALAPI WaitForMultipleObjects( IN DWORD nCount, IN CONST HANDLE *lpHandles, IN BOOL bWaitAll, IN DWORD dwMilliseconds); PALIMPORT DWORD PALAPI WaitForMultipleObjectsEx( IN DWORD nCount, IN CONST HANDLE *lpHandles, IN BOOL bWaitAll, IN DWORD dwMilliseconds, IN BOOL bAlertable); PALIMPORT DWORD PALAPI SignalObjectAndWait( IN HANDLE hObjectToSignal, IN HANDLE hObjectToWaitOn, IN DWORD dwMilliseconds, IN BOOL bAlertable); #define DUPLICATE_CLOSE_SOURCE 0x00000001 #define DUPLICATE_SAME_ACCESS 0x00000002 PALIMPORT BOOL PALAPI DuplicateHandle( IN HANDLE hSourceProcessHandle, IN HANDLE hSourceHandle, IN HANDLE hTargetProcessHandle, OUT LPHANDLE lpTargetHandle, IN DWORD dwDesiredAccess, IN BOOL bInheritHandle, IN DWORD dwOptions); PALIMPORT VOID PALAPI Sleep( IN DWORD dwMilliseconds); PALIMPORT DWORD PALAPI SleepEx( IN DWORD 
dwMilliseconds, IN BOOL bAlertable); PALIMPORT BOOL PALAPI SwitchToThread(); #define DEBUG_PROCESS 0x00000001 #define DEBUG_ONLY_THIS_PROCESS 0x00000002 #define CREATE_SUSPENDED 0x00000004 #define STACK_SIZE_PARAM_IS_A_RESERVATION 0x00010000 PALIMPORT HANDLE PALAPI CreateThread( IN LPSECURITY_ATTRIBUTES lpThreadAttributes, IN DWORD dwStackSize, IN LPTHREAD_START_ROUTINE lpStartAddress, IN LPVOID lpParameter, IN DWORD dwCreationFlags, OUT LPDWORD lpThreadId); PALIMPORT HANDLE PALAPI PAL_CreateThread64( IN LPSECURITY_ATTRIBUTES lpThreadAttributes, IN DWORD dwStackSize, IN LPTHREAD_START_ROUTINE lpStartAddress, IN LPVOID lpParameter, IN DWORD dwCreationFlags, OUT SIZE_T* pThreadId); PALIMPORT PAL_NORETURN VOID PALAPI ExitThread( IN DWORD dwExitCode); PALIMPORT DWORD PALAPI ResumeThread( IN HANDLE hThread); typedef VOID (PALAPI_NOEXPORT *PAPCFUNC)(ULONG_PTR dwParam); PALIMPORT DWORD PALAPI QueueUserAPC( IN PAPCFUNC pfnAPC, IN HANDLE hThread, IN ULONG_PTR dwData); #ifdef HOST_X86 // // *********************************************************************************** // // NOTE: These context definitions are replicated in ndp/clr/src/debug/inc/DbgTargetContext.h (for the // purposes manipulating contexts from different platforms during remote debugging). Be sure to keep those // definitions in sync if you make any changes here. 
// // *********************************************************************************** // #define SIZE_OF_80387_REGISTERS 80 #define CONTEXT_i386 0x00010000 #define CONTEXT_CONTROL (CONTEXT_i386 | 0x00000001L) // SS:SP, CS:IP, FLAGS, BP #define CONTEXT_INTEGER (CONTEXT_i386 | 0x00000002L) // AX, BX, CX, DX, SI, DI #define CONTEXT_SEGMENTS (CONTEXT_i386 | 0x00000004L) #define CONTEXT_FLOATING_POINT (CONTEXT_i386 | 0x00000008L) // 387 state #define CONTEXT_DEBUG_REGISTERS (CONTEXT_i386 | 0x00000010L) #define CONTEXT_FULL (CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_SEGMENTS) #define CONTEXT_EXTENDED_REGISTERS (CONTEXT_i386 | 0x00000020L) #define CONTEXT_ALL (CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_SEGMENTS | CONTEXT_FLOATING_POINT | CONTEXT_DEBUG_REGISTERS | CONTEXT_EXTENDED_REGISTERS) #define MAXIMUM_SUPPORTED_EXTENSION 512 #define CONTEXT_XSTATE (CONTEXT_i386 | 0x40L) #define CONTEXT_EXCEPTION_ACTIVE 0x8000000L #define CONTEXT_SERVICE_ACTIVE 0x10000000L #define CONTEXT_EXCEPTION_REQUEST 0x40000000L #define CONTEXT_EXCEPTION_REPORTING 0x80000000L // // This flag is set by the unwinder if it has unwound to a call // site, and cleared whenever it unwinds through a trap frame. // It is used by language-specific exception handlers to help // differentiate exception scopes during dispatching. 
// #define CONTEXT_UNWOUND_TO_CALL 0x20000000 typedef struct _FLOATING_SAVE_AREA { DWORD ControlWord; DWORD StatusWord; DWORD TagWord; DWORD ErrorOffset; DWORD ErrorSelector; DWORD DataOffset; DWORD DataSelector; BYTE RegisterArea[SIZE_OF_80387_REGISTERS]; DWORD Cr0NpxState; } FLOATING_SAVE_AREA; typedef FLOATING_SAVE_AREA *PFLOATING_SAVE_AREA; typedef struct _CONTEXT { ULONG ContextFlags; ULONG Dr0_PAL_Undefined; ULONG Dr1_PAL_Undefined; ULONG Dr2_PAL_Undefined; ULONG Dr3_PAL_Undefined; ULONG Dr6_PAL_Undefined; ULONG Dr7_PAL_Undefined; FLOATING_SAVE_AREA FloatSave; ULONG SegGs_PAL_Undefined; ULONG SegFs_PAL_Undefined; ULONG SegEs_PAL_Undefined; ULONG SegDs_PAL_Undefined; ULONG Edi; ULONG Esi; ULONG Ebx; ULONG Edx; ULONG Ecx; ULONG Eax; ULONG Ebp; ULONG Eip; ULONG SegCs; ULONG EFlags; ULONG Esp; ULONG SegSs; UCHAR ExtendedRegisters[MAXIMUM_SUPPORTED_EXTENSION]; } CONTEXT, *PCONTEXT, *LPCONTEXT; // To support saving and loading xmm register context we need to know the offset in the ExtendedRegisters // section at which they are stored. This has been determined experimentally since I have found no // documentation thus far but it corresponds to the offset we'd expect if a fxsave instruction was used to // store the regular FP state along with the XMM registers at the start of the extended registers section. // Technically the offset doesn't really matter if no code in the PAL or runtime knows what the offset should // be either (as long as we're consistent across GetThreadContext() and SetThreadContext() and we don't // support any other values in the ExtendedRegisters) but we might as well be as accurate as we can. 
#define CONTEXT_EXREG_XMM_OFFSET 160 typedef struct _KNONVOLATILE_CONTEXT { DWORD Edi; DWORD Esi; DWORD Ebx; DWORD Ebp; } KNONVOLATILE_CONTEXT, *PKNONVOLATILE_CONTEXT; typedef struct _KNONVOLATILE_CONTEXT_POINTERS { // The ordering of these fields should be aligned with that // of corresponding fields in CONTEXT // // (See FillRegDisplay in inc/regdisp.h for details) PDWORD Edi; PDWORD Esi; PDWORD Ebx; PDWORD Edx; PDWORD Ecx; PDWORD Eax; PDWORD Ebp; } KNONVOLATILE_CONTEXT_POINTERS, *PKNONVOLATILE_CONTEXT_POINTERS; #elif defined(HOST_AMD64) // copied from winnt.h #define CONTEXT_AMD64 0x100000 #define CONTEXT_CONTROL (CONTEXT_AMD64 | 0x1L) #define CONTEXT_INTEGER (CONTEXT_AMD64 | 0x2L) #define CONTEXT_SEGMENTS (CONTEXT_AMD64 | 0x4L) #define CONTEXT_FLOATING_POINT (CONTEXT_AMD64 | 0x8L) #define CONTEXT_DEBUG_REGISTERS (CONTEXT_AMD64 | 0x10L) #define CONTEXT_FULL (CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_FLOATING_POINT) #define CONTEXT_ALL (CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_SEGMENTS | CONTEXT_FLOATING_POINT | CONTEXT_DEBUG_REGISTERS) #define CONTEXT_XSTATE (CONTEXT_AMD64 | 0x40L) #define CONTEXT_EXCEPTION_ACTIVE 0x8000000 #define CONTEXT_SERVICE_ACTIVE 0x10000000 #define CONTEXT_EXCEPTION_REQUEST 0x40000000 #define CONTEXT_EXCEPTION_REPORTING 0x80000000 typedef struct DECLSPEC_ALIGN(16) _M128A { ULONGLONG Low; LONGLONG High; } M128A, *PM128A; typedef struct _XMM_SAVE_AREA32 { WORD ControlWord; WORD StatusWord; BYTE TagWord; BYTE Reserved1; WORD ErrorOpcode; DWORD ErrorOffset; WORD ErrorSelector; WORD Reserved2; DWORD DataOffset; WORD DataSelector; WORD Reserved3; DWORD MxCsr; DWORD MxCsr_Mask; M128A FloatRegisters[8]; M128A XmmRegisters[16]; BYTE Reserved4[96]; } XMM_SAVE_AREA32, *PXMM_SAVE_AREA32; #define LEGACY_SAVE_AREA_LENGTH sizeof(XMM_SAVE_AREA32) // // Context Frame // // This frame has a several purposes: 1) it is used as an argument to // NtContinue, 2) is is used to constuct a call frame for APC delivery, // and 3) it is used in the user level 
thread creation routines. // // // The flags field within this record controls the contents of a CONTEXT // record. // // If the context record is used as an input parameter, then for each // portion of the context record controlled by a flag whose value is // set, it is assumed that that portion of the context record contains // valid context. If the context record is being used to modify a threads // context, then only that portion of the threads context is modified. // // If the context record is used as an output parameter to capture the // context of a thread, then only those portions of the thread's context // corresponding to set flags will be returned. // // CONTEXT_CONTROL specifies SegSs, Rsp, SegCs, Rip, and EFlags. // // CONTEXT_INTEGER specifies Rax, Rcx, Rdx, Rbx, Rbp, Rsi, Rdi, and R8-R15. // // CONTEXT_SEGMENTS specifies SegDs, SegEs, SegFs, and SegGs. // // CONTEXT_DEBUG_REGISTERS specifies Dr0-Dr3 and Dr6-Dr7. // // CONTEXT_MMX_REGISTERS specifies the floating point and extended registers // Mm0/St0-Mm7/St7 and Xmm0-Xmm15). // typedef struct DECLSPEC_ALIGN(16) _CONTEXT { // // Register parameter home addresses. // // N.B. These fields are for convience - they could be used to extend the // context record in the future. // DWORD64 P1Home; DWORD64 P2Home; DWORD64 P3Home; DWORD64 P4Home; DWORD64 P5Home; DWORD64 P6Home; // // Control flags. // DWORD ContextFlags; DWORD MxCsr; // // Segment Registers and processor flags. // WORD SegCs; WORD SegDs; WORD SegEs; WORD SegFs; WORD SegGs; WORD SegSs; DWORD EFlags; // // Debug registers // DWORD64 Dr0; DWORD64 Dr1; DWORD64 Dr2; DWORD64 Dr3; DWORD64 Dr6; DWORD64 Dr7; // // Integer registers. // DWORD64 Rax; DWORD64 Rcx; DWORD64 Rdx; DWORD64 Rbx; DWORD64 Rsp; DWORD64 Rbp; DWORD64 Rsi; DWORD64 Rdi; DWORD64 R8; DWORD64 R9; DWORD64 R10; DWORD64 R11; DWORD64 R12; DWORD64 R13; DWORD64 R14; DWORD64 R15; // // Program counter. // DWORD64 Rip; // // Floating point state. 
// union { XMM_SAVE_AREA32 FltSave; struct { M128A Header[2]; M128A Legacy[8]; M128A Xmm0; M128A Xmm1; M128A Xmm2; M128A Xmm3; M128A Xmm4; M128A Xmm5; M128A Xmm6; M128A Xmm7; M128A Xmm8; M128A Xmm9; M128A Xmm10; M128A Xmm11; M128A Xmm12; M128A Xmm13; M128A Xmm14; M128A Xmm15; }; }; // // Vector registers. // M128A VectorRegister[26]; DWORD64 VectorControl; // // Special debug control registers. // DWORD64 DebugControl; DWORD64 LastBranchToRip; DWORD64 LastBranchFromRip; DWORD64 LastExceptionToRip; DWORD64 LastExceptionFromRip; } CONTEXT, *PCONTEXT, *LPCONTEXT; // // Nonvolatile context pointer record. // typedef struct _KNONVOLATILE_CONTEXT_POINTERS { union { PM128A FloatingContext[16]; struct { PM128A Xmm0; PM128A Xmm1; PM128A Xmm2; PM128A Xmm3; PM128A Xmm4; PM128A Xmm5; PM128A Xmm6; PM128A Xmm7; PM128A Xmm8; PM128A Xmm9; PM128A Xmm10; PM128A Xmm11; PM128A Xmm12; PM128A Xmm13; PM128A Xmm14; PM128A Xmm15; } ; } ; union { PDWORD64 IntegerContext[16]; struct { PDWORD64 Rax; PDWORD64 Rcx; PDWORD64 Rdx; PDWORD64 Rbx; PDWORD64 Rsp; PDWORD64 Rbp; PDWORD64 Rsi; PDWORD64 Rdi; PDWORD64 R8; PDWORD64 R9; PDWORD64 R10; PDWORD64 R11; PDWORD64 R12; PDWORD64 R13; PDWORD64 R14; PDWORD64 R15; } ; } ; } KNONVOLATILE_CONTEXT_POINTERS, *PKNONVOLATILE_CONTEXT_POINTERS; #elif defined(HOST_ARM) #define CONTEXT_ARM 0x00200000L // end_wx86 #define CONTEXT_CONTROL (CONTEXT_ARM | 0x1L) #define CONTEXT_INTEGER (CONTEXT_ARM | 0x2L) #define CONTEXT_FLOATING_POINT (CONTEXT_ARM | 0x4L) #define CONTEXT_DEBUG_REGISTERS (CONTEXT_ARM | 0x8L) #define CONTEXT_FULL (CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_FLOATING_POINT) #define CONTEXT_ALL (CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_FLOATING_POINT | CONTEXT_DEBUG_REGISTERS) #define CONTEXT_EXCEPTION_ACTIVE 0x8000000L #define CONTEXT_SERVICE_ACTIVE 0x10000000L #define CONTEXT_EXCEPTION_REQUEST 0x40000000L #define CONTEXT_EXCEPTION_REPORTING 0x80000000L // // This flag is set by the unwinder if it has unwound to a call // site, and cleared whenever 
it unwinds through a trap frame. // It is used by language-specific exception handlers to help // differentiate exception scopes during dispatching. // #define CONTEXT_UNWOUND_TO_CALL 0x20000000 // // Specify the number of breakpoints and watchpoints that the OS // will track. Architecturally, ARM supports up to 16. In practice, // however, almost no one implements more than 4 of each. // #define ARM_MAX_BREAKPOINTS 8 #define ARM_MAX_WATCHPOINTS 1 typedef struct _NEON128 { ULONGLONG Low; LONGLONG High; } NEON128, *PNEON128; // // Context Frame // // This frame has a several purposes: 1) it is used as an argument to // NtContinue, 2) it is used to constuct a call frame for APC delivery, // and 3) it is used in the user level thread creation routines. // // // The flags field within this record controls the contents of a CONTEXT // record. // // If the context record is used as an input parameter, then for each // portion of the context record controlled by a flag whose value is // set, it is assumed that that portion of the context record contains // valid context. If the context record is being used to modify a threads // context, then only that portion of the threads context is modified. // // If the context record is used as an output parameter to capture the // context of a thread, then only those portions of the thread's context // corresponding to set flags will be returned. // // CONTEXT_CONTROL specifies Sp, Lr, Pc, and Cpsr // // CONTEXT_INTEGER specifies R0-R12 // // CONTEXT_FLOATING_POINT specifies Q0-Q15 / D0-D31 / S0-S31 // // CONTEXT_DEBUG_REGISTERS specifies up to 16 of DBGBVR, DBGBCR, DBGWVR, // DBGWCR. // typedef struct DECLSPEC_ALIGN(8) _CONTEXT { // // Control flags. 
// DWORD ContextFlags; // // Integer registers // DWORD R0; DWORD R1; DWORD R2; DWORD R3; DWORD R4; DWORD R5; DWORD R6; DWORD R7; DWORD R8; DWORD R9; DWORD R10; DWORD R11; DWORD R12; // // Control Registers // DWORD Sp; DWORD Lr; DWORD Pc; DWORD Cpsr; // // Floating Point/NEON Registers // DWORD Fpscr; DWORD Padding; union { NEON128 Q[16]; ULONGLONG D[32]; DWORD S[32]; }; // // Debug registers // DWORD Bvr[ARM_MAX_BREAKPOINTS]; DWORD Bcr[ARM_MAX_BREAKPOINTS]; DWORD Wvr[ARM_MAX_WATCHPOINTS]; DWORD Wcr[ARM_MAX_WATCHPOINTS]; DWORD Padding2[2]; } CONTEXT, *PCONTEXT, *LPCONTEXT; // // Nonvolatile context pointer record. // typedef struct _KNONVOLATILE_CONTEXT_POINTERS { PDWORD R4; PDWORD R5; PDWORD R6; PDWORD R7; PDWORD R8; PDWORD R9; PDWORD R10; PDWORD R11; PDWORD Lr; PULONGLONG D8; PULONGLONG D9; PULONGLONG D10; PULONGLONG D11; PULONGLONG D12; PULONGLONG D13; PULONGLONG D14; PULONGLONG D15; } KNONVOLATILE_CONTEXT_POINTERS, *PKNONVOLATILE_CONTEXT_POINTERS; typedef struct _IMAGE_ARM_RUNTIME_FUNCTION_ENTRY { DWORD BeginAddress; DWORD EndAddress; union { DWORD UnwindData; struct { DWORD Flag : 2; DWORD FunctionLength : 11; DWORD Ret : 2; DWORD H : 1; DWORD Reg : 3; DWORD R : 1; DWORD L : 1; DWORD C : 1; DWORD StackAdjust : 10; }; }; } IMAGE_ARM_RUNTIME_FUNCTION_ENTRY, * PIMAGE_ARM_RUNTIME_FUNCTION_ENTRY; #elif defined(HOST_ARM64) #define CONTEXT_ARM64 0x00400000L #define CONTEXT_CONTROL (CONTEXT_ARM64 | 0x1L) #define CONTEXT_INTEGER (CONTEXT_ARM64 | 0x2L) #define CONTEXT_FLOATING_POINT (CONTEXT_ARM64 | 0x4L) #define CONTEXT_DEBUG_REGISTERS (CONTEXT_ARM64 | 0x8L) #define CONTEXT_FULL (CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_FLOATING_POINT) #define CONTEXT_ALL (CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_FLOATING_POINT | CONTEXT_DEBUG_REGISTERS) #define CONTEXT_EXCEPTION_ACTIVE 0x8000000L #define CONTEXT_SERVICE_ACTIVE 0x10000000L #define CONTEXT_EXCEPTION_REQUEST 0x40000000L #define CONTEXT_EXCEPTION_REPORTING 0x80000000L // // This flag is set by the unwinder if it 
has unwound to a call // site, and cleared whenever it unwinds through a trap frame. // It is used by language-specific exception handlers to help // differentiate exception scopes during dispatching. // #define CONTEXT_UNWOUND_TO_CALL 0x20000000 // // Define initial Cpsr/Fpscr value // #define INITIAL_CPSR 0x10 #define INITIAL_FPSCR 0 // begin_ntoshvp // // Specify the number of breakpoints and watchpoints that the OS // will track. Architecturally, ARM64 supports up to 16. In practice, // however, almost no one implements more than 4 of each. // #define ARM64_MAX_BREAKPOINTS 8 #define ARM64_MAX_WATCHPOINTS 2 // // Context Frame // // This frame has a several purposes: 1) it is used as an argument to // NtContinue, 2) it is used to constuct a call frame for APC delivery, // and 3) it is used in the user level thread creation routines. // // // The flags field within this record controls the contents of a CONTEXT // record. // // If the context record is used as an input parameter, then for each // portion of the context record controlled by a flag whose value is // set, it is assumed that that portion of the context record contains // valid context. If the context record is being used to modify a threads // context, then only that portion of the threads context is modified. // // If the context record is used as an output parameter to capture the // context of a thread, then only those portions of the thread's context // corresponding to set flags will be returned. // // CONTEXT_CONTROL specifies Sp, Lr, Pc, and Cpsr // // CONTEXT_INTEGER specifies R0-R12 // // CONTEXT_FLOATING_POINT specifies Q0-Q15 / D0-D31 / S0-S31 // // CONTEXT_DEBUG_REGISTERS specifies up to 16 of DBGBVR, DBGBCR, DBGWVR, // DBGWCR. // typedef struct _NEON128 { ULONGLONG Low; LONGLONG High; } NEON128, *PNEON128; typedef struct DECLSPEC_ALIGN(16) _CONTEXT { // // Control flags. 
// /* +0x000 */ DWORD ContextFlags; // // Integer registers // /* +0x004 */ DWORD Cpsr; // NZVF + DAIF + CurrentEL + SPSel /* +0x008 */ union { struct { DWORD64 X0; DWORD64 X1; DWORD64 X2; DWORD64 X3; DWORD64 X4; DWORD64 X5; DWORD64 X6; DWORD64 X7; DWORD64 X8; DWORD64 X9; DWORD64 X10; DWORD64 X11; DWORD64 X12; DWORD64 X13; DWORD64 X14; DWORD64 X15; DWORD64 X16; DWORD64 X17; DWORD64 X18; DWORD64 X19; DWORD64 X20; DWORD64 X21; DWORD64 X22; DWORD64 X23; DWORD64 X24; DWORD64 X25; DWORD64 X26; DWORD64 X27; DWORD64 X28; }; DWORD64 X[29]; }; /* +0x0f0 */ DWORD64 Fp; /* +0x0f8 */ DWORD64 Lr; /* +0x100 */ DWORD64 Sp; /* +0x108 */ DWORD64 Pc; // // Floating Point/NEON Registers // /* +0x110 */ NEON128 V[32]; /* +0x310 */ DWORD Fpcr; /* +0x314 */ DWORD Fpsr; // // Debug registers // /* +0x318 */ DWORD Bcr[ARM64_MAX_BREAKPOINTS]; /* +0x338 */ DWORD64 Bvr[ARM64_MAX_BREAKPOINTS]; /* +0x378 */ DWORD Wcr[ARM64_MAX_WATCHPOINTS]; /* +0x380 */ DWORD64 Wvr[ARM64_MAX_WATCHPOINTS]; /* +0x390 */ } CONTEXT, *PCONTEXT, *LPCONTEXT; // // Nonvolatile context pointer record. 
// typedef struct _KNONVOLATILE_CONTEXT_POINTERS { PDWORD64 X19; PDWORD64 X20; PDWORD64 X21; PDWORD64 X22; PDWORD64 X23; PDWORD64 X24; PDWORD64 X25; PDWORD64 X26; PDWORD64 X27; PDWORD64 X28; PDWORD64 Fp; PDWORD64 Lr; PDWORD64 D8; PDWORD64 D9; PDWORD64 D10; PDWORD64 D11; PDWORD64 D12; PDWORD64 D13; PDWORD64 D14; PDWORD64 D15; } KNONVOLATILE_CONTEXT_POINTERS, *PKNONVOLATILE_CONTEXT_POINTERS; #elif defined(HOST_LOONGARCH64) //Please refence "src/pal/src/arch/loongarch64/asmconstants.h" #define CONTEXT_LOONGARCH64 0x00800000 #define CONTEXT_CONTROL (CONTEXT_LOONGARCH64 | 0x1) #define CONTEXT_INTEGER (CONTEXT_LOONGARCH64 | 0x2) #define CONTEXT_FLOATING_POINT (CONTEXT_LOONGARCH64 | 0x4) #define CONTEXT_DEBUG_REGISTERS (CONTEXT_LOONGARCH64 | 0x8) #define CONTEXT_FULL (CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_FLOATING_POINT) #define CONTEXT_ALL (CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_FLOATING_POINT | CONTEXT_DEBUG_REGISTERS) #define CONTEXT_EXCEPTION_ACTIVE 0x8000000 #define CONTEXT_SERVICE_ACTIVE 0x10000000 #define CONTEXT_EXCEPTION_REQUEST 0x40000000 #define CONTEXT_EXCEPTION_REPORTING 0x80000000 // // This flag is set by the unwinder if it has unwound to a call // site, and cleared whenever it unwinds through a trap frame. // It is used by language-specific exception handlers to help // differentiate exception scopes during dispatching. // #define CONTEXT_UNWOUND_TO_CALL 0x20000000 // begin_ntoshvp // // Specify the number of breakpoints and watchpoints that the OS // will track. Architecturally, LOONGARCH64 supports up to 16. In practice, // however, almost no one implements more than 4 of each. // #define LOONGARCH64_MAX_BREAKPOINTS 8 #define LOONGARCH64_MAX_WATCHPOINTS 2 // // Context Frame // // This frame has a several purposes: 1) it is used as an argument to // NtContinue, 2) it is used to constuct a call frame for APC delivery, // and 3) it is used in the user level thread creation routines. 
// // // The flags field within this record controls the contents of a CONTEXT // record. // // If the context record is used as an input parameter, then for each // portion of the context record controlled by a flag whose value is // set, it is assumed that that portion of the context record contains // valid context. If the context record is being used to modify a threads // context, then only that portion of the threads context is modified. // // If the context record is used as an output parameter to capture the // context of a thread, then only those portions of the thread's context // corresponding to set flags will be returned. // typedef struct DECLSPEC_ALIGN(16) _CONTEXT { // // Control flags. // /* +0x000 */ DWORD ContextFlags; // // Integer registers, abi=N64. // DWORD64 R0; DWORD64 Ra; DWORD64 Tp; DWORD64 Sp; DWORD64 A0;//DWORD64 V0; DWORD64 A1;//DWORD64 V1; DWORD64 A2; DWORD64 A3; DWORD64 A4; DWORD64 A5; DWORD64 A6; DWORD64 A7; DWORD64 T0; DWORD64 T1; DWORD64 T2; DWORD64 T3; DWORD64 T4; DWORD64 T5; DWORD64 T6; DWORD64 T7; DWORD64 T8; DWORD64 X0; DWORD64 Fp; DWORD64 S0; DWORD64 S1; DWORD64 S2; DWORD64 S3; DWORD64 S4; DWORD64 S5; DWORD64 S6; DWORD64 S7; DWORD64 S8; DWORD64 Pc; // // Floating Point Registers // //TODO: support the SIMD. DWORD64 F[32]; DWORD Fcsr; } CONTEXT, *PCONTEXT, *LPCONTEXT; // // Nonvolatile context pointer record. // typedef struct _KNONVOLATILE_CONTEXT_POINTERS { PDWORD64 S0; PDWORD64 S1; PDWORD64 S2; PDWORD64 S3; PDWORD64 S4; PDWORD64 S5; PDWORD64 S6; PDWORD64 S7; PDWORD64 S8; PDWORD64 Fp; PDWORD64 Tp; PDWORD64 Ra; PDWORD64 F24; PDWORD64 F25; PDWORD64 F26; PDWORD64 F27; PDWORD64 F28; PDWORD64 F29; PDWORD64 F30; PDWORD64 F31; } KNONVOLATILE_CONTEXT_POINTERS, *PKNONVOLATILE_CONTEXT_POINTERS; #elif defined(HOST_S390X) // There is no context for s390x defined in winnt.h, // so we re-use the amd64 values. 
#define CONTEXT_S390X 0x100000 #define CONTEXT_CONTROL (CONTEXT_S390X | 0x1L) #define CONTEXT_INTEGER (CONTEXT_S390X | 0x2L) #define CONTEXT_FLOATING_POINT (CONTEXT_S390X | 0x4L) #define CONTEXT_FULL (CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_FLOATING_POINT) #define CONTEXT_ALL (CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_FLOATING_POINT) #define CONTEXT_EXCEPTION_ACTIVE 0x8000000 #define CONTEXT_SERVICE_ACTIVE 0x10000000 #define CONTEXT_EXCEPTION_REQUEST 0x40000000 #define CONTEXT_EXCEPTION_REPORTING 0x80000000 typedef struct DECLSPEC_ALIGN(8) _CONTEXT { // // Control flags. // DWORD ContextFlags; // // Integer registers. // union { DWORD64 Gpr[16]; struct { DWORD64 R0; DWORD64 R1; DWORD64 R2; DWORD64 R3; DWORD64 R4; DWORD64 R5; DWORD64 R6; DWORD64 R7; DWORD64 R8; DWORD64 R9; DWORD64 R10; DWORD64 R11; DWORD64 R12; DWORD64 R13; DWORD64 R14; DWORD64 R15; }; }; // // Floating-point registers. // union { DWORD64 Fpr[16]; struct { DWORD64 F0; DWORD64 F1; DWORD64 F2; DWORD64 F3; DWORD64 F4; DWORD64 F5; DWORD64 F6; DWORD64 F7; DWORD64 F8; DWORD64 F9; DWORD64 F10; DWORD64 F11; DWORD64 F12; DWORD64 F13; DWORD64 F14; DWORD64 F15; }; }; // // Control registers. // DWORD64 PSWMask; DWORD64 PSWAddr; } CONTEXT, *PCONTEXT, *LPCONTEXT; // // Nonvolatile context pointer record. // typedef struct _KNONVOLATILE_CONTEXT_POINTERS { PDWORD64 R6; PDWORD64 R7; PDWORD64 R8; PDWORD64 R9; PDWORD64 R10; PDWORD64 R11; PDWORD64 R12; PDWORD64 R13; PDWORD64 R14; PDWORD64 R15; } KNONVOLATILE_CONTEXT_POINTERS, *PKNONVOLATILE_CONTEXT_POINTERS; #else #error Unknown architecture for defining CONTEXT. 
#endif PALIMPORT BOOL PALAPI GetThreadContext( IN HANDLE hThread, IN OUT LPCONTEXT lpContext); PALIMPORT BOOL PALAPI SetThreadContext( IN HANDLE hThread, IN CONST CONTEXT *lpContext); #define THREAD_BASE_PRIORITY_LOWRT 15 #define THREAD_BASE_PRIORITY_MAX 2 #define THREAD_BASE_PRIORITY_MIN (-2) #define THREAD_BASE_PRIORITY_IDLE (-15) #define THREAD_PRIORITY_LOWEST THREAD_BASE_PRIORITY_MIN #define THREAD_PRIORITY_BELOW_NORMAL (THREAD_PRIORITY_LOWEST+1) #define THREAD_PRIORITY_NORMAL 0 #define THREAD_PRIORITY_HIGHEST THREAD_BASE_PRIORITY_MAX #define THREAD_PRIORITY_ABOVE_NORMAL (THREAD_PRIORITY_HIGHEST-1) #define THREAD_PRIORITY_ERROR_RETURN (MAXLONG) #define THREAD_PRIORITY_TIME_CRITICAL THREAD_BASE_PRIORITY_LOWRT #define THREAD_PRIORITY_IDLE THREAD_BASE_PRIORITY_IDLE PALIMPORT int PALAPI GetThreadPriority( IN HANDLE hThread); PALIMPORT BOOL PALAPI SetThreadPriority( IN HANDLE hThread, IN int nPriority); PALIMPORT BOOL PALAPI GetThreadTimes( IN HANDLE hThread, OUT LPFILETIME lpCreationTime, OUT LPFILETIME lpExitTime, OUT LPFILETIME lpKernelTime, OUT LPFILETIME lpUserTime); PALIMPORT HRESULT PALAPI SetThreadDescription( IN HANDLE hThread, IN PCWSTR lpThreadDescription ); #define TLS_OUT_OF_INDEXES ((DWORD)0xFFFFFFFF) PALIMPORT PVOID PALAPI PAL_GetStackBase(); PALIMPORT PVOID PALAPI PAL_GetStackLimit(); PALIMPORT DWORD PALAPI PAL_GetLogicalCpuCountFromOS(); PALIMPORT DWORD PALAPI PAL_GetTotalCpuCount(); PALIMPORT size_t PALAPI PAL_GetRestrictedPhysicalMemoryLimit(); PALIMPORT BOOL PALAPI PAL_GetPhysicalMemoryUsed(size_t* val); PALIMPORT BOOL PALAPI PAL_GetCpuLimit(UINT* val); PALIMPORT size_t PALAPI PAL_GetLogicalProcessorCacheSizeFromOS(); typedef BOOL(*UnwindReadMemoryCallback)(PVOID address, PVOID buffer, SIZE_T size); PALIMPORT BOOL PALAPI PAL_VirtualUnwind(CONTEXT *context, KNONVOLATILE_CONTEXT_POINTERS *contextPointers); PALIMPORT BOOL PALAPI PAL_VirtualUnwindOutOfProc(CONTEXT *context, KNONVOLATILE_CONTEXT_POINTERS *contextPointers, PULONG64 functionStart, 
SIZE_T baseAddress, UnwindReadMemoryCallback readMemoryCallback); #define GetLogicalProcessorCacheSizeFromOS PAL_GetLogicalProcessorCacheSizeFromOS /* PAL_CS_NATIVE_DATA_SIZE is defined as sizeof(PAL_CRITICAL_SECTION_NATIVE_DATA) */ #if defined(__APPLE__) && defined(__i386__) #define PAL_CS_NATIVE_DATA_SIZE 76 #elif defined(__APPLE__) && defined(__x86_64__) #define PAL_CS_NATIVE_DATA_SIZE 120 #elif defined(__APPLE__) && defined(HOST_ARM64) #define PAL_CS_NATIVE_DATA_SIZE 120 #elif defined(__FreeBSD__) && defined(HOST_X86) #define PAL_CS_NATIVE_DATA_SIZE 12 #elif defined(__FreeBSD__) && defined(__x86_64__) #define PAL_CS_NATIVE_DATA_SIZE 24 #elif defined(__linux__) && defined(HOST_ARM) #define PAL_CS_NATIVE_DATA_SIZE 80 #elif defined(__linux__) && defined(HOST_ARM64) #define PAL_CS_NATIVE_DATA_SIZE 116 #elif defined(__linux__) && defined(__i386__) #define PAL_CS_NATIVE_DATA_SIZE 76 #elif defined(__linux__) && defined(__x86_64__) #define PAL_CS_NATIVE_DATA_SIZE 96 #elif defined(__linux__) && defined(HOST_S390X) #define PAL_CS_NATIVE_DATA_SIZE 96 #elif defined(__NetBSD__) && defined(__amd64__) #define PAL_CS_NATIVE_DATA_SIZE 96 #elif defined(__NetBSD__) && defined(__earm__) #define PAL_CS_NATIVE_DATA_SIZE 56 #elif defined(__NetBSD__) && defined(__i386__) #define PAL_CS_NATIVE_DATA_SIZE 56 #elif defined(__sun) && defined(__x86_64__) #define PAL_CS_NATIVE_DATA_SIZE 48 #elif defined(__linux__) && defined(__loongarch64) #define PAL_CS_NATIVE_DATA_SIZE 96 #else #warning #error PAL_CS_NATIVE_DATA_SIZE is not defined for this architecture #endif // typedef struct _CRITICAL_SECTION { PVOID DebugInfo; LONG LockCount; LONG RecursionCount; HANDLE OwningThread; ULONG_PTR SpinCount; #ifdef PAL_TRACK_CRITICAL_SECTIONS_DATA BOOL bInternal; #endif // PAL_TRACK_CRITICAL_SECTIONS_DATA volatile DWORD dwInitState; union CSNativeDataStorage { BYTE rgNativeDataStorage[PAL_CS_NATIVE_DATA_SIZE]; PVOID pvAlign; // make sure the storage is machine-pointer-size aligned } csnds; } 
CRITICAL_SECTION, *PCRITICAL_SECTION, *LPCRITICAL_SECTION; PALIMPORT VOID PALAPI EnterCriticalSection(IN OUT LPCRITICAL_SECTION lpCriticalSection); PALIMPORT VOID PALAPI LeaveCriticalSection(IN OUT LPCRITICAL_SECTION lpCriticalSection); PALIMPORT VOID PALAPI InitializeCriticalSection(OUT LPCRITICAL_SECTION lpCriticalSection); PALIMPORT BOOL PALAPI InitializeCriticalSectionEx(LPCRITICAL_SECTION lpCriticalSection, DWORD dwSpinCount, DWORD Flags); PALIMPORT VOID PALAPI DeleteCriticalSection(IN OUT LPCRITICAL_SECTION lpCriticalSection); PALIMPORT BOOL PALAPI TryEnterCriticalSection(IN OUT LPCRITICAL_SECTION lpCriticalSection); #define SEM_FAILCRITICALERRORS 0x0001 #define SEM_NOOPENFILEERRORBOX 0x8000 PALIMPORT UINT PALAPI SetErrorMode( IN UINT uMode); #define PAGE_NOACCESS 0x01 #define PAGE_READONLY 0x02 #define PAGE_READWRITE 0x04 #define PAGE_WRITECOPY 0x08 #define PAGE_EXECUTE 0x10 #define PAGE_EXECUTE_READ 0x20 #define PAGE_EXECUTE_READWRITE 0x40 #define PAGE_EXECUTE_WRITECOPY 0x80 #define MEM_COMMIT 0x1000 #define MEM_RESERVE 0x2000 #define MEM_DECOMMIT 0x4000 #define MEM_RELEASE 0x8000 #define MEM_RESET 0x80000 #define MEM_FREE 0x10000 #define MEM_PRIVATE 0x20000 #define MEM_MAPPED 0x40000 #define MEM_TOP_DOWN 0x100000 #define MEM_WRITE_WATCH 0x200000 #define MEM_LARGE_PAGES 0x20000000 #define MEM_RESERVE_EXECUTABLE 0x40000000 // reserve memory using executable memory allocator PALIMPORT HANDLE PALAPI CreateFileMappingW( IN HANDLE hFile, IN LPSECURITY_ATTRIBUTES lpFileMappingAttributes, IN DWORD flProtect, IN DWORD dwMaxmimumSizeHigh, IN DWORD dwMaximumSizeLow, IN LPCWSTR lpName); #define CreateFileMapping CreateFileMappingW #define SECTION_QUERY 0x0001 #define SECTION_MAP_WRITE 0x0002 #define SECTION_MAP_READ 0x0004 #define SECTION_ALL_ACCESS (SECTION_MAP_READ | SECTION_MAP_WRITE) // diff from winnt.h #define FILE_MAP_WRITE SECTION_MAP_WRITE #define FILE_MAP_READ SECTION_MAP_READ #define FILE_MAP_ALL_ACCESS SECTION_ALL_ACCESS #define FILE_MAP_COPY SECTION_QUERY 
PALIMPORT HANDLE PALAPI OpenFileMappingW( IN DWORD dwDesiredAccess, IN BOOL bInheritHandle, IN LPCWSTR lpName); #define OpenFileMapping OpenFileMappingW typedef INT_PTR (PALAPI_NOEXPORT *FARPROC)(); PALIMPORT LPVOID PALAPI MapViewOfFile( IN HANDLE hFileMappingObject, IN DWORD dwDesiredAccess, IN DWORD dwFileOffsetHigh, IN DWORD dwFileOffsetLow, IN SIZE_T dwNumberOfBytesToMap); PALIMPORT LPVOID PALAPI MapViewOfFileEx( IN HANDLE hFileMappingObject, IN DWORD dwDesiredAccess, IN DWORD dwFileOffsetHigh, IN DWORD dwFileOffsetLow, IN SIZE_T dwNumberOfBytesToMap, IN LPVOID lpBaseAddress); PALIMPORT BOOL PALAPI UnmapViewOfFile( IN LPCVOID lpBaseAddress); PALIMPORT HMODULE PALAPI LoadLibraryW( IN LPCWSTR lpLibFileName); PALIMPORT HMODULE PALAPI LoadLibraryExW( IN LPCWSTR lpLibFileName, IN /*Reserved*/ HANDLE hFile, IN DWORD dwFlags); PALIMPORT NATIVE_LIBRARY_HANDLE PALAPI PAL_LoadLibraryDirect( IN LPCWSTR lpLibFileName); PALIMPORT BOOL PALAPI PAL_FreeLibraryDirect( IN NATIVE_LIBRARY_HANDLE dl_handle); PALIMPORT HMODULE PALAPI PAL_GetPalHostModule(); PALIMPORT FARPROC PALAPI PAL_GetProcAddressDirect( IN NATIVE_LIBRARY_HANDLE dl_handle, IN LPCSTR lpProcName); /*++ Function: PAL_LOADLoadPEFile Abstract Loads a PE file into memory. Properly maps all of the sections in the PE file. Returns a pointer to the loaded base. Parameters: IN hFile - The file to load IN offset - offset within hFile where the PE "file" is located Return value: A valid base address if successful. 0 if failure --*/ PALIMPORT PVOID PALAPI PAL_LOADLoadPEFile(HANDLE hFile, size_t offset); /*++ PAL_LOADUnloadPEFile Unload a PE file that was loaded by PAL_LOADLoadPEFile(). Parameters: IN ptr - the file pointer returned by PAL_LOADLoadPEFile() Return value: TRUE - success FALSE - failure (incorrect ptr, etc.) --*/ PALIMPORT BOOL PALAPI PAL_LOADUnloadPEFile(PVOID ptr); /*++ PAL_LOADMarkSectionAsNotNeeded Mark a section as NotNeeded that was loaded by PAL_LOADLoadPEFile(). 
Parameters: IN ptr - the section address mapped by PAL_LOADLoadPEFile() Return value: TRUE - success FALSE - failure (incorrect ptr, etc.) --*/ BOOL PALAPI PAL_LOADMarkSectionAsNotNeeded(void * ptr); #ifdef UNICODE #define LoadLibrary LoadLibraryW #define LoadLibraryEx LoadLibraryExW #else #define LoadLibrary LoadLibraryA #define LoadLibraryEx LoadLibraryExA #endif PALIMPORT FARPROC PALAPI GetProcAddress( IN HMODULE hModule, IN LPCSTR lpProcName); PALIMPORT BOOL PALAPI FreeLibrary( IN OUT HMODULE hLibModule); PALIMPORT PAL_NORETURN VOID PALAPI FreeLibraryAndExitThread( IN HMODULE hLibModule, IN DWORD dwExitCode); PALIMPORT BOOL PALAPI DisableThreadLibraryCalls( IN HMODULE hLibModule); PALIMPORT DWORD PALAPI GetModuleFileNameW( IN HMODULE hModule, OUT LPWSTR lpFileName, IN DWORD nSize); #ifdef UNICODE #define GetModuleFileName GetModuleFileNameW #else #define GetModuleFileName GetModuleFileNameA #endif PALIMPORT DWORD PALAPI GetModuleFileNameExW( IN HANDLE hProcess, IN HMODULE hModule, OUT LPWSTR lpFilename, IN DWORD nSize ); #ifdef UNICODE #define GetModuleFileNameEx GetModuleFileNameExW #endif // Get base address of the module containing a given symbol PALIMPORT LPCVOID PALAPI PAL_GetSymbolModuleBase(PVOID symbol); PALIMPORT int PALAPI PAL_CopyModuleData(PVOID moduleBase, PVOID destinationBufferStart, PVOID destinationBufferEnd);; PALIMPORT LPCSTR PALAPI PAL_GetLoadLibraryError(); PALIMPORT LPVOID PALAPI PAL_VirtualReserveFromExecutableMemoryAllocatorWithinRange( IN LPCVOID lpBeginAddress, IN LPCVOID lpEndAddress, IN SIZE_T dwSize); PALIMPORT void PALAPI PAL_GetExecutableMemoryAllocatorPreferredRange( OUT PVOID *start, OUT PVOID *end); PALIMPORT LPVOID PALAPI VirtualAlloc( IN LPVOID lpAddress, IN SIZE_T dwSize, IN DWORD flAllocationType, IN DWORD flProtect); PALIMPORT BOOL PALAPI VirtualFree( IN LPVOID lpAddress, IN SIZE_T dwSize, IN DWORD dwFreeType); #if defined(HOST_OSX) && defined(HOST_ARM64) PALIMPORT VOID PALAPI PAL_JitWriteProtect(bool writeEnable); #endif 
// defined(HOST_OSX) && defined(HOST_ARM64) PALIMPORT BOOL PALAPI VirtualProtect( IN LPVOID lpAddress, IN SIZE_T dwSize, IN DWORD flNewProtect, OUT PDWORD lpflOldProtect); typedef struct _MEMORYSTATUSEX { DWORD dwLength; DWORD dwMemoryLoad; DWORDLONG ullTotalPhys; DWORDLONG ullAvailPhys; DWORDLONG ullTotalPageFile; DWORDLONG ullAvailPageFile; DWORDLONG ullTotalVirtual; DWORDLONG ullAvailVirtual; DWORDLONG ullAvailExtendedVirtual; } MEMORYSTATUSEX, *LPMEMORYSTATUSEX; PALIMPORT BOOL PALAPI GlobalMemoryStatusEx( IN OUT LPMEMORYSTATUSEX lpBuffer); typedef struct _MEMORY_BASIC_INFORMATION { PVOID BaseAddress; PVOID AllocationBase_PAL_Undefined; DWORD AllocationProtect; SIZE_T RegionSize; DWORD State; DWORD Protect; DWORD Type; } MEMORY_BASIC_INFORMATION, *PMEMORY_BASIC_INFORMATION; PALIMPORT SIZE_T PALAPI VirtualQuery( IN LPCVOID lpAddress, OUT PMEMORY_BASIC_INFORMATION lpBuffer, IN SIZE_T dwLength); #define MoveMemory memmove #define CopyMemory memcpy #define FillMemory(Destination,Length,Fill) memset((Destination),(Fill),(Length)) #define ZeroMemory(Destination,Length) memset((Destination),0,(Length)) PALIMPORT BOOL PALAPI FlushInstructionCache( IN HANDLE hProcess, IN LPCVOID lpBaseAddress, IN SIZE_T dwSize); #define MAX_LEADBYTES 12 #define MAX_DEFAULTCHAR 2 PALIMPORT UINT PALAPI GetACP(void); typedef struct _cpinfo { UINT MaxCharSize; BYTE DefaultChar[MAX_DEFAULTCHAR]; BYTE LeadByte[MAX_LEADBYTES]; } CPINFO, *LPCPINFO; #define MB_PRECOMPOSED 0x00000001 #define MB_ERR_INVALID_CHARS 0x00000008 PALIMPORT int PALAPI MultiByteToWideChar( IN UINT CodePage, IN DWORD dwFlags, IN LPCSTR lpMultiByteStr, IN int cbMultiByte, OUT LPWSTR lpWideCharStr, IN int cchWideChar); #define WC_NO_BEST_FIT_CHARS 0x00000400 PALIMPORT int PALAPI WideCharToMultiByte( IN UINT CodePage, IN DWORD dwFlags, IN LPCWSTR lpWideCharStr, IN int cchWideChar, OUT LPSTR lpMultiByteStr, IN int cbMultyByte, IN LPCSTR lpDefaultChar, OUT LPBOOL lpUsedDefaultChar); #define EXCEPTION_NONCONTINUABLE 0x1 #define 
EXCEPTION_UNWINDING 0x2 #define EXCEPTION_EXIT_UNWIND 0x4 // Exit unwind is in progress (not used by PAL SEH) #define EXCEPTION_NESTED_CALL 0x10 // Nested exception handler call #define EXCEPTION_TARGET_UNWIND 0x20 // Target unwind in progress #define EXCEPTION_COLLIDED_UNWIND 0x40 // Collided exception handler call #define EXCEPTION_SKIP_VEH 0x200 #define EXCEPTION_UNWIND (EXCEPTION_UNWINDING | EXCEPTION_EXIT_UNWIND | \ EXCEPTION_TARGET_UNWIND | EXCEPTION_COLLIDED_UNWIND) #define IS_DISPATCHING(Flag) ((Flag & EXCEPTION_UNWIND) == 0) #define IS_UNWINDING(Flag) ((Flag & EXCEPTION_UNWIND) != 0) #define IS_TARGET_UNWIND(Flag) (Flag & EXCEPTION_TARGET_UNWIND) #define EXCEPTION_IS_SIGNAL 0x100 #define EXCEPTION_MAXIMUM_PARAMETERS 15 // Index in the ExceptionInformation array where we will keep the reference // to the native exception that needs to be deleted when dispatching // exception in managed code. #define NATIVE_EXCEPTION_ASYNC_SLOT (EXCEPTION_MAXIMUM_PARAMETERS-1) typedef struct _EXCEPTION_RECORD { DWORD ExceptionCode; DWORD ExceptionFlags; struct _EXCEPTION_RECORD *ExceptionRecord; PVOID ExceptionAddress; DWORD NumberParameters; ULONG_PTR ExceptionInformation[EXCEPTION_MAXIMUM_PARAMETERS]; } EXCEPTION_RECORD, *PEXCEPTION_RECORD; typedef struct _EXCEPTION_POINTERS { PEXCEPTION_RECORD ExceptionRecord; PCONTEXT ContextRecord; } EXCEPTION_POINTERS, *PEXCEPTION_POINTERS, *LPEXCEPTION_POINTERS; typedef LONG EXCEPTION_DISPOSITION; enum { ExceptionContinueExecution, ExceptionContinueSearch, ExceptionNestedException, ExceptionCollidedUnwind, }; // // A function table entry is generated for each frame function. 
// typedef struct _RUNTIME_FUNCTION { DWORD BeginAddress; #ifdef TARGET_AMD64 DWORD EndAddress; #endif DWORD UnwindData; } RUNTIME_FUNCTION, *PRUNTIME_FUNCTION; #define STANDARD_RIGHTS_REQUIRED (0x000F0000L) #define SYNCHRONIZE (0x00100000L) #define READ_CONTROL (0x00020000L) #define MAXIMUM_ALLOWED (0x02000000L) #define EVENT_MODIFY_STATE (0x0002) #define EVENT_ALL_ACCESS (STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0x3) #define MUTANT_QUERY_STATE (0x0001) #define MUTANT_ALL_ACCESS (STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | MUTANT_QUERY_STATE) #define MUTEX_ALL_ACCESS MUTANT_ALL_ACCESS #define SEMAPHORE_MODIFY_STATE (0x0002) #define SEMAPHORE_ALL_ACCESS (STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0x3) #define PROCESS_TERMINATE (0x0001) #define PROCESS_CREATE_THREAD (0x0002) #define PROCESS_SET_SESSIONID (0x0004) #define PROCESS_VM_OPERATION (0x0008) #define PROCESS_VM_READ (0x0010) #define PROCESS_VM_WRITE (0x0020) #define PROCESS_DUP_HANDLE (0x0040) #define PROCESS_CREATE_PROCESS (0x0080) #define PROCESS_SET_QUOTA (0x0100) #define PROCESS_SET_INFORMATION (0x0200) #define PROCESS_QUERY_INFORMATION (0x0400) #define PROCESS_SUSPEND_RESUME (0x0800) #define PROCESS_ALL_ACCESS (STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | \ 0xFFF) PALIMPORT HANDLE PALAPI OpenProcess( IN DWORD dwDesiredAccess, /* PROCESS_DUP_HANDLE or PROCESS_ALL_ACCESS */ IN BOOL bInheritHandle, IN DWORD dwProcessId ); PALIMPORT BOOL PALAPI EnumProcessModules( IN HANDLE hProcess, OUT HMODULE *lphModule, IN DWORD cb, OUT LPDWORD lpcbNeeded ); PALIMPORT VOID PALAPI OutputDebugStringA( IN LPCSTR lpOutputString); PALIMPORT VOID PALAPI OutputDebugStringW( IN LPCWSTR lpOutputStrig); #ifdef UNICODE #define OutputDebugString OutputDebugStringW #else #define OutputDebugString OutputDebugStringA #endif PALIMPORT VOID PALAPI DebugBreak(); PALIMPORT DWORD PALAPI GetEnvironmentVariableW( IN LPCWSTR lpName, OUT LPWSTR lpBuffer, IN DWORD nSize); #ifdef UNICODE #define GetEnvironmentVariable GetEnvironmentVariableW #else 
#define GetEnvironmentVariable GetEnvironmentVariableA #endif PALIMPORT BOOL PALAPI SetEnvironmentVariableW( IN LPCWSTR lpName, IN LPCWSTR lpValue); #ifdef UNICODE #define SetEnvironmentVariable SetEnvironmentVariableW #else #define SetEnvironmentVariable SetEnvironmentVariableA #endif PALIMPORT LPWSTR PALAPI GetEnvironmentStringsW(); #define GetEnvironmentStrings GetEnvironmentStringsW PALIMPORT BOOL PALAPI FreeEnvironmentStringsW( IN LPWSTR); #define FreeEnvironmentStrings FreeEnvironmentStringsW PALIMPORT BOOL PALAPI CloseHandle( IN OUT HANDLE hObject); PALIMPORT VOID PALAPI RaiseException( IN DWORD dwExceptionCode, IN DWORD dwExceptionFlags, IN DWORD nNumberOfArguments, IN CONST ULONG_PTR *lpArguments); PALIMPORT VOID PALAPI RaiseFailFastException( IN PEXCEPTION_RECORD pExceptionRecord, IN PCONTEXT pContextRecord, IN DWORD dwFlags); PALIMPORT DWORD PALAPI GetTickCount(); PALIMPORT ULONGLONG PALAPI GetTickCount64(); PALIMPORT BOOL PALAPI QueryPerformanceCounter( OUT LARGE_INTEGER *lpPerformanceCount ); PALIMPORT BOOL PALAPI QueryPerformanceFrequency( OUT LARGE_INTEGER *lpFrequency ); PALIMPORT BOOL PALAPI QueryThreadCycleTime( IN HANDLE ThreadHandle, OUT PULONG64 CycleTime); PALIMPORT INT PALAPI PAL_nanosleep( IN long timeInNs); typedef EXCEPTION_DISPOSITION (PALAPI_NOEXPORT *PVECTORED_EXCEPTION_HANDLER)( struct _EXCEPTION_POINTERS *ExceptionPointers); // Define BitScanForward64 and BitScanForward // Per MSDN, BitScanForward64 will search the mask data from LSB to MSB for a set bit. // If one is found, its bit position is stored in the out PDWORD argument and 1 is returned; // otherwise, an undefined value is stored in the out PDWORD argument and 0 is returned. // // On GCC, the equivalent function is __builtin_ffsll. It returns 1+index of the least // significant set bit, or 0 if if mask is zero. // // The same is true for BitScanForward, except that the GCC function is __builtin_ffs. 
// This region provides the PAL's inline replacements for the Win32 bit-scan,
// interlocked, and memory-barrier primitives, implemented on top of the
// GCC/Clang __builtin_*, __sync_*, and __atomic_* intrinsics.

/*++
Function:
BitScanForward

Searches qwMask from LSB to MSB for a set bit. If one is found, its zero-based
position is stored in *Index and TRUE is returned; otherwise *Index is
undefined and FALSE is returned (matching the Win32 contract).
--*/
EXTERN_C
PALIMPORT
inline
unsigned char
PALAPI
BitScanForward(
    IN OUT PDWORD Index,
    IN UINT qwMask)
{
    // __builtin_ffs returns 1 + the index of the least significant set bit,
    // or 0 when the mask is zero.
    int iIndex = __builtin_ffs(qwMask);
    // Set the Index after deducting unity
    *Index = (DWORD)(iIndex - 1);
    // Both GCC and Clang generate better, smaller code if we check whether the
    // mask was/is zero rather than the equivalent check that iIndex is zero.
    return qwMask != 0 ? TRUE : FALSE;
}

/*++
Function:
BitScanForward64

64-bit variant of BitScanForward; see above.
--*/
EXTERN_C
PALIMPORT
inline
unsigned char
PALAPI
BitScanForward64(
    IN OUT PDWORD Index,
    IN UINT64 qwMask)
{
    int iIndex = __builtin_ffsll(qwMask);
    // Set the Index after deducting unity
    *Index = (DWORD)(iIndex - 1);
    // Both GCC and Clang generate better, smaller code if we check whether the
    // mask was/is zero rather than the equivalent check that iIndex is zero.
    return qwMask != 0 ? TRUE : FALSE;
}

// Define BitScanReverse64 and BitScanReverse
// Per MSDN, BitScanReverse64 will search the mask data from MSB to LSB for a set bit.
// If one is found, its bit position is stored in the out PDWORD argument and 1 is returned.
// Otherwise, an undefined value is stored in the out PDWORD argument and 0 is returned.
//
// GCC/clang don't have a directly equivalent intrinsic; they do provide the __builtin_clzll
// intrinsic, which returns the number of leading 0-bits in x starting at the most significant
// bit position (the result is undefined when x = 0).
//
// The same is true for BitScanReverse, except that the GCC function is __builtin_clzl.
EXTERN_C
PALIMPORT
inline
unsigned char
PALAPI
BitScanReverse(
    IN OUT PDWORD Index,
    IN UINT qwMask)
{
    // The result of __builtin_clzl is undefined when qwMask is zero,
    // but it's still OK to call the intrinsic in that case (just don't use the output).
    // Unconditionally calling the intrinsic in this way allows the compiler to
    // emit branchless code for this function when possible (depending on how the
    // intrinsic is implemented for the target platform).
    int lzcount = __builtin_clzl(qwMask);
    *Index = (DWORD)(31 - lzcount);
    return qwMask != 0;
}

EXTERN_C
PALIMPORT
inline
unsigned char
PALAPI
BitScanReverse64(
    IN OUT PDWORD Index,
    IN UINT64 qwMask)
{
    // The result of __builtin_clzll is undefined when qwMask is zero,
    // but it's still OK to call the intrinsic in that case (just don't use the output).
    // Unconditionally calling the intrinsic in this way allows the compiler to
    // emit branchless code for this function when possible (depending on how the
    // intrinsic is implemented for the target platform).
    int lzcount = __builtin_clzll(qwMask);
    *Index = (DWORD)(63 - lzcount);
    return qwMask != 0;
}

FORCEINLINE void PAL_ArmInterlockedOperationBarrier()
{
#ifdef HOST_ARM64
    // On arm64, most of the __sync* functions generate a code sequence like:
    //   loop:
    //     ldaxr (load acquire exclusive)
    //     ...
    //     stlxr (store release exclusive)
    //     cbnz loop
    //
    // It is possible for a load following the code sequence above to be reordered to occur prior to the store above due to the
    // release barrier, this is substantiated by https://github.com/dotnet/coreclr/pull/17508. Interlocked operations in the PAL
    // require the load to occur after the store. This memory barrier should be used following a call to a __sync* function to
    // prevent that reordering. Code generated for arm32 includes a 'dmb' after 'cbnz', so no issue there at the moment.
    __sync_synchronize();
#endif // HOST_ARM64
#ifdef HOST_LOONGARCH64
    __sync_synchronize();
#endif
}

/*++
Function:
InterlockedAdd

The InterlockedAdd function adds the value of the specified variable
with another specified value. The function prevents more than one thread
from using the same variable simultaneously.

Parameters

lpAddend
[in/out] Pointer to the variable to add.

value
[in] The value to add.

Return Values

The return value is the resulting added value.
--*/
EXTERN_C
PALIMPORT
inline
LONG
PALAPI
InterlockedAdd(
    IN OUT LONG volatile *lpAddend,
    IN LONG value)
{
    LONG result = __sync_add_and_fetch(lpAddend, value);
    PAL_ArmInterlockedOperationBarrier();
    return result;
}

// 64-bit variant of InterlockedAdd; see above.
EXTERN_C
PALIMPORT
inline
LONGLONG
PALAPI
InterlockedAdd64(
    IN OUT LONGLONG volatile *lpAddend,
    IN LONGLONG value)
{
    LONGLONG result = __sync_add_and_fetch(lpAddend, value);
    PAL_ArmInterlockedOperationBarrier();
    return result;
}

/*++
Function:
InterlockedIncrement

The InterlockedIncrement function increments (increases by one) the
value of the specified variable and checks the resulting value. The
function prevents more than one thread from using the same variable
simultaneously.

Parameters

lpAddend
[in/out] Pointer to the variable to increment.

Return Values

The return value is the resulting incremented value.
--*/
EXTERN_C
PALIMPORT
inline
LONG
PALAPI
InterlockedIncrement(
    IN OUT LONG volatile *lpAddend)
{
    LONG result = __sync_add_and_fetch(lpAddend, (LONG)1);
    PAL_ArmInterlockedOperationBarrier();
    return result;
}

// 64-bit variant of InterlockedIncrement; see above.
EXTERN_C
PALIMPORT
inline
LONGLONG
PALAPI
InterlockedIncrement64(
    IN OUT LONGLONG volatile *lpAddend)
{
    LONGLONG result = __sync_add_and_fetch(lpAddend, (LONGLONG)1);
    PAL_ArmInterlockedOperationBarrier();
    return result;
}

/*++
Function:
InterlockedDecrement

The InterlockedDecrement function decrements (decreases by one) the
value of the specified variable and checks the resulting value. The
function prevents more than one thread from using the same variable
simultaneously.

Parameters

lpAddend
[in/out] Pointer to the variable to decrement.

Return Values

The return value is the resulting decremented value.
--*/
EXTERN_C
PALIMPORT
inline
LONG
PALAPI
InterlockedDecrement(
    IN OUT LONG volatile *lpAddend)
{
    LONG result = __sync_sub_and_fetch(lpAddend, (LONG)1);
    PAL_ArmInterlockedOperationBarrier();
    return result;
}

// The acquire/release variants map to the full-barrier implementation.
#define InterlockedDecrementAcquire InterlockedDecrement
#define InterlockedDecrementRelease InterlockedDecrement

// 64-bit variant of InterlockedDecrement; see above.
EXTERN_C
PALIMPORT
inline
LONGLONG
PALAPI
InterlockedDecrement64(
    IN OUT LONGLONG volatile *lpAddend)
{
    LONGLONG result = __sync_sub_and_fetch(lpAddend, (LONGLONG)1);
    PAL_ArmInterlockedOperationBarrier();
    return result;
}

/*++
Function:
InterlockedExchange

The InterlockedExchange function atomically exchanges a pair of
values. The function prevents more than one thread from using the same
variable simultaneously.

Parameters

Target
[in/out] Pointer to the value to exchange. The function sets
this variable to Value, and returns its prior value.

Value
[in] Specifies a new value for the variable pointed to by Target.

Return Values

The function returns the initial value pointed to by Target.
--*/
EXTERN_C
PALIMPORT
inline
LONG
PALAPI
InterlockedExchange(
    IN OUT LONG volatile *Target,
    IN LONG Value)
{
    LONG result = __atomic_exchange_n(Target, Value, __ATOMIC_ACQ_REL);
    PAL_ArmInterlockedOperationBarrier();
    return result;
}

// 64-bit variant of InterlockedExchange; see above.
EXTERN_C
PALIMPORT
inline
LONGLONG
PALAPI
InterlockedExchange64(
    IN OUT LONGLONG volatile *Target,
    IN LONGLONG Value)
{
    LONGLONG result = __atomic_exchange_n(Target, Value, __ATOMIC_ACQ_REL);
    PAL_ArmInterlockedOperationBarrier();
    return result;
}

/*++
Function:
InterlockedCompareExchange

The InterlockedCompareExchange function performs an atomic comparison
of the specified values and exchanges the values, based on the outcome
of the comparison. The function prevents more than one thread from
using the same variable simultaneously.

If you are exchanging pointer values, this function has been
superseded by the InterlockedCompareExchangePointer function.

Parameters

Destination
[in/out] Specifies the address of the destination value. The sign is ignored.

Exchange
[in] Specifies the exchange value. The sign is ignored.

Comperand
[in] Specifies the value to compare to Destination. The sign is ignored.

Return Values

The return value is the initial value of the destination.
--*/
EXTERN_C
PALIMPORT
inline
LONG
PALAPI
InterlockedCompareExchange(
    IN OUT LONG volatile *Destination,
    IN LONG Exchange,
    IN LONG Comperand)
{
    LONG result =
        __sync_val_compare_and_swap(
            Destination, /* The pointer to a variable whose value is to be compared with. */
            Comperand, /* The value to be compared */
            Exchange /* The value to be stored */);
    PAL_ArmInterlockedOperationBarrier();
    return result;
}

// The acquire/release variants map to the full-barrier implementation.
#define InterlockedCompareExchangeAcquire InterlockedCompareExchange
#define InterlockedCompareExchangeRelease InterlockedCompareExchange

// See the 32-bit variant in interlock2.s
EXTERN_C
PALIMPORT
inline
LONGLONG
PALAPI
InterlockedCompareExchange64(
    IN OUT LONGLONG volatile *Destination,
    IN LONGLONG Exchange,
    IN LONGLONG Comperand)
{
    LONGLONG result =
        __sync_val_compare_and_swap(
            Destination, /* The pointer to a variable whose value is to be compared with. */
            Comperand, /* The value to be compared */
            Exchange /* The value to be stored */);
    PAL_ArmInterlockedOperationBarrier();
    return result;
}

/*++
Function:
InterlockedExchangeAdd

The InterlockedExchangeAdd function atomically adds the value of 'Value'
to the variable that 'Addend' points to.

Parameters

Addend
[in/out] Pointer to the variable to be added to.

Return Values

The return value is the original value that 'Addend' pointed to.
--*/
EXTERN_C
PALIMPORT
inline
LONG
PALAPI
InterlockedExchangeAdd(
    IN OUT LONG volatile *Addend,
    IN LONG Value)
{
    LONG result = __sync_fetch_and_add(Addend, Value);
    PAL_ArmInterlockedOperationBarrier();
    return result;
}

// 64-bit variant of InterlockedExchangeAdd; see above.
EXTERN_C
PALIMPORT
inline
LONGLONG
PALAPI
InterlockedExchangeAdd64(
    IN OUT LONGLONG volatile *Addend,
    IN LONGLONG Value)
{
    LONGLONG result = __sync_fetch_and_add(Addend, Value);
    PAL_ArmInterlockedOperationBarrier();
    return result;
}

// Atomically ANDs Value into *Destination and returns the original value.
EXTERN_C
PALIMPORT
inline
LONG
PALAPI
InterlockedAnd(
    IN OUT LONG volatile *Destination,
    IN LONG Value)
{
    LONG result = __sync_fetch_and_and(Destination, Value);
    PAL_ArmInterlockedOperationBarrier();
    return result;
}

// Atomically ORs Value into *Destination and returns the original value.
EXTERN_C
PALIMPORT
inline
LONG
PALAPI
InterlockedOr(
    IN OUT LONG volatile *Destination,
    IN LONG Value)
{
    LONG result = __sync_fetch_and_or(Destination, Value);
    PAL_ArmInterlockedOperationBarrier();
    return result;
}

// Atomically clears bit 'Bit' of *Base; returns the bit's original value.
EXTERN_C
PALIMPORT
inline
UCHAR
PALAPI
InterlockedBitTestAndReset(
    IN OUT LONG volatile *Base,
    IN LONG Bit)
{
    return (InterlockedAnd(Base, ~(1 << Bit)) & (1 << Bit)) != 0;
}

// Atomically sets bit 'Bit' of *Base; returns the bit's original value.
EXTERN_C
PALIMPORT
inline
UCHAR
PALAPI
InterlockedBitTestAndSet(
    IN OUT LONG volatile *Base,
    IN LONG Bit)
{
    return (InterlockedOr(Base, (1 << Bit)) & (1 << Bit)) != 0;
}

// Pointer-sized interlocked operations route to the 32- or 64-bit variants
// depending on the host word size.
#if defined(HOST_64BIT)
#define InterlockedExchangePointer(Target, Value) ((PVOID)InterlockedExchange64((PLONG64)(Target), (LONGLONG)(Value)))
#define InterlockedCompareExchangePointer(Destination, ExChange, Comperand) ((PVOID)InterlockedCompareExchange64((PLONG64)(Destination), (LONGLONG)(ExChange), (LONGLONG)(Comperand)))
#else
#define InterlockedExchangePointer(Target, Value) ((PVOID)(UINT_PTR)InterlockedExchange((PLONG)(UINT_PTR)(Target), (LONG)(UINT_PTR)(Value)))
#define InterlockedCompareExchangePointer(Destination, ExChange, Comperand) ((PVOID)(UINT_PTR)InterlockedCompareExchange((PLONG)(UINT_PTR)(Destination), (LONG)(UINT_PTR)(ExChange), (LONG)(UINT_PTR)(Comperand)))
#endif

/*++
Function:
MemoryBarrier

The MemoryBarrier function creates a full memory barrier.
--*/
EXTERN_C
PALIMPORT
inline
VOID
PALAPI
MemoryBarrier()
{
    __sync_synchronize();
}

// Hint to the processor that the current thread is spin-waiting
// (x86 'pause', ARM 'yield'); a no-op on platforms without an equivalent.
EXTERN_C
PALIMPORT
inline
VOID
PALAPI
YieldProcessor()
{
#if defined(HOST_X86) || defined(HOST_AMD64)
    __asm__ __volatile__(
        "rep\n"
        "nop");
#elif defined(HOST_ARM) || defined(HOST_ARM64)
    __asm__ __volatile__( "yield");
#elif defined(HOST_LOONGARCH64)
    __asm__ volatile( "dbar 0; \n");
#else
    return;
#endif
}

PALIMPORT
DWORD
PALAPI
GetCurrentProcessorNumber();

/*++
Function:
PAL_HasGetCurrentProcessorNumber

Checks if GetCurrentProcessorNumber is available in the current environment
--*/
PALIMPORT
BOOL
PALAPI
PAL_HasGetCurrentProcessorNumber();

#define FORMAT_MESSAGE_ALLOCATE_BUFFER 0x00000100
#define FORMAT_MESSAGE_IGNORE_INSERTS 0x00000200
#define FORMAT_MESSAGE_FROM_STRING 0x00000400
#define FORMAT_MESSAGE_FROM_SYSTEM 0x00001000
#define FORMAT_MESSAGE_ARGUMENT_ARRAY 0x00002000
#define FORMAT_MESSAGE_MAX_WIDTH_MASK 0x000000FF

PALIMPORT
DWORD
PALAPI
FormatMessageW(
    IN DWORD dwFlags,
    IN LPCVOID lpSource,
    IN DWORD dwMessageId,
    IN DWORD dwLanguageId,
    OUT LPWSTR lpBffer,
    IN DWORD nSize,
    IN va_list *Arguments);

#ifdef UNICODE
#define FormatMessage FormatMessageW
#endif

PALIMPORT
DWORD
PALAPI
GetLastError();

PALIMPORT
VOID
PALAPI
SetLastError(
    IN DWORD dwErrCode);

PALIMPORT
LPWSTR
PALAPI
GetCommandLineW();

#ifdef UNICODE
#define GetCommandLine GetCommandLineW
#endif

PALIMPORT
VOID
PALAPI
RtlRestoreContext(
    IN PCONTEXT ContextRecord,
    IN PEXCEPTION_RECORD ExceptionRecord
);

PALIMPORT
VOID
PALAPI
RtlCaptureContext(
    OUT PCONTEXT ContextRecord
);

PALIMPORT
VOID
PALAPI
FlushProcessWriteBuffers();

// Callbacks used by PAL_InjectActivation to run an activation function on a
// target thread when it is at a safe point.
typedef void (*PAL_ActivationFunction)(CONTEXT *context);
typedef BOOL (*PAL_SafeActivationCheckFunction)(SIZE_T ip, BOOL checkingCurrentThread);

PALIMPORT
VOID
PALAPI
PAL_SetActivationFunction(
    IN PAL_ActivationFunction pActivationFunction,
    IN PAL_SafeActivationCheckFunction pSafeActivationCheckFunction);

PALIMPORT
BOOL
PALAPI
PAL_InjectActivation(
    IN HANDLE hThread
);

#define
VER_PLATFORM_WIN32_WINDOWS 1 #define VER_PLATFORM_WIN32_NT 2 #define VER_PLATFORM_UNIX 10 #define VER_PLATFORM_MACOSX 11 typedef struct _OSVERSIONINFOA { DWORD dwOSVersionInfoSize; DWORD dwMajorVersion; DWORD dwMinorVersion; DWORD dwBuildNumber; DWORD dwPlatformId; CHAR szCSDVersion[ 128 ]; } OSVERSIONINFOA, *POSVERSIONINFOA, *LPOSVERSIONINFOA; typedef struct _OSVERSIONINFOW { DWORD dwOSVersionInfoSize; DWORD dwMajorVersion; DWORD dwMinorVersion; DWORD dwBuildNumber; DWORD dwPlatformId; WCHAR szCSDVersion[ 128 ]; } OSVERSIONINFOW, *POSVERSIONINFOW, *LPOSVERSIONINFOW; #ifdef UNICODE typedef OSVERSIONINFOW OSVERSIONINFO; typedef POSVERSIONINFOW POSVERSIONINFO; typedef LPOSVERSIONINFOW LPOSVERSIONINFO; #else typedef OSVERSIONINFOA OSVERSIONINFO; typedef POSVERSIONINFOA POSVERSIONINFO; typedef LPOSVERSIONINFOA LPOSVERSIONINFO; #endif typedef struct _OSVERSIONINFOEXA { DWORD dwOSVersionInfoSize; DWORD dwMajorVersion; DWORD dwMinorVersion; DWORD dwBuildNumber; DWORD dwPlatformId; CHAR szCSDVersion[ 128 ]; WORD wServicePackMajor; WORD wServicePackMinor; WORD wSuiteMask; BYTE wProductType; BYTE wReserved; } OSVERSIONINFOEXA, *POSVERSIONINFOEXA, *LPOSVERSIONINFOEXA; typedef struct _OSVERSIONINFOEXW { DWORD dwOSVersionInfoSize; DWORD dwMajorVersion; DWORD dwMinorVersion; DWORD dwBuildNumber; DWORD dwPlatformId; WCHAR szCSDVersion[ 128 ]; WORD wServicePackMajor; WORD wServicePackMinor; WORD wSuiteMask; BYTE wProductType; BYTE wReserved; } OSVERSIONINFOEXW, *POSVERSIONINFOEXW, *LPOSVERSIONINFOEXW; #ifdef UNICODE typedef OSVERSIONINFOEXW OSVERSIONINFOEX; typedef POSVERSIONINFOEXW POSVERSIONINFOEX; typedef LPOSVERSIONINFOEXW LPOSVERSIONINFOEX; #else typedef OSVERSIONINFOEXA OSVERSIONINFOEX; typedef POSVERSIONINFOEXA POSVERSIONINFOEX; typedef LPOSVERSIONINFOEXA LPOSVERSIONINFOEX; #endif typedef struct _SYSTEM_INFO { WORD wProcessorArchitecture_PAL_Undefined; WORD wReserved_PAL_Undefined; // NOTE: diff from winbase.h - no obsolete dwOemId union DWORD dwPageSize; LPVOID 
lpMinimumApplicationAddress; LPVOID lpMaximumApplicationAddress; DWORD_PTR dwActiveProcessorMask_PAL_Undefined; DWORD dwNumberOfProcessors; DWORD dwProcessorType_PAL_Undefined; DWORD dwAllocationGranularity; WORD wProcessorLevel_PAL_Undefined; WORD wProcessorRevision_PAL_Undefined; } SYSTEM_INFO, *LPSYSTEM_INFO; PALIMPORT VOID PALAPI GetSystemInfo( OUT LPSYSTEM_INFO lpSystemInfo); PALIMPORT BOOL PALAPI CreatePipe( OUT PHANDLE hReadPipe, OUT PHANDLE hWritePipe, IN LPSECURITY_ATTRIBUTES lpPipeAttributes, IN DWORD nSize ); // // NUMA related APIs // PALIMPORT BOOL PALAPI GetNumaHighestNodeNumber( OUT PULONG HighestNodeNumber ); PALIMPORT BOOL PALAPI PAL_GetNumaProcessorNode(WORD procNo, WORD* node); PALIMPORT LPVOID PALAPI VirtualAllocExNuma( IN HANDLE hProcess, IN OPTIONAL LPVOID lpAddress, IN SIZE_T dwSize, IN DWORD flAllocationType, IN DWORD flProtect, IN DWORD nndPreferred ); PALIMPORT BOOL PALAPI PAL_SetCurrentThreadAffinity(WORD procNo); PALIMPORT BOOL PALAPI PAL_GetCurrentThreadAffinitySet(SIZE_T size, UINT_PTR* data); // // The types of events that can be logged. // #define EVENTLOG_SUCCESS 0x0000 #define EVENTLOG_ERROR_TYPE 0x0001 #define EVENTLOG_WARNING_TYPE 0x0002 #define EVENTLOG_INFORMATION_TYPE 0x0004 #define EVENTLOG_AUDIT_SUCCESS 0x0008 #define EVENTLOG_AUDIT_FAILURE 0x0010 #if defined FEATURE_PAL_ANSI #include "palprivate.h" #endif //FEATURE_PAL_ANSI /******************* C Runtime Entrypoints *******************************/ /* Some C runtime functions needs to be reimplemented by the PAL. 
To avoid name collisions, those functions have been renamed using defines */ #ifndef PAL_STDCPP_COMPAT #define exit PAL_exit #define printf PAL_printf #define vprintf PAL_vprintf #define wprintf PAL_wprintf #define wcstod PAL_wcstod #define wcstoul PAL_wcstoul #define wcscat PAL_wcscat #define wcscpy PAL_wcscpy #define wcslen PAL_wcslen #define wcsncmp PAL_wcsncmp #define wcschr PAL_wcschr #define wcsrchr PAL_wcsrchr #define wcsstr PAL_wcsstr #define swscanf PAL_swscanf #define wcspbrk PAL_wcspbrk #define wcscmp PAL_wcscmp #define wcsncpy PAL_wcsncpy #define realloc PAL_realloc #define fopen PAL_fopen #define strtok PAL_strtok #define strtoul PAL_strtoul #define strtoull PAL_strtoull #define fprintf PAL_fprintf #define fwprintf PAL_fwprintf #define vfprintf PAL_vfprintf #define vfwprintf PAL_vfwprintf #define rand PAL_rand #define time PAL_time #define getenv PAL_getenv #define fgets PAL_fgets #define qsort PAL_qsort #define bsearch PAL_bsearch #define ferror PAL_ferror #define fread PAL_fread #define fwrite PAL_fwrite #define ftell PAL_ftell #define fclose PAL_fclose #define fflush PAL_fflush #define fputs PAL_fputs #define fseek PAL_fseek #define fgetpos PAL_fgetpos #define fsetpos PAL_fsetpos #define setvbuf PAL_setvbuf #define acos PAL_acos #define asin PAL_asin #define atan2 PAL_atan2 #define exp PAL_exp #define ilogb PAL_ilogb #define log PAL_log #define log10 PAL_log10 #define pow PAL_pow #define sincos PAL_sincos #define acosf PAL_acosf #define asinf PAL_asinf #define atan2f PAL_atan2f #define expf PAL_expf #define ilogbf PAL_ilogbf #define logf PAL_logf #define log10f PAL_log10f #define powf PAL_powf #define sincosf PAL_sincosf #define malloc PAL_malloc #define free PAL_free #define _strdup PAL__strdup #define _open PAL__open #define _pread PAL__pread #define _close PAL__close #define _wcstoui64 PAL__wcstoui64 #define _flushall PAL__flushall #define strnlen PAL_strnlen #define wcsnlen PAL_wcsnlen #ifdef HOST_AMD64 #define _mm_getcsr PAL__mm_getcsr #define 
_mm_setcsr PAL__mm_setcsr #endif // HOST_AMD64 #endif // !PAL_STDCPP_COMPAT #ifndef _CONST_RETURN #ifdef __cplusplus #define _CONST_RETURN const #define _CRT_CONST_CORRECT_OVERLOADS #else #define _CONST_RETURN #endif #endif /* For backwards compatibility */ #define _WConst_return _CONST_RETURN #define EOF (-1) typedef int errno_t; #if defined(__WINT_TYPE__) typedef __WINT_TYPE__ wint_t; #else typedef unsigned int wint_t; #endif #ifndef PAL_STDCPP_COMPAT #if defined(_DEBUG) /*++ Function: PAL_memcpy Overlapping buffer-safe version of memcpy. See MSDN doc for memcpy --*/ EXTERN_C PALIMPORT DLLEXPORT void *PAL_memcpy (void *dest, const void * src, size_t count); PALIMPORT void * __cdecl memcpy(void *, const void *, size_t) THROW_DECL; #define memcpy PAL_memcpy #define IS_PAL_memcpy 1 #define TEST_PAL_DEFERRED(def) IS_##def #define IS_REDEFINED_IN_PAL(def) TEST_PAL_DEFERRED(def) #else //defined(_DEBUG) PALIMPORT void * __cdecl memcpy(void *, const void *, size_t); #endif //defined(_DEBUG) PALIMPORT int __cdecl memcmp(const void *, const void *, size_t); PALIMPORT void * __cdecl memset(void *, int, size_t); PALIMPORT void * __cdecl memmove(void *, const void *, size_t); PALIMPORT void * __cdecl memchr(const void *, int, size_t); PALIMPORT long long int __cdecl atoll(const char *) MATH_THROW_DECL; PALIMPORT size_t __cdecl strlen(const char *); PALIMPORT int __cdecl strcmp(const char*, const char *); PALIMPORT int __cdecl strncmp(const char*, const char *, size_t); PALIMPORT int __cdecl _strnicmp(const char *, const char *, size_t); PALIMPORT char * __cdecl strcat(char *, const char *); PALIMPORT char * __cdecl strncat(char *, const char *, size_t); PALIMPORT char * __cdecl strcpy(char *, const char *); PALIMPORT char * __cdecl strncpy(char *, const char *, size_t); PALIMPORT char * __cdecl strchr(const char *, int); PALIMPORT char * __cdecl strrchr(const char *, int); PALIMPORT char * __cdecl strpbrk(const char *, const char *); PALIMPORT char * __cdecl strstr(const char 
*, const char *); PALIMPORT char * __cdecl strtok(char *, const char *); PALIMPORT size_t __cdecl strspn(const char *, const char *); PALIMPORT size_t __cdecl strcspn(const char *, const char *); PALIMPORT int __cdecl atoi(const char *); PALIMPORT ULONG __cdecl strtoul(const char *, char **, int); PALIMPORT ULONGLONG __cdecl strtoull(const char *, char **, int); PALIMPORT double __cdecl atof(const char *); PALIMPORT double __cdecl strtod(const char *, char **); PALIMPORT int __cdecl isprint(int); PALIMPORT int __cdecl isspace(int); PALIMPORT int __cdecl isalpha(int); PALIMPORT int __cdecl isalnum(int); PALIMPORT int __cdecl isdigit(int); PALIMPORT int __cdecl isxdigit(int); PALIMPORT int __cdecl isupper(int); PALIMPORT int __cdecl islower(int); PALIMPORT int __cdecl tolower(int); PALIMPORT int __cdecl toupper(int); PALIMPORT int __cdecl iswalpha(wint_t); PALIMPORT int __cdecl iswdigit(wint_t); PALIMPORT int __cdecl iswupper(wint_t); PALIMPORT int __cdecl iswprint(wint_t); PALIMPORT int __cdecl iswspace(wint_t); PALIMPORT int __cdecl iswxdigit(wint_t); PALIMPORT wint_t __cdecl towupper(wint_t); PALIMPORT wint_t __cdecl towlower(wint_t); #endif // PAL_STDCPP_COMPAT /* _TRUNCATE */ #if !defined(_TRUNCATE) #define _TRUNCATE ((size_t)-1) #endif PALIMPORT DLLEXPORT errno_t __cdecl memcpy_s(void *, size_t, const void *, size_t) THROW_DECL; PALIMPORT errno_t __cdecl memmove_s(void *, size_t, const void *, size_t); PALIMPORT DLLEXPORT int __cdecl _stricmp(const char *, const char *); PALIMPORT DLLEXPORT int __cdecl vsprintf_s(char *, size_t, const char *, va_list); PALIMPORT char * __cdecl _gcvt_s(char *, int, double, int); PALIMPORT int __cdecl __iscsym(int); PALIMPORT DLLEXPORT int __cdecl _wcsicmp(const WCHAR *, const WCHAR*); PALIMPORT int __cdecl _wcsnicmp(const WCHAR *, const WCHAR *, size_t); PALIMPORT int __cdecl _vsnprintf(char *, size_t, const char *, va_list); PALIMPORT DLLEXPORT int __cdecl _vsnprintf_s(char *, size_t, size_t, const char *, va_list); PALIMPORT 
DLLEXPORT int __cdecl _vsnwprintf_s(WCHAR *, size_t, size_t, const WCHAR *, va_list); PALIMPORT DLLEXPORT int __cdecl _snwprintf_s(WCHAR *, size_t, size_t, const WCHAR *, ...); PALIMPORT DLLEXPORT int __cdecl _snprintf_s(char *, size_t, size_t, const char *, ...); PALIMPORT DLLEXPORT int __cdecl sprintf_s(char *, size_t, const char *, ... ); PALIMPORT DLLEXPORT int __cdecl swprintf_s(WCHAR *, size_t, const WCHAR *, ... ); PALIMPORT int __cdecl _snwprintf_s(WCHAR *, size_t, size_t, const WCHAR *, ...); PALIMPORT int __cdecl vswprintf_s( WCHAR *, size_t, const WCHAR *, va_list); PALIMPORT DLLEXPORT int __cdecl sscanf_s(const char *, const char *, ...); PALIMPORT DLLEXPORT errno_t __cdecl _itow_s(int, WCHAR *, size_t, int); PALIMPORT DLLEXPORT size_t __cdecl PAL_wcslen(const WCHAR *); PALIMPORT DLLEXPORT int __cdecl PAL_wcscmp(const WCHAR*, const WCHAR*); PALIMPORT DLLEXPORT int __cdecl PAL_wcsncmp(const WCHAR *, const WCHAR *, size_t); PALIMPORT DLLEXPORT WCHAR * __cdecl PAL_wcscat(WCHAR *, const WCHAR *); PALIMPORT WCHAR * __cdecl PAL_wcscpy(WCHAR *, const WCHAR *); PALIMPORT WCHAR * __cdecl PAL_wcsncpy(WCHAR *, const WCHAR *, size_t); PALIMPORT DLLEXPORT const WCHAR * __cdecl PAL_wcschr(const WCHAR *, WCHAR); PALIMPORT DLLEXPORT const WCHAR * __cdecl PAL_wcsrchr(const WCHAR *, WCHAR); PALIMPORT WCHAR _WConst_return * __cdecl PAL_wcspbrk(const WCHAR *, const WCHAR *); PALIMPORT DLLEXPORT WCHAR _WConst_return * __cdecl PAL_wcsstr(const WCHAR *, const WCHAR *); PALIMPORT int __cdecl PAL_swprintf(WCHAR *, const WCHAR *, ...); PALIMPORT int __cdecl PAL_vswprintf(WCHAR *, const WCHAR *, va_list); PALIMPORT int __cdecl PAL_swscanf(const WCHAR *, const WCHAR *, ...); PALIMPORT DLLEXPORT ULONG __cdecl PAL_wcstoul(const WCHAR *, WCHAR **, int); PALIMPORT double __cdecl PAL_wcstod(const WCHAR *, WCHAR **); PALIMPORT errno_t __cdecl _wcslwr_s(WCHAR *, size_t sz); PALIMPORT DLLEXPORT ULONGLONG _wcstoui64(const WCHAR *, WCHAR **, int); PALIMPORT DLLEXPORT errno_t __cdecl 
_i64tow_s(long long, WCHAR *, size_t, int); PALIMPORT int __cdecl _wtoi(const WCHAR *); #ifdef __cplusplus extern "C++" { inline WCHAR *PAL_wcschr(WCHAR* S, WCHAR C) {return ((WCHAR *)PAL_wcschr((const WCHAR *)S, C)); } inline WCHAR *PAL_wcsrchr(WCHAR* S, WCHAR C) {return ((WCHAR *)PAL_wcsrchr((const WCHAR *)S, C)); } inline WCHAR *PAL_wcspbrk(WCHAR* S, const WCHAR* P) {return ((WCHAR *)PAL_wcspbrk((const WCHAR *)S, P)); } inline WCHAR *PAL_wcsstr(WCHAR* S, const WCHAR* P) {return ((WCHAR *)PAL_wcsstr((const WCHAR *)S, P)); } } #endif #if defined(__llvm__) #define HAS_ROTL __has_builtin(_rotl) #define HAS_ROTR __has_builtin(_rotr) #else #define HAS_ROTL 0 #define HAS_ROTR 0 #endif #if !HAS_ROTL /*++ Function: _rotl See MSDN doc. --*/ EXTERN_C PALIMPORT inline unsigned int __cdecl _rotl(unsigned int value, int shift) { unsigned int retval = 0; shift &= 0x1f; retval = (value << shift) | (value >> (sizeof(int) * CHAR_BIT - shift)); return retval; } #endif // !HAS_ROTL // On 64 bit unix, make the long an int. #ifdef HOST_64BIT #define _lrotl _rotl #endif // HOST_64BIT #if !HAS_ROTR /*++ Function: _rotr See MSDN doc. 
--*/ EXTERN_C PALIMPORT inline unsigned int __cdecl _rotr(unsigned int value, int shift) { unsigned int retval; shift &= 0x1f; retval = (value >> shift) | (value << (sizeof(int) * CHAR_BIT - shift)); return retval; } #endif // !HAS_ROTR PALIMPORT int __cdecl abs(int); // clang complains if this is declared with __int64 PALIMPORT long long __cdecl llabs(long long); #ifndef PAL_STDCPP_COMPAT PALIMPORT int __cdecl _finite(double); PALIMPORT int __cdecl _isnan(double); PALIMPORT double __cdecl _copysign(double, double); PALIMPORT double __cdecl acos(double); PALIMPORT double __cdecl acosh(double) MATH_THROW_DECL; PALIMPORT double __cdecl asin(double); PALIMPORT double __cdecl asinh(double) MATH_THROW_DECL; PALIMPORT double __cdecl atan(double) MATH_THROW_DECL; PALIMPORT double __cdecl atanh(double) MATH_THROW_DECL; PALIMPORT double __cdecl atan2(double, double); PALIMPORT double __cdecl cbrt(double) MATH_THROW_DECL; PALIMPORT double __cdecl ceil(double); PALIMPORT double __cdecl cos(double); PALIMPORT double __cdecl cosh(double); PALIMPORT double __cdecl exp(double); PALIMPORT double __cdecl fabs(double); PALIMPORT double __cdecl floor(double); PALIMPORT double __cdecl fmod(double, double); PALIMPORT double __cdecl fma(double, double, double) MATH_THROW_DECL; PALIMPORT int __cdecl ilogb(double); PALIMPORT double __cdecl log(double); PALIMPORT double __cdecl log2(double) MATH_THROW_DECL; PALIMPORT double __cdecl log10(double); PALIMPORT double __cdecl modf(double, double*); PALIMPORT double __cdecl pow(double, double); PALIMPORT double __cdecl sin(double); PALIMPORT void __cdecl sincos(double, double*, double*); PALIMPORT double __cdecl sinh(double); PALIMPORT double __cdecl sqrt(double); PALIMPORT double __cdecl tan(double); PALIMPORT double __cdecl tanh(double); PALIMPORT double __cdecl trunc(double); PALIMPORT int __cdecl _finitef(float); PALIMPORT int __cdecl _isnanf(float); PALIMPORT float __cdecl _copysignf(float, float); PALIMPORT float __cdecl acosf(float); 
PALIMPORT float __cdecl acoshf(float) MATH_THROW_DECL; PALIMPORT float __cdecl asinf(float); PALIMPORT float __cdecl asinhf(float) MATH_THROW_DECL; PALIMPORT float __cdecl atanf(float) MATH_THROW_DECL; PALIMPORT float __cdecl atanhf(float) MATH_THROW_DECL; PALIMPORT float __cdecl atan2f(float, float); PALIMPORT float __cdecl cbrtf(float) MATH_THROW_DECL; PALIMPORT float __cdecl ceilf(float); PALIMPORT float __cdecl cosf(float); PALIMPORT float __cdecl coshf(float); PALIMPORT float __cdecl expf(float); PALIMPORT float __cdecl fabsf(float); PALIMPORT float __cdecl floorf(float); PALIMPORT float __cdecl fmodf(float, float); PALIMPORT float __cdecl fmaf(float, float, float) MATH_THROW_DECL; PALIMPORT int __cdecl ilogbf(float); PALIMPORT float __cdecl logf(float); PALIMPORT float __cdecl log2f(float) MATH_THROW_DECL; PALIMPORT float __cdecl log10f(float); PALIMPORT float __cdecl modff(float, float*); PALIMPORT float __cdecl powf(float, float); PALIMPORT float __cdecl sinf(float); PALIMPORT void __cdecl sincosf(float, float*, float*); PALIMPORT float __cdecl sinhf(float); PALIMPORT float __cdecl sqrtf(float); PALIMPORT float __cdecl tanf(float); PALIMPORT float __cdecl tanhf(float); PALIMPORT float __cdecl truncf(float); #endif // !PAL_STDCPP_COMPAT #ifndef PAL_STDCPP_COMPAT #ifdef __cplusplus extern "C++" { inline __int64 abs(__int64 _X) { return llabs(_X); } } #endif PALIMPORT DLLEXPORT void * __cdecl malloc(size_t); PALIMPORT DLLEXPORT void __cdecl free(void *); PALIMPORT DLLEXPORT void * __cdecl realloc(void *, size_t); PALIMPORT char * __cdecl _strdup(const char *); #if defined(_MSC_VER) #define alloca _alloca #else #define _alloca alloca #endif //_MSC_VER #define alloca __builtin_alloca #define max(a, b) (((a) > (b)) ? (a) : (b)) #define min(a, b) (((a) < (b)) ? 
(a) : (b)) #endif // !PAL_STDCPP_COMPAT PALIMPORT PAL_NORETURN void __cdecl exit(int); #ifndef PAL_STDCPP_COMPAT PALIMPORT DLLEXPORT void __cdecl qsort(void *, size_t, size_t, int(__cdecl *)(const void *, const void *)); PALIMPORT DLLEXPORT void * __cdecl bsearch(const void *, const void *, size_t, size_t, int(__cdecl *)(const void *, const void *)); PALIMPORT time_t __cdecl time(time_t *); #endif // !PAL_STDCPP_COMPAT PALIMPORT DLLEXPORT int __cdecl _open(const char *szPath, int nFlags, ...); PALIMPORT DLLEXPORT size_t __cdecl _pread(int fd, void *buf, size_t nbytes, ULONG64 offset); PALIMPORT DLLEXPORT int __cdecl _close(int); PALIMPORT DLLEXPORT int __cdecl _flushall(); #ifdef PAL_STDCPP_COMPAT struct _PAL_FILE; typedef struct _PAL_FILE PAL_FILE; #else // PAL_STDCPP_COMPAT struct _FILE; typedef struct _FILE FILE; typedef struct _FILE PAL_FILE; #define SEEK_SET 0 #define SEEK_CUR 1 #define SEEK_END 2 /* Locale categories */ #define LC_ALL 0 #define LC_COLLATE 1 #define LC_CTYPE 2 #define LC_MONETARY 3 #define LC_NUMERIC 4 #define LC_TIME 5 #define _IOFBF 0 /* setvbuf should set fully buffered */ #define _IOLBF 1 /* setvbuf should set line buffered */ #define _IONBF 2 /* setvbuf should set unbuffered */ #endif // PAL_STDCPP_COMPAT PALIMPORT int __cdecl PAL_fclose(PAL_FILE *); PALIMPORT DLLEXPORT int __cdecl PAL_fflush(PAL_FILE *); PALIMPORT size_t __cdecl PAL_fwrite(const void *, size_t, size_t, PAL_FILE *); PALIMPORT size_t __cdecl PAL_fread(void *, size_t, size_t, PAL_FILE *); PALIMPORT char * __cdecl PAL_fgets(char *, int, PAL_FILE *); PALIMPORT int __cdecl PAL_fputs(const char *, PAL_FILE *); PALIMPORT DLLEXPORT int __cdecl PAL_fprintf(PAL_FILE *, const char *, ...); PALIMPORT int __cdecl PAL_vfprintf(PAL_FILE *, const char *, va_list); PALIMPORT int __cdecl PAL_fseek(PAL_FILE *, LONG, int); PALIMPORT LONG __cdecl PAL_ftell(PAL_FILE *); PALIMPORT int __cdecl PAL_ferror(PAL_FILE *); PALIMPORT PAL_FILE * __cdecl PAL_fopen(const char *, const char *); PALIMPORT 
int __cdecl PAL_setvbuf(PAL_FILE *stream, char *, int, size_t); PALIMPORT DLLEXPORT int __cdecl PAL_fwprintf(PAL_FILE *, const WCHAR *, ...); PALIMPORT int __cdecl PAL_vfwprintf(PAL_FILE *, const WCHAR *, va_list); PALIMPORT int __cdecl PAL_wprintf(const WCHAR*, ...); PALIMPORT int __cdecl _getw(PAL_FILE *); PALIMPORT int __cdecl _putw(int, PAL_FILE *); PALIMPORT PAL_FILE * __cdecl _fdopen(int, const char *); PALIMPORT PAL_FILE * __cdecl _wfopen(const WCHAR *, const WCHAR *); /* Maximum value that can be returned by the rand function. */ #ifndef PAL_STDCPP_COMPAT #define RAND_MAX 0x7fff #endif // !PAL_STDCPP_COMPAT PALIMPORT int __cdecl rand(void); PALIMPORT void __cdecl srand(unsigned int); PALIMPORT DLLEXPORT int __cdecl printf(const char *, ...); PALIMPORT int __cdecl vprintf(const char *, va_list); #ifdef _MSC_VER #define PAL_get_caller _MSC_VER #else #define PAL_get_caller 0 #endif PALIMPORT DLLEXPORT PAL_FILE * __cdecl PAL_get_stdout(int caller); PALIMPORT PAL_FILE * __cdecl PAL_get_stdin(int caller); PALIMPORT DLLEXPORT PAL_FILE * __cdecl PAL_get_stderr(int caller); PALIMPORT DLLEXPORT int * __cdecl PAL_errno(int caller); #ifdef PAL_STDCPP_COMPAT #define PAL_stdout (PAL_get_stdout(PAL_get_caller)) #define PAL_stdin (PAL_get_stdin(PAL_get_caller)) #define PAL_stderr (PAL_get_stderr(PAL_get_caller)) #define PAL_errno (*PAL_errno(PAL_get_caller)) #else // PAL_STDCPP_COMPAT #define stdout (PAL_get_stdout(PAL_get_caller)) #define stdin (PAL_get_stdin(PAL_get_caller)) #define stderr (PAL_get_stderr(PAL_get_caller)) #define errno (*PAL_errno(PAL_get_caller)) #endif // PAL_STDCPP_COMPAT PALIMPORT DLLEXPORT char * __cdecl getenv(const char *); PALIMPORT DLLEXPORT int __cdecl _putenv(const char *); #define ERANGE 34 PALIMPORT WCHAR __cdecl PAL_ToUpperInvariant(WCHAR); PALIMPORT WCHAR __cdecl PAL_ToLowerInvariant(WCHAR); /******************* PAL-specific I/O completion port *****************/ typedef struct _PAL_IOCP_CPU_INFORMATION { union { FILETIME 
ftLastRecordedIdleTime; FILETIME ftLastRecordedCurrentTime; } LastRecordedTime; FILETIME ftLastRecordedKernelTime; FILETIME ftLastRecordedUserTime; } PAL_IOCP_CPU_INFORMATION; PALIMPORT INT PALAPI PAL_GetCPUBusyTime( IN OUT PAL_IOCP_CPU_INFORMATION *lpPrevCPUInfo); /****************PAL Perf functions for PInvoke*********************/ #if PAL_PERF PALIMPORT VOID PALAPI PAL_EnableProcessProfile(); PALIMPORT VOID PALAPI PAL_DisableProcessProfile(); PALIMPORT BOOL PALAPI PAL_IsProcessProfileEnabled(); PALIMPORT INT64 PALAPI PAL_GetCpuTickCount(); #endif // PAL_PERF /******************* PAL functions for SIMD extensions *****************/ PALIMPORT unsigned int _mm_getcsr(void); PALIMPORT void _mm_setcsr(unsigned int i); /******************* PAL functions for CPU capability detection *******/ #ifdef __cplusplus class CORJIT_FLAGS; PALIMPORT VOID PALAPI PAL_GetJitCpuCapabilityFlags(CORJIT_FLAGS *flags); #endif #ifdef __cplusplus PALIMPORT VOID PALAPI PAL_FreeExceptionRecords( IN EXCEPTION_RECORD *exceptionRecord, IN CONTEXT *contextRecord); #define EXCEPTION_CONTINUE_SEARCH 0 #define EXCEPTION_EXECUTE_HANDLER 1 #define EXCEPTION_CONTINUE_EXECUTION -1 struct PAL_SEHException { private: static const SIZE_T NoTargetFrameSp = (SIZE_T)SIZE_MAX; void Move(PAL_SEHException& ex) { ExceptionPointers.ExceptionRecord = ex.ExceptionPointers.ExceptionRecord; ExceptionPointers.ContextRecord = ex.ExceptionPointers.ContextRecord; TargetFrameSp = ex.TargetFrameSp; RecordsOnStack = ex.RecordsOnStack; ManagedToNativeExceptionCallback = ex.ManagedToNativeExceptionCallback; ManagedToNativeExceptionCallbackContext = ex.ManagedToNativeExceptionCallbackContext; ex.Clear(); } void FreeRecords() { if (ExceptionPointers.ExceptionRecord != NULL && !RecordsOnStack ) { PAL_FreeExceptionRecords(ExceptionPointers.ExceptionRecord, ExceptionPointers.ContextRecord); ExceptionPointers.ExceptionRecord = NULL; ExceptionPointers.ContextRecord = NULL; } } public: EXCEPTION_POINTERS ExceptionPointers; // Target 
frame stack pointer set before the 2nd pass. SIZE_T TargetFrameSp; bool RecordsOnStack; void(*ManagedToNativeExceptionCallback)(void* context); void* ManagedToNativeExceptionCallbackContext; PAL_SEHException(EXCEPTION_RECORD *pExceptionRecord, CONTEXT *pContextRecord, bool onStack = false) { ExceptionPointers.ExceptionRecord = pExceptionRecord; ExceptionPointers.ContextRecord = pContextRecord; TargetFrameSp = NoTargetFrameSp; RecordsOnStack = onStack; ManagedToNativeExceptionCallback = NULL; ManagedToNativeExceptionCallbackContext = NULL; } PAL_SEHException() { Clear(); } // The copy constructor and copy assignment operators are deleted so that the PAL_SEHException // can never be copied, only moved. This enables simple lifetime management of the exception and // context records, since there is always just one PAL_SEHException instance referring to the same records. PAL_SEHException(const PAL_SEHException& ex) = delete; PAL_SEHException& operator=(const PAL_SEHException& ex) = delete; PAL_SEHException(PAL_SEHException&& ex) { Move(ex); } PAL_SEHException& operator=(PAL_SEHException&& ex) { FreeRecords(); Move(ex); return *this; } ~PAL_SEHException() { FreeRecords(); } void Clear() { ExceptionPointers.ExceptionRecord = NULL; ExceptionPointers.ContextRecord = NULL; TargetFrameSp = NoTargetFrameSp; RecordsOnStack = false; ManagedToNativeExceptionCallback = NULL; ManagedToNativeExceptionCallbackContext = NULL; } CONTEXT* GetContextRecord() { return ExceptionPointers.ContextRecord; } EXCEPTION_RECORD* GetExceptionRecord() { return ExceptionPointers.ExceptionRecord; } bool IsFirstPass() { return (TargetFrameSp == NoTargetFrameSp); } void SecondPassDone() { TargetFrameSp = NoTargetFrameSp; } bool HasPropagateExceptionCallback() { return ManagedToNativeExceptionCallback != NULL; } void SetPropagateExceptionCallback( void(*callback)(void*), void* context) { ManagedToNativeExceptionCallback = callback; ManagedToNativeExceptionCallbackContext = context; } }; typedef BOOL 
(*PHARDWARE_EXCEPTION_HANDLER)(PAL_SEHException* ex); typedef BOOL (*PHARDWARE_EXCEPTION_SAFETY_CHECK_FUNCTION)(PCONTEXT contextRecord, PEXCEPTION_RECORD exceptionRecord); typedef VOID (*PTERMINATION_REQUEST_HANDLER)(int terminationExitCode); typedef DWORD (*PGET_GCMARKER_EXCEPTION_CODE)(LPVOID ip); PALIMPORT VOID PALAPI PAL_SetHardwareExceptionHandler( IN PHARDWARE_EXCEPTION_HANDLER exceptionHandler, IN PHARDWARE_EXCEPTION_SAFETY_CHECK_FUNCTION exceptionCheckFunction); PALIMPORT VOID PALAPI PAL_SetGetGcMarkerExceptionCode( IN PGET_GCMARKER_EXCEPTION_CODE getGcMarkerExceptionCode); PALIMPORT VOID PALAPI PAL_ThrowExceptionFromContext( IN CONTEXT* context, IN PAL_SEHException* ex); PALIMPORT VOID PALAPI PAL_SetTerminationRequestHandler( IN PTERMINATION_REQUEST_HANDLER terminationRequestHandler); PALIMPORT VOID PALAPI PAL_CatchHardwareExceptionHolderEnter(); PALIMPORT VOID PALAPI PAL_CatchHardwareExceptionHolderExit(); // // This holder is used to indicate that a hardware // exception should be raised as a C++ exception // to better emulate SEH on the xplat platforms. // class CatchHardwareExceptionHolder { public: CatchHardwareExceptionHolder() { PAL_CatchHardwareExceptionHolderEnter(); } ~CatchHardwareExceptionHolder() { PAL_CatchHardwareExceptionHolderExit(); } static bool IsEnabled(); }; // // NOTE: This is only defined in one PAL test. // #ifdef FEATURE_ENABLE_HARDWARE_EXCEPTIONS #define HardwareExceptionHolder CatchHardwareExceptionHolder __catchHardwareException; #else #define HardwareExceptionHolder #endif // FEATURE_ENABLE_HARDWARE_EXCEPTIONS class NativeExceptionHolderBase; PALIMPORT PALAPI NativeExceptionHolderBase ** PAL_GetNativeExceptionHolderHead(); extern "C++" { // // This is the base class of native exception holder used to provide // the filter function to the exception dispatcher. This allows the // filter to be called during the first pass to better emulate SEH // the xplat platforms that only have C++ exception support. 
// class NativeExceptionHolderBase { // Save the address of the holder head so the destructor // doesn't have access the slow (on Linux) TLS value again. NativeExceptionHolderBase **m_head; // The next holder on the stack NativeExceptionHolderBase *m_next; protected: NativeExceptionHolderBase() { m_head = nullptr; m_next = nullptr; } ~NativeExceptionHolderBase() { // Only destroy if Push was called if (m_head != nullptr) { *m_head = m_next; m_head = nullptr; m_next = nullptr; } } public: // Calls the holder's filter handler. virtual EXCEPTION_DISPOSITION InvokeFilter(PAL_SEHException& ex) = 0; // Adds the holder to the "stack" of holders. This is done explicitly instead // of in the constructor was to avoid the mess of move constructors combined // with return value optimization (in CreateHolder). void Push() { NativeExceptionHolderBase **head = PAL_GetNativeExceptionHolderHead(); m_head = head; m_next = *head; *head = this; } // Given the currentHolder and locals stack range find the next holder starting with this one // To find the first holder, pass nullptr as the currentHolder. static NativeExceptionHolderBase *FindNextHolder(NativeExceptionHolderBase *currentHolder, PVOID frameLowAddress, PVOID frameHighAddress); }; // // This is the second part of the native exception filter holder. It is // templated because the lambda used to wrap the exception filter is a // unknown type. // template<class FilterType> class NativeExceptionHolder : public NativeExceptionHolderBase { FilterType* m_exceptionFilter; public: NativeExceptionHolder(FilterType* exceptionFilter) : NativeExceptionHolderBase() { m_exceptionFilter = exceptionFilter; } virtual EXCEPTION_DISPOSITION InvokeFilter(PAL_SEHException& ex) { return (*m_exceptionFilter)(ex); } }; // // This is a native exception holder that is used when the catch catches // all exceptions. 
// class NativeExceptionHolderCatchAll : public NativeExceptionHolderBase { public: NativeExceptionHolderCatchAll() : NativeExceptionHolderBase() { } virtual EXCEPTION_DISPOSITION InvokeFilter(PAL_SEHException& ex) { return EXCEPTION_EXECUTE_HANDLER; } }; // This is a native exception holder that doesn't catch any exceptions. class NativeExceptionHolderNoCatch : public NativeExceptionHolderBase { public: NativeExceptionHolderNoCatch() : NativeExceptionHolderBase() { } virtual EXCEPTION_DISPOSITION InvokeFilter(PAL_SEHException& ex) { return EXCEPTION_CONTINUE_SEARCH; } }; // // This factory class for the native exception holder is necessary because // templated functions don't need the explicit type parameter and can infer // the template type from the parameter. // class NativeExceptionHolderFactory { public: template<class FilterType> static NativeExceptionHolder<FilterType> CreateHolder(FilterType* exceptionFilter) { return NativeExceptionHolder<FilterType>(exceptionFilter); } }; // Start of a try block for exceptions raised by RaiseException #define PAL_TRY(__ParamType, __paramDef, __paramRef) \ { \ __ParamType __param = __paramRef; \ auto tryBlock = [](__ParamType __paramDef) \ { // Start of an exception handler. If an exception raised by the RaiseException // occurs in the try block and the disposition is EXCEPTION_EXECUTE_HANDLER, // the handler code is executed. If the disposition is EXCEPTION_CONTINUE_SEARCH, // the exception is rethrown. The EXCEPTION_CONTINUE_EXECUTION disposition is // not supported. 
#define PAL_EXCEPT(dispositionExpression) \ }; \ const bool isFinally = false; \ auto finallyBlock = []() {}; \ EXCEPTION_DISPOSITION disposition = EXCEPTION_CONTINUE_EXECUTION; \ auto exceptionFilter = [&disposition, &__param](PAL_SEHException& ex) \ { \ (void)__param; \ disposition = dispositionExpression; \ _ASSERTE(disposition != EXCEPTION_CONTINUE_EXECUTION); \ return disposition; \ }; \ try \ { \ HardwareExceptionHolder \ auto __exceptionHolder = NativeExceptionHolderFactory::CreateHolder(&exceptionFilter); \ __exceptionHolder.Push(); \ tryBlock(__param); \ } \ catch (PAL_SEHException& ex) \ { \ if (disposition == EXCEPTION_CONTINUE_EXECUTION) \ { \ exceptionFilter(ex); \ } \ if (disposition == EXCEPTION_CONTINUE_SEARCH) \ { \ throw; \ } \ ex.SecondPassDone(); // Start of an exception handler. It works the same way as the PAL_EXCEPT except // that the disposition is obtained by calling the specified filter. #define PAL_EXCEPT_FILTER(filter) PAL_EXCEPT(filter(&ex.ExceptionPointers, __param)) // Start of a finally block. The finally block is executed both when the try block // finishes or when an exception is raised using the RaiseException in it. #define PAL_FINALLY \ }; \ const bool isFinally = true; \ auto finallyBlock = [&]() \ { // End of an except or a finally block. #define PAL_ENDTRY \ }; \ if (isFinally) \ { \ try \ { \ tryBlock(__param); \ } \ catch (...) \ { \ finallyBlock(); \ throw; \ } \ finallyBlock(); \ } \ } } // extern "C++" #define PAL_CPP_THROW(type, obj) { throw obj; } #define PAL_CPP_RETHROW { throw; } #define PAL_CPP_TRY try { HardwareExceptionHolder #define PAL_CPP_CATCH_EXCEPTION(ident) } catch (Exception *ident) { #define PAL_CPP_CATCH_EXCEPTION_NOARG } catch (Exception *) { #define PAL_CPP_CATCH_DERIVED(type, ident) } catch (type *ident) { #define PAL_CPP_CATCH_ALL } catch (...) { \ try { throw; } \ catch (PAL_SEHException& ex) { ex.SecondPassDone(); } \ catch (...) 
{} #define PAL_CPP_ENDTRY } #define PAL_TRY_FOR_DLLMAIN(ParamType, paramDef, paramRef, _reason) PAL_TRY(ParamType, paramDef, paramRef) #endif // __cplusplus // Platform-specific library naming // #ifdef __APPLE__ #define MAKEDLLNAME_W(name) u"lib" name u".dylib" #define MAKEDLLNAME_A(name) "lib" name ".dylib" #else #define MAKEDLLNAME_W(name) u"lib" name u".so" #define MAKEDLLNAME_A(name) "lib" name ".so" #endif #ifdef UNICODE #define MAKEDLLNAME(x) MAKEDLLNAME_W(x) #else #define MAKEDLLNAME(x) MAKEDLLNAME_A(x) #endif #define PAL_SHLIB_PREFIX "lib" #define PAL_SHLIB_PREFIX_W u"lib" #if __APPLE__ #define PAL_SHLIB_SUFFIX ".dylib" #define PAL_SHLIB_SUFFIX_W u".dylib" #else #define PAL_SHLIB_SUFFIX ".so" #define PAL_SHLIB_SUFFIX_W u".so" #endif #define DBG_EXCEPTION_HANDLED ((DWORD )0x00010001L) #define DBG_CONTINUE ((DWORD )0x00010002L) #define DBG_EXCEPTION_NOT_HANDLED ((DWORD )0x80010001L) #define DBG_TERMINATE_THREAD ((DWORD )0x40010003L) #define DBG_TERMINATE_PROCESS ((DWORD )0x40010004L) #define DBG_CONTROL_C ((DWORD )0x40010005L) #define DBG_RIPEXCEPTION ((DWORD )0x40010007L) #define DBG_CONTROL_BREAK ((DWORD )0x40010008L) #define DBG_COMMAND_EXCEPTION ((DWORD )0x40010009L) #define STATUS_USER_APC ((DWORD )0x000000C0L) #define STATUS_GUARD_PAGE_VIOLATION ((DWORD )0x80000001L) #define STATUS_DATATYPE_MISALIGNMENT ((DWORD )0x80000002L) #define STATUS_BREAKPOINT ((DWORD )0x80000003L) #define STATUS_SINGLE_STEP ((DWORD )0x80000004L) #define STATUS_LONGJUMP ((DWORD )0x80000026L) #define STATUS_UNWIND_CONSOLIDATE ((DWORD )0x80000029L) #define STATUS_ACCESS_VIOLATION ((DWORD )0xC0000005L) #define STATUS_IN_PAGE_ERROR ((DWORD )0xC0000006L) #define STATUS_INVALID_HANDLE ((DWORD )0xC0000008L) #define STATUS_NO_MEMORY ((DWORD )0xC0000017L) #define STATUS_ILLEGAL_INSTRUCTION ((DWORD )0xC000001DL) #define STATUS_NONCONTINUABLE_EXCEPTION ((DWORD )0xC0000025L) #define STATUS_INVALID_DISPOSITION ((DWORD )0xC0000026L) #define STATUS_ARRAY_BOUNDS_EXCEEDED ((DWORD )0xC000008CL) 
#define STATUS_FLOAT_DENORMAL_OPERAND ((DWORD )0xC000008DL) #define STATUS_FLOAT_DIVIDE_BY_ZERO ((DWORD )0xC000008EL) #define STATUS_FLOAT_INEXACT_RESULT ((DWORD )0xC000008FL) #define STATUS_FLOAT_INVALID_OPERATION ((DWORD )0xC0000090L) #define STATUS_FLOAT_OVERFLOW ((DWORD )0xC0000091L) #define STATUS_FLOAT_STACK_CHECK ((DWORD )0xC0000092L) #define STATUS_FLOAT_UNDERFLOW ((DWORD )0xC0000093L) #define STATUS_INTEGER_DIVIDE_BY_ZERO ((DWORD )0xC0000094L) #define STATUS_INTEGER_OVERFLOW ((DWORD )0xC0000095L) #define STATUS_PRIVILEGED_INSTRUCTION ((DWORD )0xC0000096L) #define STATUS_STACK_OVERFLOW ((DWORD )0xC00000FDL) #define STATUS_CONTROL_C_EXIT ((DWORD )0xC000013AL) #define WAIT_IO_COMPLETION STATUS_USER_APC #define EXCEPTION_ACCESS_VIOLATION STATUS_ACCESS_VIOLATION #define EXCEPTION_DATATYPE_MISALIGNMENT STATUS_DATATYPE_MISALIGNMENT #define EXCEPTION_BREAKPOINT STATUS_BREAKPOINT #define EXCEPTION_SINGLE_STEP STATUS_SINGLE_STEP #define EXCEPTION_ARRAY_BOUNDS_EXCEEDED STATUS_ARRAY_BOUNDS_EXCEEDED #define EXCEPTION_FLT_DENORMAL_OPERAND STATUS_FLOAT_DENORMAL_OPERAND #define EXCEPTION_FLT_DIVIDE_BY_ZERO STATUS_FLOAT_DIVIDE_BY_ZERO #define EXCEPTION_FLT_INEXACT_RESULT STATUS_FLOAT_INEXACT_RESULT #define EXCEPTION_FLT_INVALID_OPERATION STATUS_FLOAT_INVALID_OPERATION #define EXCEPTION_FLT_OVERFLOW STATUS_FLOAT_OVERFLOW #define EXCEPTION_FLT_STACK_CHECK STATUS_FLOAT_STACK_CHECK #define EXCEPTION_FLT_UNDERFLOW STATUS_FLOAT_UNDERFLOW #define EXCEPTION_INT_DIVIDE_BY_ZERO STATUS_INTEGER_DIVIDE_BY_ZERO #define EXCEPTION_INT_OVERFLOW STATUS_INTEGER_OVERFLOW #define EXCEPTION_PRIV_INSTRUCTION STATUS_PRIVILEGED_INSTRUCTION #define EXCEPTION_IN_PAGE_ERROR STATUS_IN_PAGE_ERROR #define EXCEPTION_ILLEGAL_INSTRUCTION STATUS_ILLEGAL_INSTRUCTION #define EXCEPTION_NONCONTINUABLE_EXCEPTION STATUS_NONCONTINUABLE_EXCEPTION #define EXCEPTION_STACK_OVERFLOW STATUS_STACK_OVERFLOW #define EXCEPTION_INVALID_DISPOSITION STATUS_INVALID_DISPOSITION #define EXCEPTION_GUARD_PAGE 
STATUS_GUARD_PAGE_VIOLATION #define EXCEPTION_INVALID_HANDLE STATUS_INVALID_HANDLE #define CONTROL_C_EXIT STATUS_CONTROL_C_EXIT /******************* HRESULT types ****************************************/ #define FACILITY_WINDOWS 8 #define FACILITY_URT 19 #define FACILITY_UMI 22 #define FACILITY_SXS 23 #define FACILITY_STORAGE 3 #define FACILITY_SSPI 9 #define FACILITY_SCARD 16 #define FACILITY_SETUPAPI 15 #define FACILITY_SECURITY 9 #define FACILITY_RPC 1 #define FACILITY_WIN32 7 #define FACILITY_CONTROL 10 #define FACILITY_NULL 0 #define FACILITY_MSMQ 14 #define FACILITY_MEDIASERVER 13 #define FACILITY_INTERNET 12 #define FACILITY_ITF 4 #define FACILITY_DPLAY 21 #define FACILITY_DISPATCH 2 #define FACILITY_COMPLUS 17 #define FACILITY_CERT 11 #define FACILITY_ACS 20 #define FACILITY_AAF 18 #define NO_ERROR 0L #define SEVERITY_SUCCESS 0 #define SEVERITY_ERROR 1 #define SUCCEEDED(Status) ((HRESULT)(Status) >= 0) #define FAILED(Status) ((HRESULT)(Status)<0) #define IS_ERROR(Status) ((ULONG)(Status) >> 31 == SEVERITY_ERROR) // diff from win32 #define HRESULT_CODE(hr) ((hr) & 0xFFFF) #define SCODE_CODE(sc) ((sc) & 0xFFFF) #define HRESULT_FACILITY(hr) (((hr) >> 16) & 0x1fff) #define SCODE_FACILITY(sc) (((sc) >> 16) & 0x1fff) #define HRESULT_SEVERITY(hr) (((hr) >> 31) & 0x1) #define SCODE_SEVERITY(sc) (((sc) >> 31) & 0x1) // both macros diff from Win32 #define MAKE_HRESULT(sev,fac,code) \ ((HRESULT) (((ULONG)(sev)<<31) | ((ULONG)(fac)<<16) | ((ULONG)(code))) ) #define MAKE_SCODE(sev,fac,code) \ ((SCODE) (((ULONG)(sev)<<31) | ((ULONG)(fac)<<16) | ((LONG)(code))) ) #define FACILITY_NT_BIT 0x10000000 #define HRESULT_FROM_WIN32(x) ((HRESULT)(x) <= 0 ? ((HRESULT)(x)) : ((HRESULT) (((x) & 0x0000FFFF) | (FACILITY_WIN32 << 16) | 0x80000000))) #define __HRESULT_FROM_WIN32(x) HRESULT_FROM_WIN32(x) #define HRESULT_FROM_NT(x) ((HRESULT) ((x) | FACILITY_NT_BIT)) #ifdef __cplusplus } #endif #endif // __PAL_H__
1
dotnet/runtime
66,234
Remove compiler warning suppression
Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
AaronRobinsonMSFT
2022-03-05T03:45:49Z
2022-03-09T00:57:12Z
220e67755ae6323a01011b83070dff6f84b02519
853e494abd1c1e12d28a76829b4680af7f694afa
Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
./src/coreclr/pal/src/libunwind/CMakeLists.txt
# This is a custom file written for .NET Core's build system # It overwrites the one found in upstream project(unwind) set(CMAKE_INCLUDE_CURRENT_DIR ON) # define variables for the configure_file below set(PKG_MAJOR "1") set(PKG_MINOR "5") set(PKG_EXTRA "-rc2") # The HAVE___THREAD set to 1 causes creation of thread local variable with tls_model("initial-exec") # which is incompatible with usage of the unwind code in a shared library. add_definitions(-DHAVE___THREAD=0) add_definitions(-D_GNU_SOURCE) add_definitions(-DPACKAGE_STRING="") add_definitions(-DPACKAGE_BUGREPORT="") if(CLR_CMAKE_HOST_UNIX) if (CLR_CMAKE_HOST_ARCH_AMD64) set(arch x86_64) elseif(CLR_CMAKE_HOST_ARCH_ARM64) set(arch aarch64) elseif(CLR_CMAKE_HOST_ARCH_ARM) set(arch arm) elseif(CLR_CMAKE_HOST_ARCH_ARMV6) set(arch arm) elseif(CLR_CMAKE_HOST_ARCH_I386) set(arch x86) elseif(CLR_CMAKE_HOST_ARCH_S390X) set(arch s390x) elseif(CLR_CMAKE_HOST_ARCH_LOONGARCH64) set(arch loongarch64) endif () # Disable warning due to incorrect format specifier in debugging printf via the Debug macro add_compile_options(-Wno-format -Wno-format-security) add_compile_options(-Wno-implicit-fallthrough) if (CMAKE_CXX_COMPILER_ID MATCHES "Clang") add_compile_options(-Wno-header-guard) else() add_compile_options(-Wno-unused-value) add_compile_options(-Wno-unused-result) add_compile_options(-Wno-implicit-function-declaration) add_compile_options(-Wno-incompatible-pointer-types) endif() if(CLR_CMAKE_HOST_ARCH_ARM OR CLR_CMAKE_HOST_ARCH_ARMV6) # Ensure that the remote and local unwind code can reside in the same binary without name clashing add_definitions("-Darm_search_unwind_table=UNW_OBJ(arm_search_unwind_table)") if (CMAKE_CXX_COMPILER_ID MATCHES "Clang") # Disable warning due to labs function called on unsigned argument add_compile_options(-Wno-absolute-value) # Disable warning in asm: use of SP or PC in the list is deprecated add_compile_options(-Wno-inline-asm) endif() # Disable warning for a bug in the libunwind source 
src/arm/Gtrace.c:529, but not in code that we exercise add_compile_options(-Wno-implicit-function-declaration) # Disable warning due to an unused function prel31_read add_compile_options(-Wno-unused-function) # We compile code with -std=c99 and the asm keyword is not recognized as it is a gnu extension add_definitions(-Dasm=__asm__) elseif(CLR_CMAKE_HOST_ARCH_ARM64) if (CMAKE_CXX_COMPILER_ID MATCHES "Clang") # Disable warning due to labs function called on unsigned argument add_compile_options(-Wno-absolute-value) endif() # We compile code with -std=c99 and the asm keyword is not recognized as it is a gnu extension add_definitions(-Dasm=__asm__) # Disable warning for a bug in the libunwind source src/aarch64/Ginit.c, but not in code that we exercise add_compile_options(-Wno-incompatible-pointer-types) elseif(CLR_CMAKE_HOST_ARCH_I386) # Disable warning for a bug in the libunwind source src/x86/Gos-linux.c, but not in code that we exercise add_compile_options(-Wno-incompatible-pointer-types) elseif(CLR_CMAKE_HOST_ARCH_LOONGARCH64) ###TODO: maybe add options for LOONGARCH64 endif() if (CLR_CMAKE_HOST_OSX) add_definitions(-DUNW_REMOTE_ONLY) add_compile_options(-Wno-sometimes-uninitialized) add_compile_options(-Wno-implicit-function-declaration) # Our posix abstraction layer will provide these headers set(HAVE_ELF_H 1) set(HAVE_ENDIAN_H 1) # include paths include_directories(include/tdep) include_directories(include) include_directories(${CMAKE_CURRENT_BINARY_DIR}/include/tdep) include_directories(${CMAKE_CURRENT_BINARY_DIR}/include) # files for macos compilation include_directories(../libunwind_mac/include) endif(CLR_CMAKE_HOST_OSX) endif(CLR_CMAKE_HOST_UNIX) if(CLR_CMAKE_HOST_WIN32) if (CLR_CMAKE_TARGET_ARCH_AMD64) set(TARGET_AMD64 1) set(arch x86_64) add_definitions(-D__x86_64__) add_definitions(-D__amd64__) elseif(CLR_CMAKE_TARGET_ARCH_ARM64) set(TARGET_AARCH64 1) set(arch aarch64) add_definitions(-D__aarch64__) elseif(CLR_CMAKE_TARGET_ARCH_ARM) set(TARGET_ARM 1) 
set(arch arm) add_definitions(-D__arm__) elseif(CLR_CMAKE_TARGET_ARCH_ARMV6) set(TARGET_ARM 1) set(arch arm) add_definitions(-D__arm__) elseif(CLR_CMAKE_TARGET_ARCH_S390X) set(TARGET_S390X 1) set(arch s390x) add_definitions(-D__s390x__) else () message(FATAL_ERROR "Unrecognized TARGET") endif () # Windows builds will only support remote unwind add_definitions(-DUNW_REMOTE_ONLY) add_definitions(-DHAVE_UNW_GET_ACCESSORS) # Disable security warnings add_definitions(-D_CRT_SECURE_NO_WARNINGS) if(CLR_CMAKE_TARGET_LINUX) add_definitions(-D__linux__) endif () # Assume we are using default MSVC compiler add_compile_options(/TC) # compile all files as C add_compile_options(/permissive-) # include paths include_directories(include/tdep) include_directories(include) include_directories(${CMAKE_CURRENT_BINARY_DIR}/include/tdep) include_directories(${CMAKE_CURRENT_BINARY_DIR}/include) # files for cross os compilation include_directories(include/win) add_definitions(-D_CRT_DECLARE_NONSTDC_NAMES) # Warnings in release builds add_compile_options(-wd4068) # ignore unknown pragma warnings (gcc pragmas) add_compile_options(-wd4146) # minus operator applied to unsigned add_compile_options(-wd4244) # possible loss of data add_compile_options(-wd4267) # possible loss of data add_compile_options(-wd4334) # 32-bit shift implicitly converted to 64 bits # Disable warning due to incorrect format specifier in debugging printf via the Debug macro add_compile_options(-wd4311) # pointer truncation from 'unw_word_t *' to 'long' add_compile_options(-wd4475) # 'fprintf' : length modifier 'L' cannot be used add_compile_options(-wd4477) # fprintf argument type endif (CLR_CMAKE_HOST_WIN32) if(CLR_CMAKE_TARGET_ARCH_ARM OR CLR_CMAKE_TARGET_ARCH_ARMV6) # The arm sources include ex_tables.h from include/tdep-arm without going through a redirection # in include/tdep like it works for similar files on other architectures. 
So we need to add # the include/tdep-arm to include directories include_directories(include/tdep-arm) endif() include(configure.cmake) add_subdirectory(src)
# This is a custom file written for .NET Core's build system # It overwrites the one found in upstream project(unwind) set(CMAKE_INCLUDE_CURRENT_DIR ON) # define variables for the configure_file below set(PKG_MAJOR "1") set(PKG_MINOR "5") set(PKG_EXTRA "-rc2") # The HAVE___THREAD set to 1 causes creation of thread local variable with tls_model("initial-exec") # which is incompatible with usage of the unwind code in a shared library. add_definitions(-DHAVE___THREAD=0) add_definitions(-D_GNU_SOURCE) add_definitions(-DPACKAGE_STRING="") add_definitions(-DPACKAGE_BUGREPORT="") if(CLR_CMAKE_HOST_UNIX) if (CLR_CMAKE_HOST_ARCH_AMD64) set(arch x86_64) elseif(CLR_CMAKE_HOST_ARCH_ARM64) set(arch aarch64) elseif(CLR_CMAKE_HOST_ARCH_ARM) set(arch arm) elseif(CLR_CMAKE_HOST_ARCH_ARMV6) set(arch arm) elseif(CLR_CMAKE_HOST_ARCH_I386) set(arch x86) elseif(CLR_CMAKE_HOST_ARCH_S390X) set(arch s390x) elseif(CLR_CMAKE_HOST_ARCH_LOONGARCH64) set(arch loongarch64) endif () # Disable warning due to incorrect format specifier in debugging printf via the Debug macro add_compile_options(-Wno-format -Wno-format-security) add_compile_options(-Wno-implicit-fallthrough) if (CMAKE_CXX_COMPILER_ID MATCHES "Clang") add_compile_options(-Wno-header-guard) else() add_compile_options(-Wno-unused-value) add_compile_options(-Wno-unused-result) add_compile_options(-Wno-implicit-function-declaration) add_compile_options(-Wno-incompatible-pointer-types) endif() if(CLR_CMAKE_HOST_ARCH_ARM OR CLR_CMAKE_HOST_ARCH_ARMV6) # Ensure that the remote and local unwind code can reside in the same binary without name clashing add_definitions("-Darm_search_unwind_table=UNW_OBJ(arm_search_unwind_table)") if (CMAKE_CXX_COMPILER_ID MATCHES "Clang") # Disable warning due to labs function called on unsigned argument add_compile_options(-Wno-absolute-value) # Disable warning in asm: use of SP or PC in the list is deprecated add_compile_options(-Wno-inline-asm) endif() # Disable warning for a bug in the libunwind source 
src/arm/Gtrace.c:529, but not in code that we exercise add_compile_options(-Wno-implicit-function-declaration) # Disable warning due to an unused function prel31_read add_compile_options(-Wno-unused-function) # We compile code with -std=c99 and the asm keyword is not recognized as it is a gnu extension add_definitions(-Dasm=__asm__) elseif(CLR_CMAKE_HOST_ARCH_ARM64) if (CMAKE_CXX_COMPILER_ID MATCHES "Clang") # Disable warning due to labs function called on unsigned argument add_compile_options(-Wno-absolute-value) endif() # We compile code with -std=c99 and the asm keyword is not recognized as it is a gnu extension add_definitions(-Dasm=__asm__) # Disable warning for a bug in the libunwind source src/aarch64/Ginit.c, but not in code that we exercise add_compile_options(-Wno-incompatible-pointer-types) elseif(CLR_CMAKE_HOST_ARCH_I386) # Disable warning for a bug in the libunwind source src/x86/Gos-linux.c, but not in code that we exercise add_compile_options(-Wno-incompatible-pointer-types) elseif(CLR_CMAKE_HOST_ARCH_LOONGARCH64) ###TODO: maybe add options for LOONGARCH64 endif() if (CLR_CMAKE_HOST_OSX) add_definitions(-DUNW_REMOTE_ONLY) add_compile_options(-Wno-sometimes-uninitialized) add_compile_options(-Wno-implicit-function-declaration) # Our posix abstraction layer will provide these headers set(HAVE_ELF_H 1) set(HAVE_ENDIAN_H 1) # include paths include_directories(include/tdep) include_directories(include) include_directories(${CMAKE_CURRENT_BINARY_DIR}/include/tdep) include_directories(${CMAKE_CURRENT_BINARY_DIR}/include) # files for macos compilation include_directories(../libunwind_mac/include) endif(CLR_CMAKE_HOST_OSX) endif(CLR_CMAKE_HOST_UNIX) if(CLR_CMAKE_HOST_WIN32) if (CLR_CMAKE_TARGET_ARCH_AMD64) set(TARGET_AMD64 1) set(arch x86_64) add_definitions(-D__x86_64__) add_definitions(-D__amd64__) elseif(CLR_CMAKE_TARGET_ARCH_ARM64) set(TARGET_AARCH64 1) set(arch aarch64) add_definitions(-D__aarch64__) elseif(CLR_CMAKE_TARGET_ARCH_ARM) set(TARGET_ARM 1) 
set(arch arm) add_definitions(-D__arm__) elseif(CLR_CMAKE_TARGET_ARCH_ARMV6) set(TARGET_ARM 1) set(arch arm) add_definitions(-D__arm__) elseif(CLR_CMAKE_TARGET_ARCH_S390X) set(TARGET_S390X 1) set(arch s390x) add_definitions(-D__s390x__) else () message(FATAL_ERROR "Unrecognized TARGET") endif () # Windows builds will only support remote unwind add_definitions(-DUNW_REMOTE_ONLY) add_definitions(-DHAVE_UNW_GET_ACCESSORS) # Disable security warnings add_definitions(-D_CRT_SECURE_NO_WARNINGS) if(CLR_CMAKE_TARGET_LINUX) add_definitions(-D__linux__) endif () # Assume we are using default MSVC compiler add_compile_options(/TC) # compile all files as C add_compile_options(/permissive-) # include paths include_directories(include/tdep) include_directories(include) include_directories(${CMAKE_CURRENT_BINARY_DIR}/include/tdep) include_directories(${CMAKE_CURRENT_BINARY_DIR}/include) # files for cross os compilation include_directories(include/win) add_definitions(-D_CRT_DECLARE_NONSTDC_NAMES) # Warnings in release builds add_compile_options(-wd4068) # ignore unknown pragma warnings (gcc pragmas) add_compile_options(-wd4146) # minus operator applied to unsigned add_compile_options(-wd4244) # possible loss of data add_compile_options(-wd4334) # 32-bit shift implicitly converted to 64 bits # Disable warning due to incorrect format specifier in debugging printf via the Debug macro add_compile_options(-wd4311) # pointer truncation from 'unw_word_t *' to 'long' add_compile_options(-wd4475) # 'fprintf' : length modifier 'L' cannot be used add_compile_options(-wd4477) # fprintf argument type endif (CLR_CMAKE_HOST_WIN32) if(CLR_CMAKE_TARGET_ARCH_ARM OR CLR_CMAKE_TARGET_ARCH_ARMV6) # The arm sources include ex_tables.h from include/tdep-arm without going through a redirection # in include/tdep like it works for similar files on other architectures. 
So we need to add # the include/tdep-arm to include directories include_directories(include/tdep-arm) endif() include(configure.cmake) add_subdirectory(src)
1
dotnet/runtime
66,234
Remove compiler warning suppression
Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
AaronRobinsonMSFT
2022-03-05T03:45:49Z
2022-03-09T00:57:12Z
220e67755ae6323a01011b83070dff6f84b02519
853e494abd1c1e12d28a76829b4680af7f694afa
Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
./src/coreclr/pal/src/libunwind/include/mempool.h
/* libunwind - a platform-independent unwind library Copyright (C) 2002-2003 Hewlett-Packard Co Contributed by David Mosberger-Tang <[email protected]> This file is part of libunwind. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #ifndef mempool_h #define mempool_h /* Memory pools provide simple memory management of fixed-size objects. Memory pools are used for two purposes: o To ensure a stack can be unwound even when a process is out of memory. o To ensure a stack can be unwound at any time in a multi-threaded process (e.g., even at a time when the normal malloc-lock is taken, possibly by the very thread that is being unwind). To achieve the second objective, memory pools allocate memory directly via mmap() system call (or an equivalent facility). The first objective is accomplished by reserving memory ahead of time. 
Since the memory requirements of stack unwinding generally depends on the complexity of the procedures being unwind, there is no absolute guarantee that unwinding will always work, but in practice, this should not be a serious problem. */ #include <sys/types.h> #include "libunwind_i.h" #define sos_alloc(s) UNWI_ARCH_OBJ(_sos_alloc)(s) #define mempool_init(p,s,r) UNWI_ARCH_OBJ(_mempool_init)(p,s,r) #define mempool_alloc(p) UNWI_ARCH_OBJ(_mempool_alloc)(p) #define mempool_free(p,o) UNWI_ARCH_OBJ(_mempool_free)(p,o) /* The mempool structure should be treated as an opaque object. It's declared here only to enable static allocation of mempools. */ struct mempool { pthread_mutex_t lock; size_t obj_size; /* object size (rounded up for alignment) */ size_t chunk_size; /* allocation granularity */ unsigned int reserve; /* minimum (desired) size of the free-list */ unsigned int num_free; /* number of objects on the free-list */ struct object { struct object *next; } *free_list; }; /* Emergency allocation for one-time stuff that doesn't fit the memory pool model. A limited amount of memory is available in this fashion and once allocated, there is no way to free it. */ extern void *sos_alloc (size_t size); /* Initialize POOL for an object size of OBJECT_SIZE bytes. RESERVE is the number of objects that should be reserved for use under tight memory situations. If it is zero, mempool attempts to pick a reasonable default value. */ extern void mempool_init (struct mempool *pool, size_t obj_size, size_t reserve); extern void *mempool_alloc (struct mempool *pool); extern void mempool_free (struct mempool *pool, void *object); #endif /* mempool_h */
/* libunwind - a platform-independent unwind library Copyright (C) 2002-2003 Hewlett-Packard Co Contributed by David Mosberger-Tang <[email protected]> This file is part of libunwind. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #ifndef mempool_h #define mempool_h /* Memory pools provide simple memory management of fixed-size objects. Memory pools are used for two purposes: o To ensure a stack can be unwound even when a process is out of memory. o To ensure a stack can be unwound at any time in a multi-threaded process (e.g., even at a time when the normal malloc-lock is taken, possibly by the very thread that is being unwind). To achieve the second objective, memory pools allocate memory directly via mmap() system call (or an equivalent facility). The first objective is accomplished by reserving memory ahead of time. 
Since the memory requirements of stack unwinding generally depends on the complexity of the procedures being unwind, there is no absolute guarantee that unwinding will always work, but in practice, this should not be a serious problem. */ #include <sys/types.h> #include "libunwind_i.h" #define sos_alloc(s) UNWI_ARCH_OBJ(_sos_alloc)(s) #define mempool_init(p,s,r) UNWI_ARCH_OBJ(_mempool_init)(p,s,r) #define mempool_alloc(p) UNWI_ARCH_OBJ(_mempool_alloc)(p) #define mempool_free(p,o) UNWI_ARCH_OBJ(_mempool_free)(p,o) /* The mempool structure should be treated as an opaque object. It's declared here only to enable static allocation of mempools. */ struct mempool { pthread_mutex_t lock; size_t obj_size; /* object size (rounded up for alignment) */ size_t chunk_size; /* allocation granularity */ size_t reserve; /* minimum (desired) size of the free-list */ size_t num_free; /* number of objects on the free-list */ struct object { struct object *next; } *free_list; }; /* Emergency allocation for one-time stuff that doesn't fit the memory pool model. A limited amount of memory is available in this fashion and once allocated, there is no way to free it. */ extern void *sos_alloc (size_t size); /* Initialize POOL for an object size of OBJECT_SIZE bytes. RESERVE is the number of objects that should be reserved for use under tight memory situations. If it is zero, mempool attempts to pick a reasonable default value. */ extern void mempool_init (struct mempool *pool, size_t obj_size, size_t reserve); extern void *mempool_alloc (struct mempool *pool); extern void mempool_free (struct mempool *pool, void *object); #endif /* mempool_h */
1
dotnet/runtime
66,234
Remove compiler warning suppression
Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
AaronRobinsonMSFT
2022-03-05T03:45:49Z
2022-03-09T00:57:12Z
220e67755ae6323a01011b83070dff6f84b02519
853e494abd1c1e12d28a76829b4680af7f694afa
Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
./src/coreclr/pal/src/libunwind/libunwind-version.txt
v1.6.2 https://github.com/libunwind/libunwind/commit/b3ca1b59a795a617877c01fe5d299ab7a07ff29d Replace CMakeLists.txt, src/CMakeLists.txt, configure.cmake with .NET custom version Keep .NET oop directory Reapply changes from https://github.com/dotnet/runtime/commit/1b5719c2e3dde393531eaeb5b5cde05dabeef5b8 Apply https://github.com/libunwind/libunwind/pull/317 For LoongArch64: Apply https://github.com/libunwind/libunwind/pull/316 and https://github.com/libunwind/libunwind/pull/322
v1.6.2 https://github.com/libunwind/libunwind/commit/b3ca1b59a795a617877c01fe5d299ab7a07ff29d Replace CMakeLists.txt, src/CMakeLists.txt, configure.cmake with .NET custom version Keep .NET oop directory Reapply changes from https://github.com/dotnet/runtime/commit/1b5719c2e3dde393531eaeb5b5cde05dabeef5b8 Apply https://github.com/libunwind/libunwind/pull/317 Apply https://github.com/libunwind/libunwind/pull/333 For LoongArch64: Apply https://github.com/libunwind/libunwind/pull/316 and https://github.com/libunwind/libunwind/pull/322
1
dotnet/runtime
66,234
Remove compiler warning suppression
Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
AaronRobinsonMSFT
2022-03-05T03:45:49Z
2022-03-09T00:57:12Z
220e67755ae6323a01011b83070dff6f84b02519
853e494abd1c1e12d28a76829b4680af7f694afa
Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
./src/coreclr/pal/src/libunwind/src/dwarf/Gfind_proc_info-lsb.c
/* libunwind - a platform-independent unwind library Copyright (c) 2003-2005 Hewlett-Packard Development Company, L.P. Contributed by David Mosberger-Tang <[email protected]> This file is part of libunwind. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ /* Locate an FDE via the ELF data-structures defined by LSB v1.3 (http://www.linuxbase.org/spec/). 
*/ #include <stddef.h> #include <stdio.h> #include <limits.h> #include "dwarf_i.h" #include "dwarf-eh.h" #include "libunwind_i.h" #ifdef HAVE_ZLIB #include <zlib.h> #endif /* HAVE_ZLIB */ struct table_entry { int32_t start_ip_offset; int32_t fde_offset; }; #ifndef UNW_REMOTE_ONLY #ifdef __linux__ #include "os-linux.h" #endif #ifndef __clang__ static ALIAS(dwarf_search_unwind_table) int dwarf_search_unwind_table_int (unw_addr_space_t as, unw_word_t ip, unw_dyn_info_t *di, unw_proc_info_t *pi, int need_unwind_info, void *arg); #else #define dwarf_search_unwind_table_int dwarf_search_unwind_table #endif static int linear_search (unw_addr_space_t as, unw_word_t ip, unw_word_t eh_frame_start, unw_word_t eh_frame_end, unw_word_t fde_count, unw_proc_info_t *pi, int need_unwind_info, void *arg) { unw_accessors_t *a = unw_get_accessors_int (unw_local_addr_space); unw_word_t i = 0, fde_addr, addr = eh_frame_start; int ret; while (i++ < fde_count && addr < eh_frame_end) { fde_addr = addr; if ((ret = dwarf_extract_proc_info_from_fde (as, a, &addr, pi, eh_frame_start, 0, 0, arg)) < 0) return ret; if (ip >= pi->start_ip && ip < pi->end_ip) { if (!need_unwind_info) return 1; addr = fde_addr; if ((ret = dwarf_extract_proc_info_from_fde (as, a, &addr, pi, eh_frame_start, need_unwind_info, 0, arg)) < 0) return ret; return 1; } } return -UNW_ENOINFO; } #endif /* !UNW_REMOTE_ONLY */ #ifdef CONFIG_DEBUG_FRAME /* Load .debug_frame section from FILE. Allocates and returns space in *BUF, and sets *BUFSIZE to its size. IS_LOCAL is 1 if using the local process, in which case we can search the system debug file directory; 0 for other address spaces, in which case we do not. Returns 0 on success, 1 on error. Succeeds even if the file contains no .debug_frame. */ /* XXX: Could use mmap; but elf_map_image keeps tons mapped in. 
*/ static int load_debug_frame (const char *file, char **buf, size_t *bufsize, int is_local, unw_word_t segbase, unw_word_t *load_offset) { struct elf_image ei; Elf_W (Ehdr) *ehdr; Elf_W (Phdr) *phdr; Elf_W (Shdr) *shdr; int i; int ret; ei.image = NULL; *load_offset = 0; ret = elf_w (load_debuglink) (file, &ei, is_local); if (ret != 0) return ret; shdr = elf_w (find_section) (&ei, ".debug_frame"); if (!shdr || (shdr->sh_offset + shdr->sh_size > ei.size)) { munmap(ei.image, ei.size); return 1; } #if defined(SHF_COMPRESSED) if (shdr->sh_flags & SHF_COMPRESSED) { unsigned long destSize; Elf_W (Chdr) *chdr = (shdr->sh_offset + ei.image); #ifdef HAVE_ZLIB if (chdr->ch_type == ELFCOMPRESS_ZLIB) { *bufsize = destSize = chdr->ch_size; GET_MEMORY (*buf, *bufsize); if (!*buf) { Debug (2, "failed to allocate zlib .debug_frame buffer, skipping\n"); munmap(ei.image, ei.size); return 1; } ret = uncompress((unsigned char *)*buf, &destSize, shdr->sh_offset + ei.image + sizeof(*chdr), shdr->sh_size - sizeof(*chdr)); if (ret != Z_OK) { Debug (2, "failed to decompress zlib .debug_frame, skipping\n"); munmap(*buf, *bufsize); munmap(ei.image, ei.size); return 1; } Debug (4, "read %zd->%zd bytes of .debug_frame from offset %zd\n", shdr->sh_size, *bufsize, shdr->sh_offset); } else #endif /* HAVE_ZLIB */ { Debug (2, "unknown compression type %d, skipping\n", chdr->ch_type); munmap(ei.image, ei.size); return 1; } } else { #endif *bufsize = shdr->sh_size; GET_MEMORY (*buf, *bufsize); if (!*buf) { Debug (2, "failed to allocate .debug_frame buffer, skipping\n"); munmap(ei.image, ei.size); return 1; } memcpy(*buf, shdr->sh_offset + ei.image, *bufsize); Debug (4, "read %zd bytes of .debug_frame from offset %zd\n", *bufsize, shdr->sh_offset); #if defined(SHF_COMPRESSED) } #endif ehdr = ei.image; phdr = (Elf_W (Phdr) *) ((char *) ei.image + ehdr->e_phoff); for (i = 0; i < ehdr->e_phnum; ++i) if (phdr[i].p_type == PT_LOAD) { *load_offset = segbase - phdr[i].p_vaddr; Debug (4, "%s load offset is 
0x%zx\n", file, *load_offset); break; } munmap(ei.image, ei.size); return 0; } /* Locate the binary which originated the contents of address ADDR. Return the name of the binary in *name (space is allocated by the caller) Returns 0 if a binary is successfully found, or 1 if an error occurs. */ static int find_binary_for_address (unw_word_t ip, char *name, size_t name_size) { #if defined(__linux__) && (!UNW_REMOTE_ONLY) struct map_iterator mi; int found = 0; int pid = getpid (); unsigned long segbase, mapoff, hi; if (maps_init (&mi, pid) != 0) return 1; while (maps_next (&mi, &segbase, &hi, &mapoff, NULL)) if (ip >= segbase && ip < hi) { size_t len = strlen (mi.path); if (len + 1 <= name_size) { memcpy (name, mi.path, len + 1); found = 1; } break; } maps_close (&mi); return !found; #endif return 1; } /* Locate and/or try to load a debug_frame section for address ADDR. Return pointer to debug frame descriptor, or zero if not found. */ static struct unw_debug_frame_list * locate_debug_info (unw_addr_space_t as, unw_word_t addr, unw_word_t segbase, const char *dlname, unw_word_t start, unw_word_t end) { struct unw_debug_frame_list *w, *fdesc = 0; char path[PATH_MAX]; char *name = path; int err; char *buf; size_t bufsize; unw_word_t load_offset; /* First, see if we loaded this frame already. */ for (w = as->debug_frames; w; w = w->next) { Debug (4, "checking %p: %lx-%lx\n", w, (long)w->start, (long)w->end); if (addr >= w->start && addr < w->end) return w; } /* If the object name we receive is blank, there's still a chance of locating the file by parsing /proc/self/maps. 
*/ if (strcmp (dlname, "") == 0) { err = find_binary_for_address (addr, name, sizeof(path)); if (err) { Debug (15, "tried to locate binary for 0x%" PRIx64 ", but no luck\n", (uint64_t) addr); return 0; } } else name = (char*) dlname; err = load_debug_frame (name, &buf, &bufsize, as == unw_local_addr_space, segbase, &load_offset); if (!err) { GET_MEMORY (fdesc, sizeof (struct unw_debug_frame_list)); if (!fdesc) { Debug (2, "failed to allocate frame list entry\n"); return 0; } fdesc->start = start; fdesc->end = end; fdesc->load_offset = load_offset; fdesc->debug_frame = buf; fdesc->debug_frame_size = bufsize; fdesc->index = NULL; fdesc->next = as->debug_frames; as->debug_frames = fdesc; } return fdesc; } static size_t debug_frame_index_make (struct unw_debug_frame_list *fdesc) { unw_accessors_t *a = unw_get_accessors_int (unw_local_addr_space); char *buf = fdesc->debug_frame; size_t bufsize = fdesc->debug_frame_size; unw_word_t addr = (unw_word_t) (uintptr_t) buf; size_t count = 0; while (addr < (unw_word_t) (uintptr_t) (buf + bufsize)) { unw_word_t item_start = addr, item_end = 0; uint32_t u32val = 0; uint64_t cie_id = 0; uint64_t id_for_cie; dwarf_readu32 (unw_local_addr_space, a, &addr, &u32val, NULL); if (u32val == 0) break; if (u32val != 0xffffffff) { uint32_t cie_id32 = 0; item_end = addr + u32val; dwarf_readu32 (unw_local_addr_space, a, &addr, &cie_id32, NULL); cie_id = cie_id32; id_for_cie = 0xffffffff; } else { uint64_t u64val = 0; /* Extended length. 
*/ dwarf_readu64 (unw_local_addr_space, a, &addr, &u64val, NULL); item_end = addr + u64val; dwarf_readu64 (unw_local_addr_space, a, &addr, &cie_id, NULL); id_for_cie = 0xffffffffffffffffull; } /*Debug (1, "CIE/FDE id = %.8x\n", (int) cie_id);*/ if (cie_id == id_for_cie) { ; /*Debug (1, "Found CIE at %.8x.\n", item_start);*/ } else { unw_word_t fde_addr = item_start; unw_proc_info_t this_pi; int err; /*Debug (1, "Found FDE at %.8x\n", item_start);*/ err = dwarf_extract_proc_info_from_fde (unw_local_addr_space, a, &fde_addr, &this_pi, (uintptr_t) buf, 0, 1, NULL); if (!err) { Debug (15, "start_ip = %lx, end_ip = %lx\n", (long) this_pi.start_ip, (long) this_pi.end_ip); if (fdesc->index) { struct table_entry *e = &fdesc->index[count]; e->fde_offset = item_start - (unw_word_t) (uintptr_t) buf; e->start_ip_offset = this_pi.start_ip; } count++; } /*else Debug (1, "FDE parse failed\n");*/ } addr = item_end; } return count; } static void debug_frame_index_sort (struct unw_debug_frame_list *fdesc) { size_t i, j, k, n = fdesc->index_size / sizeof (*fdesc->index); struct table_entry *a = fdesc->index; struct table_entry t; /* Use a simple Shell sort as it relatively fast and * does not require additional memory. 
*/ for (k = n / 2; k > 0; k /= 2) { for (i = k; i < n; i++) { t = a[i]; for (j = i; j >= k; j -= k) { if (t.start_ip_offset >= a[j - k].start_ip_offset) break; a[j] = a[j - k]; } a[j] = t; } } } int dwarf_find_debug_frame (int found, unw_dyn_info_t *di_debug, unw_word_t ip, unw_word_t segbase, const char* obj_name, unw_word_t start, unw_word_t end) { unw_dyn_info_t *di = di_debug; struct unw_debug_frame_list *fdesc; Debug (15, "Trying to find .debug_frame for %s\n", obj_name); fdesc = locate_debug_info (unw_local_addr_space, ip, segbase, obj_name, start, end); if (!fdesc) { Debug (15, "couldn't load .debug_frame\n"); return found; } Debug (15, "loaded .debug_frame\n"); if (fdesc->debug_frame_size == 0) { Debug (15, "zero-length .debug_frame\n"); return found; } /* Now create a binary-search table, if it does not already exist. */ if (!fdesc->index) { /* Find all FDE entries in debug_frame, and make into a sorted index. First determine an index element count. */ size_t count = debug_frame_index_make (fdesc); if (!count) { Debug (15, "no CIE/FDE found in .debug_frame\n"); return found; } fdesc->index_size = count * sizeof (*fdesc->index); GET_MEMORY (fdesc->index, fdesc->index_size); if (!fdesc->index) { Debug (15, "couldn't allocate a frame index table\n"); fdesc->index_size = 0; return found; } /* Then fill and sort the index. 
*/ debug_frame_index_make (fdesc); debug_frame_index_sort (fdesc); /*for (i = 0; i < count; i++) { const struct table_entry *e = &fdesc->index[i]; Debug (15, "ip %x, FDE offset %x\n", e->start_ip_offset, e->fde_offset); }*/ } di->format = UNW_INFO_FORMAT_TABLE; di->start_ip = fdesc->start; di->end_ip = fdesc->end; di->load_offset = fdesc->load_offset; di->u.ti.name_ptr = (unw_word_t) (uintptr_t) obj_name; di->u.ti.table_data = (unw_word_t *) fdesc; di->u.ti.table_len = sizeof (*fdesc) / sizeof (unw_word_t); di->u.ti.segbase = segbase; found = 1; Debug (15, "found debug_frame table `%s': segbase=0x%lx, len=%lu, " "gp=0x%lx, table_data=0x%lx\n", (char *) (uintptr_t) di->u.ti.name_ptr, (long) di->u.ti.segbase, (long) di->u.ti.table_len, (long) di->gp, (long) di->u.ti.table_data); return found; } #endif /* CONFIG_DEBUG_FRAME */ #ifndef UNW_REMOTE_ONLY static Elf_W (Addr) dwarf_find_eh_frame_section(struct dl_phdr_info *info) { int rc; struct elf_image ei; Elf_W (Addr) eh_frame = 0; Elf_W (Shdr)* shdr; const char *file = info->dlpi_name; char exepath[PATH_MAX]; if (strlen(file) == 0) { tdep_get_exe_image_path(exepath); file = exepath; } Debug (1, "looking for .eh_frame section in %s\n", file); rc = elf_map_image (&ei, file); if (rc != 0) return 0; shdr = elf_w (find_section) (&ei, ".eh_frame"); if (!shdr) goto out; eh_frame = shdr->sh_addr + info->dlpi_addr; Debug (4, "found .eh_frame at address %lx\n", eh_frame); out: munmap (ei.image, ei.size); return eh_frame; } struct dwarf_callback_data { /* in: */ unw_word_t ip; /* instruction-pointer we're looking for */ unw_proc_info_t *pi; /* proc-info pointer */ int need_unwind_info; /* out: */ int single_fde; /* did we find a single FDE? (vs. a table) */ unw_dyn_info_t di; /* table info (if single_fde is false) */ unw_dyn_info_t di_debug; /* additional table info for .debug_frame */ }; /* ptr is a pointer to a dwarf_callback_data structure and, on entry, member ip contains the instruction-pointer we're looking for. 
*/ HIDDEN int dwarf_callback (struct dl_phdr_info *info, size_t size, void *ptr) { struct dwarf_callback_data *cb_data = ptr; unw_dyn_info_t *di = &cb_data->di; const Elf_W(Phdr) *phdr, *p_eh_hdr, *p_dynamic, *p_text; unw_word_t addr, eh_frame_start, eh_frame_end, fde_count, ip; Elf_W(Addr) load_base, max_load_addr = 0; int ret, need_unwind_info = cb_data->need_unwind_info; unw_proc_info_t *pi = cb_data->pi; struct dwarf_eh_frame_hdr *hdr = NULL; unw_accessors_t *a; long n; int found = 0; struct dwarf_eh_frame_hdr synth_eh_frame_hdr; #ifdef CONFIG_DEBUG_FRAME unw_word_t start, end; #endif /* CONFIG_DEBUG_FRAME*/ ip = cb_data->ip; /* Make sure struct dl_phdr_info is at least as big as we need. */ if (size < offsetof (struct dl_phdr_info, dlpi_phnum) + sizeof (info->dlpi_phnum)) return -1; Debug (15, "checking %s, base=0x%lx)\n", info->dlpi_name, (long) info->dlpi_addr); phdr = info->dlpi_phdr; load_base = info->dlpi_addr; p_text = NULL; p_eh_hdr = NULL; p_dynamic = NULL; /* See if PC falls into one of the loaded segments. Find the eh-header segment at the same time. 
*/ for (n = info->dlpi_phnum; --n >= 0; phdr++) { if (phdr->p_type == PT_LOAD) { Elf_W(Addr) vaddr = phdr->p_vaddr + load_base; if (ip >= vaddr && ip < vaddr + phdr->p_memsz) p_text = phdr; if (vaddr + phdr->p_filesz > max_load_addr) max_load_addr = vaddr + phdr->p_filesz; } else if (phdr->p_type == PT_GNU_EH_FRAME) p_eh_hdr = phdr; #if defined __sun else if (phdr->p_type == PT_SUNW_UNWIND) p_eh_hdr = phdr; #endif else if (phdr->p_type == PT_DYNAMIC) p_dynamic = phdr; } if (!p_text) return 0; if (p_eh_hdr) { hdr = (struct dwarf_eh_frame_hdr *) (p_eh_hdr->p_vaddr + load_base); } else { Elf_W (Addr) eh_frame; Debug (1, "no .eh_frame_hdr section found\n"); eh_frame = dwarf_find_eh_frame_section (info); if (eh_frame) { Debug (1, "using synthetic .eh_frame_hdr section for %s\n", info->dlpi_name); synth_eh_frame_hdr.version = DW_EH_VERSION; synth_eh_frame_hdr.eh_frame_ptr_enc = DW_EH_PE_absptr | ((sizeof(Elf_W (Addr)) == 4) ? DW_EH_PE_udata4 : DW_EH_PE_udata8); synth_eh_frame_hdr.fde_count_enc = DW_EH_PE_omit; synth_eh_frame_hdr.table_enc = DW_EH_PE_omit; synth_eh_frame_hdr.eh_frame = eh_frame; hdr = &synth_eh_frame_hdr; } } if (hdr) { if (p_dynamic) { /* For dynamicly linked executables and shared libraries, DT_PLTGOT is the value that data-relative addresses are relative to for that object. We call this the "gp". */ Elf_W(Dyn) *dyn = (Elf_W(Dyn) *)(p_dynamic->p_vaddr + load_base); for (; dyn->d_tag != DT_NULL; ++dyn) if (dyn->d_tag == DT_PLTGOT) { /* Assume that _DYNAMIC is writable and GLIBC has relocated it (true for x86 at least). */ di->gp = dyn->d_un.d_ptr; break; } } else /* Otherwise this is a static executable with no _DYNAMIC. Assume that data-relative addresses are relative to 0, i.e., absolute. 
*/ di->gp = 0; pi->gp = di->gp; if (hdr->version != DW_EH_VERSION) { Debug (1, "table `%s' has unexpected version %d\n", info->dlpi_name, hdr->version); return 0; } a = unw_get_accessors_int (unw_local_addr_space); addr = (unw_word_t) (uintptr_t) (&hdr->eh_frame); /* (Optionally) read eh_frame_ptr: */ if ((ret = dwarf_read_encoded_pointer (unw_local_addr_space, a, &addr, hdr->eh_frame_ptr_enc, pi, &eh_frame_start, NULL)) < 0) return ret; /* (Optionally) read fde_count: */ if ((ret = dwarf_read_encoded_pointer (unw_local_addr_space, a, &addr, hdr->fde_count_enc, pi, &fde_count, NULL)) < 0) return ret; if (hdr->table_enc != (DW_EH_PE_datarel | DW_EH_PE_sdata4)) { /* If there is no search table or it has an unsupported encoding, fall back on linear search. */ if (hdr->table_enc == DW_EH_PE_omit) { Debug (4, "table `%s' lacks search table; doing linear search\n", info->dlpi_name); } else { Debug (4, "table `%s' has encoding 0x%x; doing linear search\n", info->dlpi_name, hdr->table_enc); } eh_frame_end = max_load_addr; /* XXX can we do better? */ if (hdr->fde_count_enc == DW_EH_PE_omit) fde_count = ~0UL; if (hdr->eh_frame_ptr_enc == DW_EH_PE_omit) abort (); Debug (1, "eh_frame_start = %lx eh_frame_end = %lx\n", eh_frame_start, eh_frame_end); /* XXX we know how to build a local binary search table for .debug_frame, so we could do that here too. 
*/ found = linear_search (unw_local_addr_space, ip, eh_frame_start, eh_frame_end, fde_count, pi, need_unwind_info, NULL); if (found != 1) found = 0; else cb_data->single_fde = 1; } else { di->format = UNW_INFO_FORMAT_REMOTE_TABLE; di->start_ip = p_text->p_vaddr + load_base; di->end_ip = p_text->p_vaddr + load_base + p_text->p_memsz; di->u.rti.name_ptr = (unw_word_t) (uintptr_t) info->dlpi_name; di->u.rti.table_data = addr; assert (sizeof (struct table_entry) % sizeof (unw_word_t) == 0); di->u.rti.table_len = (fde_count * sizeof (struct table_entry) / sizeof (unw_word_t)); /* For the binary-search table in the eh_frame_hdr, data-relative means relative to the start of that section... */ di->u.rti.segbase = (unw_word_t) (uintptr_t) hdr; found = 1; Debug (15, "found table `%s': segbase=0x%lx, len=%lu, gp=0x%lx, " "table_data=0x%lx\n", (char *) (uintptr_t) di->u.rti.name_ptr, (long) di->u.rti.segbase, (long) di->u.rti.table_len, (long) di->gp, (long) di->u.rti.table_data); } } #ifdef CONFIG_DEBUG_FRAME /* Find the start/end of the described region by parsing the phdr_info structure. 
*/ start = (unw_word_t) -1; end = 0; for (n = 0; n < info->dlpi_phnum; n++) { if (info->dlpi_phdr[n].p_type == PT_LOAD) { unw_word_t seg_start = info->dlpi_addr + info->dlpi_phdr[n].p_vaddr; unw_word_t seg_end = seg_start + info->dlpi_phdr[n].p_memsz; if (seg_start < start) start = seg_start; if (seg_end > end) end = seg_end; } } found = dwarf_find_debug_frame (found, &cb_data->di_debug, ip, info->dlpi_addr, info->dlpi_name, start, end); #endif /* CONFIG_DEBUG_FRAME */ return found; } HIDDEN int dwarf_find_proc_info (unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi, int need_unwind_info, void *arg) { struct dwarf_callback_data cb_data; intrmask_t saved_mask; int ret; Debug (14, "looking for IP=0x%lx\n", (long) ip); memset (&cb_data, 0, sizeof (cb_data)); cb_data.ip = ip; cb_data.pi = pi; cb_data.need_unwind_info = need_unwind_info; cb_data.di.format = -1; cb_data.di_debug.format = -1; SIGPROCMASK (SIG_SETMASK, &unwi_full_mask, &saved_mask); ret = dl_iterate_phdr (dwarf_callback, &cb_data); SIGPROCMASK (SIG_SETMASK, &saved_mask, NULL); if (ret > 0) { if (cb_data.single_fde) /* already got the result in *pi */ return 0; /* search the table: */ if (cb_data.di.format != -1) ret = dwarf_search_unwind_table_int (as, ip, &cb_data.di, pi, need_unwind_info, arg); else ret = -UNW_ENOINFO; if (ret == -UNW_ENOINFO && cb_data.di_debug.format != -1) ret = dwarf_search_unwind_table_int (as, ip, &cb_data.di_debug, pi, need_unwind_info, arg); } else ret = -UNW_ENOINFO; return ret; } static inline const struct table_entry * lookup (const struct table_entry *table, size_t table_size, int32_t rel_ip) { unsigned long table_len = table_size / sizeof (struct table_entry); const struct table_entry *e = NULL; unsigned long lo, hi, mid; /* do a binary search for right entry: */ for (lo = 0, hi = table_len; lo < hi;) { mid = (lo + hi) / 2; e = table + mid; Debug (15, "e->start_ip_offset = %lx\n", (long) e->start_ip_offset); if (rel_ip < e->start_ip_offset) hi = mid; else lo = mid + 1; 
} if (hi <= 0) return NULL; e = table + hi - 1; return e; } #endif /* !UNW_REMOTE_ONLY */ #ifndef UNW_LOCAL_ONLY /* Lookup an unwind-table entry in remote memory. Returns 1 if an entry is found, 0 if no entry is found, negative if an error occurred reading remote memory. */ static int remote_lookup (unw_addr_space_t as, unw_word_t table, size_t table_size, int32_t rel_ip, struct table_entry *e, int32_t *last_ip_offset, void *arg) { unsigned long table_len = table_size / sizeof (struct table_entry); unw_accessors_t *a = unw_get_accessors_int (as); unsigned long lo, hi, mid; unw_word_t e_addr = 0; int32_t start = 0; int ret; /* do a binary search for right entry: */ for (lo = 0, hi = table_len; lo < hi;) { mid = (lo + hi) / 2; e_addr = table + mid * sizeof (struct table_entry); if ((ret = dwarf_reads32 (as, a, &e_addr, &start, arg)) < 0) return ret; if (rel_ip < start) hi = mid; else lo = mid + 1; } if (hi <= 0) return 0; e_addr = table + (hi - 1) * sizeof (struct table_entry); if ((ret = dwarf_reads32 (as, a, &e_addr, &e->start_ip_offset, arg)) < 0 || (ret = dwarf_reads32 (as, a, &e_addr, &e->fde_offset, arg)) < 0 || (hi < table_len && (ret = dwarf_reads32 (as, a, &e_addr, last_ip_offset, arg)) < 0)) return ret; return 1; } #endif /* !UNW_LOCAL_ONLY */ static int is_remote_table(int format) { return (format == UNW_INFO_FORMAT_REMOTE_TABLE || format == UNW_INFO_FORMAT_IP_OFFSET); } int dwarf_search_unwind_table (unw_addr_space_t as, unw_word_t ip, unw_dyn_info_t *di, unw_proc_info_t *pi, int need_unwind_info, void *arg) { const struct table_entry *e = NULL, *table; unw_word_t ip_base = 0, segbase = 0, last_ip, fde_addr; unw_accessors_t *a; #ifndef UNW_LOCAL_ONLY struct table_entry ent; #endif int ret; unw_word_t debug_frame_base; size_t table_len; #ifdef UNW_REMOTE_ONLY assert (is_remote_table(di->format)); #else assert (is_remote_table(di->format) || di->format == UNW_INFO_FORMAT_TABLE); #endif assert (ip >= di->start_ip && ip < di->end_ip); if 
(is_remote_table(di->format)) { table = (const struct table_entry *) (uintptr_t) di->u.rti.table_data; table_len = di->u.rti.table_len * sizeof (unw_word_t); debug_frame_base = 0; } else { assert(di->format == UNW_INFO_FORMAT_TABLE); #ifndef UNW_REMOTE_ONLY struct unw_debug_frame_list *fdesc = (void *) di->u.ti.table_data; /* UNW_INFO_FORMAT_TABLE (i.e. .debug_frame) is read from local address space. Both the index and the unwind tables live in local memory, but the address space to check for properties like the address size and endianness is the target one. */ as = unw_local_addr_space; table = fdesc->index; table_len = fdesc->index_size; debug_frame_base = (uintptr_t) fdesc->debug_frame; #endif } a = unw_get_accessors_int (as); segbase = di->u.rti.segbase; if (di->format == UNW_INFO_FORMAT_IP_OFFSET) { ip_base = di->start_ip; } else { ip_base = segbase; } Debug (6, "lookup IP 0x%lx\n", (long) (ip - ip_base - di->load_offset)); #ifndef UNW_REMOTE_ONLY if (as == unw_local_addr_space) { e = lookup (table, table_len, ip - ip_base - di->load_offset); if (e && &e[1] < &table[table_len]) last_ip = e[1].start_ip_offset + ip_base + di->load_offset; else last_ip = di->end_ip; } else #endif { #ifndef UNW_LOCAL_ONLY int32_t last_ip_offset = di->end_ip - ip_base - di->load_offset; segbase = di->u.rti.segbase; if ((ret = remote_lookup (as, (uintptr_t) table, table_len, ip - ip_base, &ent, &last_ip_offset, arg)) < 0) return ret; if (ret) { e = &ent; last_ip = last_ip_offset + ip_base + di->load_offset; } else e = NULL; /* no info found */ #endif } if (!e) { Debug (1, "IP %lx inside range %lx-%lx, but no explicit unwind info found\n", (long) ip, (long) di->start_ip, (long) di->end_ip); /* IP is inside this table's range, but there is no explicit unwind info. 
*/ return -UNW_ENOINFO; } Debug (15, "ip=0x%lx, load_offset=0x%lx, start_ip=0x%lx\n", (long) ip, (long) di->load_offset, (long) (e->start_ip_offset)); if (debug_frame_base) fde_addr = e->fde_offset + debug_frame_base; else fde_addr = e->fde_offset + segbase; Debug (1, "e->fde_offset = %lx, segbase = %lx, debug_frame_base = %lx, " "fde_addr = %lx\n", (long) e->fde_offset, (long) segbase, (long) debug_frame_base, (long) fde_addr); if ((ret = dwarf_extract_proc_info_from_fde (as, a, &fde_addr, pi, debug_frame_base ? debug_frame_base : segbase, need_unwind_info, debug_frame_base != 0, arg)) < 0) return ret; /* .debug_frame uses an absolute encoding that does not know about any shared library relocation. */ if (di->format == UNW_INFO_FORMAT_TABLE) { pi->start_ip += segbase; pi->end_ip += segbase; pi->flags = UNW_PI_FLAG_DEBUG_FRAME; } pi->start_ip += di->load_offset; pi->end_ip += di->load_offset; #if defined(NEED_LAST_IP) pi->last_ip = last_ip; #else (void)last_ip; #endif if (ip < pi->start_ip || ip >= pi->end_ip) return -UNW_ENOINFO; return 0; } HIDDEN void dwarf_put_unwind_info (unw_addr_space_t as, unw_proc_info_t *pi, void *arg) { return; /* always a nop */ }
/* libunwind - a platform-independent unwind library Copyright (c) 2003-2005 Hewlett-Packard Development Company, L.P. Contributed by David Mosberger-Tang <[email protected]> This file is part of libunwind. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ /* Locate an FDE via the ELF data-structures defined by LSB v1.3 (http://www.linuxbase.org/spec/). 
*/ #include <stddef.h> #include <stdio.h> #include <limits.h> #include "dwarf_i.h" #include "dwarf-eh.h" #include "libunwind_i.h" #ifdef HAVE_ZLIB #include <zlib.h> #endif /* HAVE_ZLIB */ struct table_entry { int32_t start_ip_offset; int32_t fde_offset; }; #ifndef UNW_REMOTE_ONLY #ifdef __linux__ #include "os-linux.h" #endif #ifndef __clang__ static ALIAS(dwarf_search_unwind_table) int dwarf_search_unwind_table_int (unw_addr_space_t as, unw_word_t ip, unw_dyn_info_t *di, unw_proc_info_t *pi, int need_unwind_info, void *arg); #else #define dwarf_search_unwind_table_int dwarf_search_unwind_table #endif static int linear_search (unw_addr_space_t as, unw_word_t ip, unw_word_t eh_frame_start, unw_word_t eh_frame_end, unw_word_t fde_count, unw_proc_info_t *pi, int need_unwind_info, void *arg) { unw_accessors_t *a = unw_get_accessors_int (unw_local_addr_space); unw_word_t i = 0, fde_addr, addr = eh_frame_start; int ret; while (i++ < fde_count && addr < eh_frame_end) { fde_addr = addr; if ((ret = dwarf_extract_proc_info_from_fde (as, a, &addr, pi, eh_frame_start, 0, 0, arg)) < 0) return ret; if (ip >= pi->start_ip && ip < pi->end_ip) { if (!need_unwind_info) return 1; addr = fde_addr; if ((ret = dwarf_extract_proc_info_from_fde (as, a, &addr, pi, eh_frame_start, need_unwind_info, 0, arg)) < 0) return ret; return 1; } } return -UNW_ENOINFO; } #endif /* !UNW_REMOTE_ONLY */ #ifdef CONFIG_DEBUG_FRAME /* Load .debug_frame section from FILE. Allocates and returns space in *BUF, and sets *BUFSIZE to its size. IS_LOCAL is 1 if using the local process, in which case we can search the system debug file directory; 0 for other address spaces, in which case we do not. Returns 0 on success, 1 on error. Succeeds even if the file contains no .debug_frame. */ /* XXX: Could use mmap; but elf_map_image keeps tons mapped in. 
*/ static int load_debug_frame (const char *file, char **buf, size_t *bufsize, int is_local, unw_word_t segbase, unw_word_t *load_offset) { struct elf_image ei; Elf_W (Ehdr) *ehdr; Elf_W (Phdr) *phdr; Elf_W (Shdr) *shdr; int i; int ret; ei.image = NULL; *load_offset = 0; ret = elf_w (load_debuglink) (file, &ei, is_local); if (ret != 0) return ret; shdr = elf_w (find_section) (&ei, ".debug_frame"); if (!shdr || (shdr->sh_offset + shdr->sh_size > ei.size)) { munmap(ei.image, ei.size); return 1; } #if defined(SHF_COMPRESSED) if (shdr->sh_flags & SHF_COMPRESSED) { unsigned long destSize; Elf_W (Chdr) *chdr = (shdr->sh_offset + ei.image); #ifdef HAVE_ZLIB if (chdr->ch_type == ELFCOMPRESS_ZLIB) { *bufsize = destSize = chdr->ch_size; GET_MEMORY (*buf, *bufsize); if (!*buf) { Debug (2, "failed to allocate zlib .debug_frame buffer, skipping\n"); munmap(ei.image, ei.size); return 1; } ret = uncompress((unsigned char *)*buf, &destSize, shdr->sh_offset + ei.image + sizeof(*chdr), shdr->sh_size - sizeof(*chdr)); if (ret != Z_OK) { Debug (2, "failed to decompress zlib .debug_frame, skipping\n"); munmap(*buf, *bufsize); munmap(ei.image, ei.size); return 1; } Debug (4, "read %zd->%zd bytes of .debug_frame from offset %zd\n", shdr->sh_size, *bufsize, shdr->sh_offset); } else #endif /* HAVE_ZLIB */ { Debug (2, "unknown compression type %d, skipping\n", chdr->ch_type); munmap(ei.image, ei.size); return 1; } } else { #endif *bufsize = shdr->sh_size; GET_MEMORY (*buf, *bufsize); if (!*buf) { Debug (2, "failed to allocate .debug_frame buffer, skipping\n"); munmap(ei.image, ei.size); return 1; } memcpy(*buf, shdr->sh_offset + ei.image, *bufsize); Debug (4, "read %zd bytes of .debug_frame from offset %zd\n", *bufsize, shdr->sh_offset); #if defined(SHF_COMPRESSED) } #endif ehdr = ei.image; phdr = (Elf_W (Phdr) *) ((char *) ei.image + ehdr->e_phoff); for (i = 0; i < ehdr->e_phnum; ++i) if (phdr[i].p_type == PT_LOAD) { *load_offset = segbase - phdr[i].p_vaddr; Debug (4, "%s load offset is 
0x%zx\n", file, *load_offset); break; } munmap(ei.image, ei.size); return 0; } /* Locate the binary which originated the contents of address ADDR. Return the name of the binary in *name (space is allocated by the caller) Returns 0 if a binary is successfully found, or 1 if an error occurs. */ static int find_binary_for_address (unw_word_t ip, char *name, size_t name_size) { #if defined(__linux__) && (!UNW_REMOTE_ONLY) struct map_iterator mi; int found = 0; int pid = getpid (); unsigned long segbase, mapoff, hi; if (maps_init (&mi, pid) != 0) return 1; while (maps_next (&mi, &segbase, &hi, &mapoff, NULL)) if (ip >= segbase && ip < hi) { size_t len = strlen (mi.path); if (len + 1 <= name_size) { memcpy (name, mi.path, len + 1); found = 1; } break; } maps_close (&mi); return !found; #endif return 1; } /* Locate and/or try to load a debug_frame section for address ADDR. Return pointer to debug frame descriptor, or zero if not found. */ static struct unw_debug_frame_list * locate_debug_info (unw_addr_space_t as, unw_word_t addr, unw_word_t segbase, const char *dlname, unw_word_t start, unw_word_t end) { struct unw_debug_frame_list *w, *fdesc = 0; char path[PATH_MAX]; char *name = path; int err; char *buf; size_t bufsize; unw_word_t load_offset; /* First, see if we loaded this frame already. */ for (w = as->debug_frames; w; w = w->next) { Debug (4, "checking %p: %lx-%lx\n", w, (long)w->start, (long)w->end); if (addr >= w->start && addr < w->end) return w; } /* If the object name we receive is blank, there's still a chance of locating the file by parsing /proc/self/maps. 
*/ if (strcmp (dlname, "") == 0) { err = find_binary_for_address (addr, name, sizeof(path)); if (err) { Debug (15, "tried to locate binary for 0x%" PRIx64 ", but no luck\n", (uint64_t) addr); return 0; } } else name = (char*) dlname; err = load_debug_frame (name, &buf, &bufsize, as == unw_local_addr_space, segbase, &load_offset); if (!err) { GET_MEMORY (fdesc, sizeof (struct unw_debug_frame_list)); if (!fdesc) { Debug (2, "failed to allocate frame list entry\n"); return 0; } fdesc->start = start; fdesc->end = end; fdesc->load_offset = load_offset; fdesc->debug_frame = buf; fdesc->debug_frame_size = bufsize; fdesc->index = NULL; fdesc->next = as->debug_frames; as->debug_frames = fdesc; } return fdesc; } static size_t debug_frame_index_make (struct unw_debug_frame_list *fdesc) { unw_accessors_t *a = unw_get_accessors_int (unw_local_addr_space); char *buf = fdesc->debug_frame; size_t bufsize = fdesc->debug_frame_size; unw_word_t addr = (unw_word_t) (uintptr_t) buf; size_t count = 0; while (addr < (unw_word_t) (uintptr_t) (buf + bufsize)) { unw_word_t item_start = addr, item_end = 0; uint32_t u32val = 0; uint64_t cie_id = 0; uint64_t id_for_cie; dwarf_readu32 (unw_local_addr_space, a, &addr, &u32val, NULL); if (u32val == 0) break; if (u32val != 0xffffffff) { uint32_t cie_id32 = 0; item_end = addr + u32val; dwarf_readu32 (unw_local_addr_space, a, &addr, &cie_id32, NULL); cie_id = cie_id32; id_for_cie = 0xffffffff; } else { uint64_t u64val = 0; /* Extended length. 
*/ dwarf_readu64 (unw_local_addr_space, a, &addr, &u64val, NULL); item_end = addr + u64val; dwarf_readu64 (unw_local_addr_space, a, &addr, &cie_id, NULL); id_for_cie = 0xffffffffffffffffull; } /*Debug (1, "CIE/FDE id = %.8x\n", (int) cie_id);*/ if (cie_id == id_for_cie) { ; /*Debug (1, "Found CIE at %.8x.\n", item_start);*/ } else { unw_word_t fde_addr = item_start; unw_proc_info_t this_pi; int err; /*Debug (1, "Found FDE at %.8x\n", item_start);*/ err = dwarf_extract_proc_info_from_fde (unw_local_addr_space, a, &fde_addr, &this_pi, (uintptr_t) buf, 0, 1, NULL); if (!err) { Debug (15, "start_ip = %lx, end_ip = %lx\n", (long) this_pi.start_ip, (long) this_pi.end_ip); if (fdesc->index) { struct table_entry *e = &fdesc->index[count]; e->fde_offset = item_start - (unw_word_t) (uintptr_t) buf; e->start_ip_offset = this_pi.start_ip; } count++; } /*else Debug (1, "FDE parse failed\n");*/ } addr = item_end; } return count; } static void debug_frame_index_sort (struct unw_debug_frame_list *fdesc) { size_t i, j, k, n = fdesc->index_size / sizeof (*fdesc->index); struct table_entry *a = fdesc->index; struct table_entry t; /* Use a simple Shell sort as it relatively fast and * does not require additional memory. 
*/ for (k = n / 2; k > 0; k /= 2) { for (i = k; i < n; i++) { t = a[i]; for (j = i; j >= k; j -= k) { if (t.start_ip_offset >= a[j - k].start_ip_offset) break; a[j] = a[j - k]; } a[j] = t; } } } int dwarf_find_debug_frame (int found, unw_dyn_info_t *di_debug, unw_word_t ip, unw_word_t segbase, const char* obj_name, unw_word_t start, unw_word_t end) { unw_dyn_info_t *di = di_debug; struct unw_debug_frame_list *fdesc; Debug (15, "Trying to find .debug_frame for %s\n", obj_name); fdesc = locate_debug_info (unw_local_addr_space, ip, segbase, obj_name, start, end); if (!fdesc) { Debug (15, "couldn't load .debug_frame\n"); return found; } Debug (15, "loaded .debug_frame\n"); if (fdesc->debug_frame_size == 0) { Debug (15, "zero-length .debug_frame\n"); return found; } /* Now create a binary-search table, if it does not already exist. */ if (!fdesc->index) { /* Find all FDE entries in debug_frame, and make into a sorted index. First determine an index element count. */ size_t count = debug_frame_index_make (fdesc); if (!count) { Debug (15, "no CIE/FDE found in .debug_frame\n"); return found; } fdesc->index_size = count * sizeof (*fdesc->index); GET_MEMORY (fdesc->index, fdesc->index_size); if (!fdesc->index) { Debug (15, "couldn't allocate a frame index table\n"); fdesc->index_size = 0; return found; } /* Then fill and sort the index. 
*/ debug_frame_index_make (fdesc); debug_frame_index_sort (fdesc); /*for (i = 0; i < count; i++) { const struct table_entry *e = &fdesc->index[i]; Debug (15, "ip %x, FDE offset %x\n", e->start_ip_offset, e->fde_offset); }*/ } di->format = UNW_INFO_FORMAT_TABLE; di->start_ip = fdesc->start; di->end_ip = fdesc->end; di->load_offset = fdesc->load_offset; di->u.ti.name_ptr = (unw_word_t) (uintptr_t) obj_name; di->u.ti.table_data = (unw_word_t *) fdesc; di->u.ti.table_len = sizeof (*fdesc) / sizeof (unw_word_t); di->u.ti.segbase = segbase; found = 1; Debug (15, "found debug_frame table `%s': segbase=0x%lx, len=%lu, " "gp=0x%lx, table_data=0x%lx\n", (char *) (uintptr_t) di->u.ti.name_ptr, (long) di->u.ti.segbase, (long) di->u.ti.table_len, (long) di->gp, (long) di->u.ti.table_data); return found; } #endif /* CONFIG_DEBUG_FRAME */ #ifndef UNW_REMOTE_ONLY static Elf_W (Addr) dwarf_find_eh_frame_section(struct dl_phdr_info *info) { int rc; struct elf_image ei; Elf_W (Addr) eh_frame = 0; Elf_W (Shdr)* shdr; const char *file = info->dlpi_name; char exepath[PATH_MAX]; if (strlen(file) == 0) { tdep_get_exe_image_path(exepath); file = exepath; } Debug (1, "looking for .eh_frame section in %s\n", file); rc = elf_map_image (&ei, file); if (rc != 0) return 0; shdr = elf_w (find_section) (&ei, ".eh_frame"); if (!shdr) goto out; eh_frame = shdr->sh_addr + info->dlpi_addr; Debug (4, "found .eh_frame at address %lx\n", eh_frame); out: munmap (ei.image, ei.size); return eh_frame; } struct dwarf_callback_data { /* in: */ unw_word_t ip; /* instruction-pointer we're looking for */ unw_proc_info_t *pi; /* proc-info pointer */ int need_unwind_info; /* out: */ int single_fde; /* did we find a single FDE? (vs. a table) */ unw_dyn_info_t di; /* table info (if single_fde is false) */ unw_dyn_info_t di_debug; /* additional table info for .debug_frame */ }; /* ptr is a pointer to a dwarf_callback_data structure and, on entry, member ip contains the instruction-pointer we're looking for. 
*/ HIDDEN int dwarf_callback (struct dl_phdr_info *info, size_t size, void *ptr) { struct dwarf_callback_data *cb_data = ptr; unw_dyn_info_t *di = &cb_data->di; const Elf_W(Phdr) *phdr, *p_eh_hdr, *p_dynamic, *p_text; unw_word_t addr, eh_frame_start, eh_frame_end, fde_count, ip; Elf_W(Addr) load_base, max_load_addr = 0; int ret, need_unwind_info = cb_data->need_unwind_info; unw_proc_info_t *pi = cb_data->pi; struct dwarf_eh_frame_hdr *hdr = NULL; unw_accessors_t *a; long n; int found = 0; struct dwarf_eh_frame_hdr synth_eh_frame_hdr; #ifdef CONFIG_DEBUG_FRAME unw_word_t start, end; #endif /* CONFIG_DEBUG_FRAME*/ ip = cb_data->ip; /* Make sure struct dl_phdr_info is at least as big as we need. */ if (size < offsetof (struct dl_phdr_info, dlpi_phnum) + sizeof (info->dlpi_phnum)) return -1; Debug (15, "checking %s, base=0x%lx)\n", info->dlpi_name, (long) info->dlpi_addr); phdr = info->dlpi_phdr; load_base = info->dlpi_addr; p_text = NULL; p_eh_hdr = NULL; p_dynamic = NULL; /* See if PC falls into one of the loaded segments. Find the eh-header segment at the same time. 
*/ for (n = info->dlpi_phnum; --n >= 0; phdr++) { if (phdr->p_type == PT_LOAD) { Elf_W(Addr) vaddr = phdr->p_vaddr + load_base; if (ip >= vaddr && ip < vaddr + phdr->p_memsz) p_text = phdr; if (vaddr + phdr->p_filesz > max_load_addr) max_load_addr = vaddr + phdr->p_filesz; } else if (phdr->p_type == PT_GNU_EH_FRAME) p_eh_hdr = phdr; #if defined __sun else if (phdr->p_type == PT_SUNW_UNWIND) p_eh_hdr = phdr; #endif else if (phdr->p_type == PT_DYNAMIC) p_dynamic = phdr; } if (!p_text) return 0; if (p_eh_hdr) { hdr = (struct dwarf_eh_frame_hdr *) (p_eh_hdr->p_vaddr + load_base); } else { Elf_W (Addr) eh_frame; Debug (1, "no .eh_frame_hdr section found\n"); eh_frame = dwarf_find_eh_frame_section (info); if (eh_frame) { Debug (1, "using synthetic .eh_frame_hdr section for %s\n", info->dlpi_name); synth_eh_frame_hdr.version = DW_EH_VERSION; synth_eh_frame_hdr.eh_frame_ptr_enc = DW_EH_PE_absptr | ((sizeof(Elf_W (Addr)) == 4) ? DW_EH_PE_udata4 : DW_EH_PE_udata8); synth_eh_frame_hdr.fde_count_enc = DW_EH_PE_omit; synth_eh_frame_hdr.table_enc = DW_EH_PE_omit; synth_eh_frame_hdr.eh_frame = eh_frame; hdr = &synth_eh_frame_hdr; } } if (hdr) { if (p_dynamic) { /* For dynamicly linked executables and shared libraries, DT_PLTGOT is the value that data-relative addresses are relative to for that object. We call this the "gp". */ Elf_W(Dyn) *dyn = (Elf_W(Dyn) *)(p_dynamic->p_vaddr + load_base); for (; dyn->d_tag != DT_NULL; ++dyn) if (dyn->d_tag == DT_PLTGOT) { /* Assume that _DYNAMIC is writable and GLIBC has relocated it (true for x86 at least). */ di->gp = dyn->d_un.d_ptr; break; } } else /* Otherwise this is a static executable with no _DYNAMIC. Assume that data-relative addresses are relative to 0, i.e., absolute. 
*/ di->gp = 0; pi->gp = di->gp; if (hdr->version != DW_EH_VERSION) { Debug (1, "table `%s' has unexpected version %d\n", info->dlpi_name, hdr->version); return 0; } a = unw_get_accessors_int (unw_local_addr_space); addr = (unw_word_t) (uintptr_t) (&hdr->eh_frame); /* (Optionally) read eh_frame_ptr: */ if ((ret = dwarf_read_encoded_pointer (unw_local_addr_space, a, &addr, hdr->eh_frame_ptr_enc, pi, &eh_frame_start, NULL)) < 0) return ret; /* (Optionally) read fde_count: */ if ((ret = dwarf_read_encoded_pointer (unw_local_addr_space, a, &addr, hdr->fde_count_enc, pi, &fde_count, NULL)) < 0) return ret; if (hdr->table_enc != (DW_EH_PE_datarel | DW_EH_PE_sdata4)) { /* If there is no search table or it has an unsupported encoding, fall back on linear search. */ if (hdr->table_enc == DW_EH_PE_omit) { Debug (4, "table `%s' lacks search table; doing linear search\n", info->dlpi_name); } else { Debug (4, "table `%s' has encoding 0x%x; doing linear search\n", info->dlpi_name, hdr->table_enc); } eh_frame_end = max_load_addr; /* XXX can we do better? */ if (hdr->fde_count_enc == DW_EH_PE_omit) fde_count = ~0UL; if (hdr->eh_frame_ptr_enc == DW_EH_PE_omit) abort (); Debug (1, "eh_frame_start = %lx eh_frame_end = %lx\n", eh_frame_start, eh_frame_end); /* XXX we know how to build a local binary search table for .debug_frame, so we could do that here too. 
*/ found = linear_search (unw_local_addr_space, ip, eh_frame_start, eh_frame_end, fde_count, pi, need_unwind_info, NULL); if (found != 1) found = 0; else cb_data->single_fde = 1; } else { di->format = UNW_INFO_FORMAT_REMOTE_TABLE; di->start_ip = p_text->p_vaddr + load_base; di->end_ip = p_text->p_vaddr + load_base + p_text->p_memsz; di->u.rti.name_ptr = (unw_word_t) (uintptr_t) info->dlpi_name; di->u.rti.table_data = addr; assert (sizeof (struct table_entry) % sizeof (unw_word_t) == 0); di->u.rti.table_len = (fde_count * sizeof (struct table_entry) / sizeof (unw_word_t)); /* For the binary-search table in the eh_frame_hdr, data-relative means relative to the start of that section... */ di->u.rti.segbase = (unw_word_t) (uintptr_t) hdr; found = 1; Debug (15, "found table `%s': segbase=0x%lx, len=%lu, gp=0x%lx, " "table_data=0x%lx\n", (char *) (uintptr_t) di->u.rti.name_ptr, (long) di->u.rti.segbase, (long) di->u.rti.table_len, (long) di->gp, (long) di->u.rti.table_data); } } #ifdef CONFIG_DEBUG_FRAME /* Find the start/end of the described region by parsing the phdr_info structure. 
*/ start = (unw_word_t) -1; end = 0; for (n = 0; n < info->dlpi_phnum; n++) { if (info->dlpi_phdr[n].p_type == PT_LOAD) { unw_word_t seg_start = info->dlpi_addr + info->dlpi_phdr[n].p_vaddr; unw_word_t seg_end = seg_start + info->dlpi_phdr[n].p_memsz; if (seg_start < start) start = seg_start; if (seg_end > end) end = seg_end; } } found = dwarf_find_debug_frame (found, &cb_data->di_debug, ip, info->dlpi_addr, info->dlpi_name, start, end); #endif /* CONFIG_DEBUG_FRAME */ return found; } HIDDEN int dwarf_find_proc_info (unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi, int need_unwind_info, void *arg) { struct dwarf_callback_data cb_data; intrmask_t saved_mask; int ret; Debug (14, "looking for IP=0x%lx\n", (long) ip); memset (&cb_data, 0, sizeof (cb_data)); cb_data.ip = ip; cb_data.pi = pi; cb_data.need_unwind_info = need_unwind_info; cb_data.di.format = -1; cb_data.di_debug.format = -1; SIGPROCMASK (SIG_SETMASK, &unwi_full_mask, &saved_mask); ret = dl_iterate_phdr (dwarf_callback, &cb_data); SIGPROCMASK (SIG_SETMASK, &saved_mask, NULL); if (ret > 0) { if (cb_data.single_fde) /* already got the result in *pi */ return 0; /* search the table: */ if (cb_data.di.format != -1) ret = dwarf_search_unwind_table_int (as, ip, &cb_data.di, pi, need_unwind_info, arg); else ret = -UNW_ENOINFO; if (ret == -UNW_ENOINFO && cb_data.di_debug.format != -1) ret = dwarf_search_unwind_table_int (as, ip, &cb_data.di_debug, pi, need_unwind_info, arg); } else ret = -UNW_ENOINFO; return ret; } static inline const struct table_entry * lookup (const struct table_entry *table, size_t table_size, int32_t rel_ip) { unsigned long table_len = table_size / sizeof (struct table_entry); const struct table_entry *e = NULL; unsigned long lo, hi, mid; /* do a binary search for right entry: */ for (lo = 0, hi = table_len; lo < hi;) { mid = (lo + hi) / 2; e = table + mid; Debug (15, "e->start_ip_offset = %lx\n", (long) e->start_ip_offset); if (rel_ip < e->start_ip_offset) hi = mid; else lo = mid + 1; 
} if (hi <= 0) return NULL; e = table + hi - 1; return e; } #endif /* !UNW_REMOTE_ONLY */ #ifndef UNW_LOCAL_ONLY /* Lookup an unwind-table entry in remote memory. Returns 1 if an entry is found, 0 if no entry is found, negative if an error occurred reading remote memory. */ static int remote_lookup (unw_addr_space_t as, unw_word_t table, size_t table_size, int32_t rel_ip, struct table_entry *e, int32_t *last_ip_offset, void *arg) { size_t table_len = table_size / sizeof (struct table_entry); unw_accessors_t *a = unw_get_accessors_int (as); size_t lo, hi, mid; unw_word_t e_addr = 0; int32_t start = 0; int ret; /* do a binary search for right entry: */ for (lo = 0, hi = table_len; lo < hi;) { mid = (lo + hi) / 2; e_addr = table + mid * sizeof (struct table_entry); if ((ret = dwarf_reads32 (as, a, &e_addr, &start, arg)) < 0) return ret; if (rel_ip < start) hi = mid; else lo = mid + 1; } if (hi <= 0) return 0; e_addr = table + (hi - 1) * sizeof (struct table_entry); if ((ret = dwarf_reads32 (as, a, &e_addr, &e->start_ip_offset, arg)) < 0 || (ret = dwarf_reads32 (as, a, &e_addr, &e->fde_offset, arg)) < 0 || (hi < table_len && (ret = dwarf_reads32 (as, a, &e_addr, last_ip_offset, arg)) < 0)) return ret; return 1; } #endif /* !UNW_LOCAL_ONLY */ static int is_remote_table(int format) { return (format == UNW_INFO_FORMAT_REMOTE_TABLE || format == UNW_INFO_FORMAT_IP_OFFSET); } int dwarf_search_unwind_table (unw_addr_space_t as, unw_word_t ip, unw_dyn_info_t *di, unw_proc_info_t *pi, int need_unwind_info, void *arg) { const struct table_entry *e = NULL, *table; unw_word_t ip_base = 0, segbase = 0, last_ip, fde_addr; unw_accessors_t *a; #ifndef UNW_LOCAL_ONLY struct table_entry ent; #endif int ret; unw_word_t debug_frame_base; size_t table_len; #ifdef UNW_REMOTE_ONLY assert (is_remote_table(di->format)); #else assert (is_remote_table(di->format) || di->format == UNW_INFO_FORMAT_TABLE); #endif assert (ip >= di->start_ip && ip < di->end_ip); if (is_remote_table(di->format)) { 
table = (const struct table_entry *) (uintptr_t) di->u.rti.table_data; table_len = di->u.rti.table_len * sizeof (unw_word_t); debug_frame_base = 0; } else { assert(di->format == UNW_INFO_FORMAT_TABLE); #ifndef UNW_REMOTE_ONLY struct unw_debug_frame_list *fdesc = (void *) di->u.ti.table_data; /* UNW_INFO_FORMAT_TABLE (i.e. .debug_frame) is read from local address space. Both the index and the unwind tables live in local memory, but the address space to check for properties like the address size and endianness is the target one. */ as = unw_local_addr_space; table = fdesc->index; table_len = fdesc->index_size; debug_frame_base = (uintptr_t) fdesc->debug_frame; #endif } a = unw_get_accessors_int (as); segbase = di->u.rti.segbase; if (di->format == UNW_INFO_FORMAT_IP_OFFSET) { ip_base = di->start_ip; } else { ip_base = segbase; } Debug (6, "lookup IP 0x%lx\n", (long) (ip - ip_base - di->load_offset)); #ifndef UNW_REMOTE_ONLY if (as == unw_local_addr_space) { e = lookup (table, table_len, ip - ip_base - di->load_offset); if (e && &e[1] < &table[table_len]) last_ip = e[1].start_ip_offset + ip_base + di->load_offset; else last_ip = di->end_ip; } else #endif { #ifndef UNW_LOCAL_ONLY int32_t last_ip_offset = di->end_ip - ip_base - di->load_offset; segbase = di->u.rti.segbase; if ((ret = remote_lookup (as, (uintptr_t) table, table_len, ip - ip_base, &ent, &last_ip_offset, arg)) < 0) return ret; if (ret) { e = &ent; last_ip = last_ip_offset + ip_base + di->load_offset; } else e = NULL; /* no info found */ #endif } if (!e) { Debug (1, "IP %lx inside range %lx-%lx, but no explicit unwind info found\n", (long) ip, (long) di->start_ip, (long) di->end_ip); /* IP is inside this table's range, but there is no explicit unwind info. 
*/ return -UNW_ENOINFO; } Debug (15, "ip=0x%lx, load_offset=0x%lx, start_ip=0x%lx\n", (long) ip, (long) di->load_offset, (long) (e->start_ip_offset)); if (debug_frame_base) fde_addr = e->fde_offset + debug_frame_base; else fde_addr = e->fde_offset + segbase; Debug (1, "e->fde_offset = %lx, segbase = %lx, debug_frame_base = %lx, " "fde_addr = %lx\n", (long) e->fde_offset, (long) segbase, (long) debug_frame_base, (long) fde_addr); if ((ret = dwarf_extract_proc_info_from_fde (as, a, &fde_addr, pi, debug_frame_base ? debug_frame_base : segbase, need_unwind_info, debug_frame_base != 0, arg)) < 0) return ret; /* .debug_frame uses an absolute encoding that does not know about any shared library relocation. */ if (di->format == UNW_INFO_FORMAT_TABLE) { pi->start_ip += segbase; pi->end_ip += segbase; pi->flags = UNW_PI_FLAG_DEBUG_FRAME; } pi->start_ip += di->load_offset; pi->end_ip += di->load_offset; #if defined(NEED_LAST_IP) pi->last_ip = last_ip; #else (void)last_ip; #endif if (ip < pi->start_ip || ip >= pi->end_ip) return -UNW_ENOINFO; return 0; } HIDDEN void dwarf_put_unwind_info (unw_addr_space_t as, unw_proc_info_t *pi, void *arg) { return; /* always a nop */ }
1
dotnet/runtime
66,234
Remove compiler warning suppression
Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
AaronRobinsonMSFT
2022-03-05T03:45:49Z
2022-03-09T00:57:12Z
220e67755ae6323a01011b83070dff6f84b02519
853e494abd1c1e12d28a76829b4680af7f694afa
Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
./src/coreclr/pal/src/safecrt/input.inl
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*** *input.c - C formatted input, used by scanf, etc. * * *Purpose: * defines _input() to do formatted input; called from scanf(), * etc. functions. This module defines _cscanf() instead when * CPRFLAG is defined. The file cscanf.c defines that symbol * and then includes this file in order to implement _cscanf(). * *Note: * this file is included in safecrt.lib build directly, plese refer * to safecrt_[w]input_s.c * *******************************************************************************/ #define ALLOW_RANGE /* enable "%[a-z]"-style scansets */ /* temporary work-around for compiler without 64-bit support */ #ifndef _INTEGRAL_MAX_BITS #define _INTEGRAL_MAX_BITS 64 #endif /* _INTEGRAL_MAX_BITS */ // typedef __int64_t __int64; #ifndef FALSE #define FALSE 0 #endif #ifndef TRUE #define TRUE 1 #endif #define UNALIGNED #define _BEGIN_SECURE_CRT_DEPRECATION_DISABLE #define _END_SECURE_CRT_DEPRECATION_DISABLE #define _CVTBUFSIZE (309+40) /* # of digits in max. 
dp value + slop */ #define _MBTOWC(x,y,z) _minimal_chartowchar( x, y ) #define _istspace(x) isspace((unsigned char)x) #define _malloc_crt PAL_malloc #define _realloc_crt PAL_realloc #define _free_crt PAL_free #define _FASSIGN(flag, argument, number, dec_point, locale) _safecrt_fassign((flag), (argument), (number)) #define _WFASSIGN(flag, argument, number, dec_point, locale) _safecrt_wfassign((flag), (argument), (number)) #if defined (UNICODE) #define ALLOC_TABLE 1 #else /* defined (UNICODE) */ #define ALLOC_TABLE 0 #endif /* defined (UNICODE) */ #define HEXTODEC(chr) _hextodec(chr) #define LEFT_BRACKET ('[' | ('a' - 'A')) /* 'lowercase' version */ static int __cdecl _hextodec(_TCHAR); #ifdef CPRFLAG #define INC() (++charcount, _inc()) #define UN_INC(chr) (--charcount, _un_inc(chr)) #define EAT_WHITE() _whiteout(&charcount) static int __cdecl _inc(void); static void __cdecl _un_inc(int); static int __cdecl _whiteout(int *); #else /* CPRFLAG */ #define INC() (++charcount, _inc(stream)) #define UN_INC(chr) (--charcount, _un_inc(chr, stream)) #define EAT_WHITE() _whiteout(&charcount, stream) static int __cdecl _inc(miniFILE *); static void __cdecl _un_inc(int, miniFILE *); static int __cdecl _whiteout(int *, miniFILE *); #endif /* CPRFLAG */ #undef _ISDIGIT #undef _ISXDIGIT #ifndef _UNICODE #define _ISDIGIT(chr) isdigit((unsigned char)chr) #define _ISXDIGIT(chr) isxdigit((unsigned char)chr) #else /* _UNICODE */ #define _ISDIGIT(chr) ( !(chr & 0xff00) && isdigit( ((chr) & 0x00ff) ) ) #define _ISXDIGIT(chr) ( !(chr & 0xff00) && isxdigit( ((chr) & 0x00ff) ) ) #endif /* _UNICODE */ #define MUL10(x) ( (((x)<<2) + (x))<<1 ) #define LONGLONG_IS_INT64 1 /* 1 means long long is same as int64 0 means long long is same as long */ /*** * int __check_float_string(size_t,size_t *, _TCHAR**, _TCHAR*, int*) * * Purpose: * Check if there is enough space insert onemore character in the given * block, if not then allocate more memory. 
* * Return: * FALSE if more memory needed and the reallocation failed. * *******************************************************************************/ static int __check_float_string(size_t nFloatStrUsed, size_t *pnFloatStrSz, _TCHAR **pFloatStr, _TCHAR *floatstring, int *pmalloc_FloatStrFlag) { void *tmpPointer; _ASSERTE(nFloatStrUsed<=(*pnFloatStrSz)); if (nFloatStrUsed==(*pnFloatStrSz)) { size_t newSize; // Will (*pnFloatStrSz) * 2 * sizeof(_TCHAR) overflow? if ( *pnFloatStrSz > (SIZE_T_MAX / 2 / sizeof(_TCHAR))) { return FALSE; } newSize = *pnFloatStrSz * 2 * sizeof(_TCHAR); if ((*pFloatStr)==floatstring) { if (((*pFloatStr)=(_TCHAR *)_malloc_crt(newSize))==NULL) { return FALSE; } (*pmalloc_FloatStrFlag)=1; memcpy((*pFloatStr),floatstring,(*pnFloatStrSz)*sizeof(_TCHAR)); (*pnFloatStrSz)*=2; } else { if ((tmpPointer=(_TCHAR *)_realloc_crt((*pFloatStr), newSize))==NULL) { return FALSE; } (*pFloatStr)=(_TCHAR *)(tmpPointer); (*pnFloatStrSz)*=2; } } return TRUE; } #define ASCII 32 /* # of bytes needed to hold 256 bits */ #define SCAN_SHORT 0 /* also for FLOAT */ #define SCAN_LONG 1 /* also for DOUBLE */ #define SCAN_L_DOUBLE 2 /* only for LONG DOUBLE */ #define SCAN_NEAR 0 #define SCAN_FAR 1 #ifndef _UNICODE #define TABLESIZE ASCII #else /* _UNICODE */ #define TABLESIZE (ASCII * 256) #endif /* _UNICODE */ /*** *int _input(stream, format, arglist), static int input(format, arglist) * *Purpose: * get input items (data items or literal matches) from the input stream * and assign them if appropriate to the items thru the arglist. this * function is intended for internal library use only, not for the user * * The _input entry point is for the normal scanf() functions * The input entry point is used when compiling for _cscanf() [CPRFLAF * defined] and is a static function called only by _cscanf() -- reads from * console. * * This code also defines _input_s, which works differently for %c, %s & %[. 
* For these, _input_s first picks up the next argument from the variable * argument list & uses it as the maximum size of the character array pointed * to by the next argument in the list. * *Entry: * FILE *stream - file to read from * char *format - format string to determine the data to read * arglist - list of pointer to data items * *Exit: * returns number of items assigned and fills in data items * returns EOF if error or EOF found on stream before 1st data item matched * *Exceptions: * *******************************************************************************/ #define _INTRN_LOCALE_CONV( x ) localeconv() #ifndef _UNICODE int __cdecl __tinput_s (miniFILE* stream, const _TUCHAR* format, va_list arglist) #else int __cdecl __twinput_s (miniFILE* stream, const _TUCHAR* format, va_list arglist) #endif /* _UNICODE */ { _TCHAR floatstring[_CVTBUFSIZE + 1]; _TCHAR *pFloatStr=floatstring; size_t nFloatStrUsed=0; size_t nFloatStrSz=sizeof(floatstring)/sizeof(floatstring[0]); int malloc_FloatStrFlag=0; unsigned long number; /* temp hold-value */ #if ALLOC_TABLE char *table = NULL; /* which chars allowed for %[] */ int malloc_flag = 0; /* is "table" allocated on the heap? */ #else /* ALLOC_TABLE */ char AsciiTable[TABLESIZE]; char *table = AsciiTable; #endif /* ALLOC_TABLE */ #if _INTEGRAL_MAX_BITS >= 64 uint64_t num64 = 0LL; /* temp for 64-bit integers */ #endif /* _INTEGRAL_MAX_BITS >= 64 */ void *pointer=NULL; /* points to user data receptacle */ void *start; /* indicate non-empty string */ #ifndef _UNICODE char16_t wctemp=L'\0'; #endif /* _UNICODE */ _TUCHAR *scanptr; /* for building "table" data */ int ch = 0; int charcount; /* total number of chars read */ int comchr; /* holds designator type */ int count; /* return value. 
# of assignments */ int started; /* indicate good number */ int width; /* width of field */ int widthset; /* user has specified width */ #ifdef _SECURE_SCANF size_t array_width = 0; size_t original_array_width = 0; int enomem = 0; int format_error = FALSE; #endif /* _SECURE_SCANF */ /* Neither coerceshort nor farone are need for the 386 */ char done_flag; /* general purpose loop monitor */ char longone; /* 0 = SHORT, 1 = LONG, 2 = L_DOUBLE */ #if _INTEGRAL_MAX_BITS >= 64 int integer64; /* 1 for 64-bit integer, 0 otherwise */ #endif /* _INTEGRAL_MAX_BITS >= 64 */ signed char widechar; /* -1 = char, 0 = ????, 1 = char16_t */ char reject; /* %[^ABC] instead of %[ABC] */ char negative; /* flag for '-' detected */ char suppress; /* don't assign anything */ char match; /* flag: !0 if any fields matched */ va_list arglistsave; /* save arglist value */ char fl_wchar_arg; /* flags wide char/string argument */ _TCHAR decimal; _TUCHAR rngch; _TUCHAR last; _TUCHAR prevchar; _TCHAR tch; _VALIDATE_RETURN( (format != NULL), EINVAL, EOF); #ifndef CPRFLAG _VALIDATE_RETURN( (stream != NULL), EINVAL, EOF); #endif /* CPRFLAG */ /* count = # fields assigned charcount = # chars read match = flag indicating if any fields were matched [Note that we need both count and match. For example, a field may match a format but have assignments suppressed. In this case, match will get set, but 'count' will still equal 0. We need to distinguish 'match vs no-match' when terminating due to EOF.] 
*/ count = charcount = match = 0; while (*format) { if (_istspace((_TUCHAR)*format)) { UN_INC(EAT_WHITE()); /* put first non-space char back */ do { tch = *++format; } while (_istspace((_TUCHAR)tch)); continue; } if (_T('%') == *format) { number = 0; prevchar = 0; width = widthset = started = 0; #ifdef _SECURE_SCANF original_array_width = array_width = 0; enomem = 0; #endif /* _SECURE_SCANF */ fl_wchar_arg = done_flag = suppress = negative = reject = 0; widechar = 0; longone = 1; #if _INTEGRAL_MAX_BITS >= 64 integer64 = 0; #endif /* _INTEGRAL_MAX_BITS >= 64 */ while (!done_flag) { comchr = *++format; if (_ISDIGIT((_TUCHAR)comchr)) { ++widthset; width = MUL10(width) + (comchr - _T('0')); } else switch (comchr) { case _T('F') : case _T('N') : /* no way to push NEAR in large model */ break; /* NEAR is default in small model */ case _T('h') : /* set longone to 0 */ --longone; --widechar; /* set widechar = -1 */ break; #if _INTEGRAL_MAX_BITS >= 64 case _T('I'): if ( (*(format + 1) == _T('6')) && (*(format + 2) == _T('4')) ) { format += 2; ++integer64; num64 = 0; break; } else if ( (*(format + 1) == _T('3')) && (*(format + 2) == _T('2')) ) { format += 2; break; } else if ( (*(format + 1) == _T('d')) || (*(format + 1) == _T('i')) || (*(format + 1) == _T('o')) || (*(format + 1) == _T('x')) || (*(format + 1) == _T('X')) ) { if (sizeof(void*) == sizeof(__int64)) { ++integer64; num64 = 0; } break; } if (sizeof(void*) == sizeof(__int64)) { ++integer64; num64 = 0; } goto DEFAULT_LABEL; #endif /* _INTEGRAL_MAX_BITS >= 64 */ case _T('L') : /* ++longone; */ ++longone; break; case _T('q'): ++integer64; num64 = 0; break; case _T('l') : if (*(format + 1) == _T('l')) { ++format; #ifdef LONGLONG_IS_INT64 ++integer64; num64 = 0; break; #else /* LONGLONG_IS_INT64 */ ++longone; /* NOBREAK */ #endif /* LONGLONG_IS_INT64 */ } else { ++longone; /* NOBREAK */ } FALLTHROUGH; case _T('w') : ++widechar; /* set widechar = 1 */ break; case _T('*') : ++suppress; break; default: DEFAULT_LABEL: 
++done_flag; break; } } if (!suppress) { va_copy(arglistsave, arglist); pointer = va_arg(arglist,void *); } else { pointer = NULL; // doesn't matter what value we use here - we're only using it as a flag } done_flag = 0; if (!widechar) { /* use case if not explicitly specified */ if ((*format == _T('S')) || (*format == _T('C'))) #ifdef _UNICODE --widechar; else ++widechar; #else /* _UNICODE */ ++widechar; else --widechar; #endif /* _UNICODE */ } /* switch to lowercase to allow %E,%G, and to keep the switch table small */ comchr = *format | (_T('a') - _T('A')); if (_T('n') != comchr) { if (_T('c') != comchr && LEFT_BRACKET != comchr) ch = EAT_WHITE(); else ch = INC(); } if (_T('n') != comchr) { if (_TEOF == ch) goto error_return; } if (!widthset || width) { #ifdef _SECURE_SCANF if(!suppress && (comchr == _T('c') || comchr == _T('s') || comchr == LEFT_BRACKET)) { va_copy(arglist, arglistsave); /* Reinitialize pointer to point to the array to which we write the input */ pointer = va_arg(arglist, void*); va_copy(arglistsave, arglist); /* Get the next argument - size of the array in characters */ #ifdef HOST_64BIT original_array_width = array_width = (size_t)(va_arg(arglist, unsigned int)); #else /* HOST_64BIT */ original_array_width = array_width = va_arg(arglist, size_t); #endif /* HOST_64BIT */ if(array_width < 1) { if (widechar > 0) *(char16_t UNALIGNED *)pointer = L'\0'; else *(char *)pointer = '\0'; errno = ENOMEM; goto error_return; } } #endif /* _SECURE_SCANF */ switch(comchr) { case _T('c'): /* case _T('C'): */ if (!widthset) { ++widthset; ++width; } if (widechar > 0) fl_wchar_arg++; goto scanit; case _T('s'): /* case _T('S'): */ if(widechar > 0) fl_wchar_arg++; goto scanit; case LEFT_BRACKET : /* scanset */ if (widechar>0) fl_wchar_arg++; scanptr = (_TUCHAR *)(++format); if (_T('^') == *scanptr) { ++scanptr; --reject; /* set reject to 255 */ } /* Allocate "table" on first %[] spec */ #if ALLOC_TABLE if (table == NULL) { table = (char*)_malloc_crt(TABLESIZE); 
if ( table == NULL) goto error_return; malloc_flag = 1; } #endif /* ALLOC_TABLE */ memset(table, 0, TABLESIZE); if (LEFT_BRACKET == comchr) if (_T(']') == *scanptr) { prevchar = _T(']'); ++scanptr; table[ _T(']') >> 3] = 1 << (_T(']') & 7); } while (_T(']') != *scanptr) { rngch = *scanptr++; if (_T('-') != rngch || !prevchar || /* first char */ _T(']') == *scanptr) /* last char */ table[(prevchar = rngch) >> 3] |= 1 << (rngch & 7); else { /* handle a-z type set */ rngch = *scanptr++; /* get end of range */ if (prevchar < rngch) /* %[a-z] */ last = rngch; else { /* %[z-a] */ last = prevchar; prevchar = rngch; } for (rngch = prevchar; rngch <= last; ++rngch) table[rngch >> 3] |= 1 << (rngch & 7); prevchar = 0; } } if (!*scanptr) goto error_return; /* trunc'd format string */ /* scanset completed. Now read string */ if (LEFT_BRACKET == comchr) format = scanptr; scanit: start = pointer; /* * execute the format directive. that is, scan input * characters until the directive is fulfilled, eof * is reached, or a non-matching character is * encountered. * * it is important not to get the next character * unless that character needs to be tested! other- * wise, reads from line-buffered devices (e.g., * scanf()) would require an extra, spurious, newline * if the first newline completes the current format * directive. 
*/ UN_INC(ch); #ifdef _SECURE_SCANF /* One element is needed for '\0' for %s & %[ */ if(comchr != _T('c')) { --array_width; } #endif /* _SECURE_SCANF */ while ( !widthset || width-- ) { ch = INC(); if ( #ifndef CPRFLAG (_TEOF != ch) && #endif /* CPRFLAG */ // char conditions ( ( comchr == _T('c')) || // string conditions !isspace() ( ( comchr == _T('s') && (!(ch >= _T('\t') && ch <= _T('\r')) && ch != _T(' ')))) || // BRACKET conditions ( (comchr == LEFT_BRACKET) && ((table[ch >> 3] ^ reject) & (1 << (ch & 7))) ) ) ) { if (!suppress) { #ifdef _SECURE_SCANF if(!array_width) { /* We have exhausted the user's buffer */ enomem = 1; break; } #endif /* _SECURE_SCANF */ #ifndef _UNICODE if (fl_wchar_arg) { wctemp = W('?'); char temp[2]; temp[0] = (char) ch; #if 0 // we are not supporting multibyte input strings if (isleadbyte((unsigned char)ch)) { temp[1] = (char) INC(); } #endif /* 0 */ _MBTOWC(&wctemp, temp, MB_CUR_MAX); *(char16_t UNALIGNED *)pointer = wctemp; /* just copy W('?') if mbtowc fails, errno is set by mbtowc */ pointer = (char16_t *)pointer + 1; #ifdef _SECURE_SCANF --array_width; #endif /* _SECURE_SCANF */ } else #else /* _UNICODE */ if (fl_wchar_arg) { *(char16_t UNALIGNED *)pointer = ch; pointer = (char16_t *)pointer + 1; #ifdef _SECURE_SCANF --array_width; #endif /* _SECURE_SCANF */ } else #endif /* _UNICODE */ { #ifndef _UNICODE *(char *)pointer = (char)ch; pointer = (char *)pointer + 1; #ifdef _SECURE_SCANF --array_width; #endif /* _SECURE_SCANF */ #else /* _UNICODE */ int temp = 0; #ifndef _SECURE_SCANF /* convert wide to multibyte */ if (_ERRCHECK_EINVAL_ERANGE(wctomb_s(&temp, (char *)pointer, MB_LEN_MAX, ch)) == 0) { /* do nothing if wctomb fails, errno will be set to EILSEQ */ pointer = (char *)pointer + temp; } #else /* _SECURE_SCANF */ /* convert wide to multibyte */ if (array_width >= ((size_t)MB_CUR_MAX)) { _BEGIN_SECURE_CRT_DEPRECATION_DISABLE temp = wctomb((char *)pointer, ch); _END_SECURE_CRT_DEPRECATION_DISABLE } else { char 
tmpbuf[MB_LEN_MAX]; _BEGIN_SECURE_CRT_DEPRECATION_DISABLE temp = wctomb(tmpbuf, ch); _END_SECURE_CRT_DEPRECATION_DISABLE if (temp > 0 && ((size_t)temp) > array_width) { /* We have exhausted the user's buffer */ enomem = 1; break; } memcpy(pointer, tmpbuf, temp); } if (temp > 0) { /* do nothing if wctomb fails, errno will be set to EILSEQ */ pointer = (char *)pointer + temp; array_width -= temp; } #endif /* _SECURE_SCANF */ #endif /* _UNICODE */ } } /* suppress */ else { /* just indicate a match */ start = (_TCHAR *)start + 1; } } else { UN_INC(ch); break; } } /* make sure something has been matched and, if assignment is not suppressed, null-terminate output string if comchr != c */ #ifdef _SECURE_SCANF if(enomem) { errno = ENOMEM; /* In case of error, blank out the input buffer */ if (fl_wchar_arg) { _RESET_STRING(((char16_t UNALIGNED *)start), original_array_width); } else { _RESET_STRING(((char *)start), original_array_width); } goto error_return; } #endif /* _SECURE_SCANF */ if (start != pointer) { if (!suppress) { ++count; if ('c' != comchr) /* null-terminate strings */ { if (fl_wchar_arg) { *(char16_t UNALIGNED *)pointer = L'\0'; #ifdef _SECURE_SCANF _FILL_STRING(((char16_t UNALIGNED *)start), original_array_width, ((char16_t UNALIGNED *)pointer - (char16_t UNALIGNED *)start + 1)) #endif /* _SECURE_SCANF */ } else { *(char *)pointer = '\0'; #ifdef _SECURE_SCANF _FILL_STRING(((char *)start), original_array_width, ((char *)pointer - (char *)start + 1)) #endif /* _SECURE_SCANF */ } } } else { // supress set, do nothing } } else goto error_return; break; case _T('i') : /* could be d, o, or x */ comchr = _T('d'); /* use as default */ FALLTHROUGH; case _T('x'): if (_T('-') == ch) { ++negative; goto x_incwidth; } else if (_T('+') == ch) { x_incwidth: if (!--width && widthset) ++done_flag; else ch = INC(); } if (_T('0') == ch) { if (_T('x') == (_TCHAR)(ch = INC()) || _T('X') == (_TCHAR)ch) { ch = INC(); if (widthset) { width -= 2; if (width < 1) ++done_flag; } comchr 
= _T('x'); } else { ++started; if (_T('x') != comchr) { if (widthset && !--width) ++done_flag; comchr = _T('o'); } else { /* scanning a hex number that starts */ /* with a 0. push back the character */ /* currently in ch and restore the 0 */ UN_INC(ch); ch = _T('0'); } } } goto getnum; /* NOTREACHED */ case _T('p') : /* force %hp to be treated as %p */ longone = 1; #ifdef HOST_64BIT /* force %p to be 64 bit in WIN64 */ ++integer64; num64 = 0; #endif /* HOST_64BIT */ FALLTHROUGH; case _T('o') : case _T('u') : case _T('d') : if (_T('-') == ch) { ++negative; goto d_incwidth; } else if (_T('+') == ch) { d_incwidth: if (!--width && widthset) ++done_flag; else ch = INC(); } getnum: #if _INTEGRAL_MAX_BITS >= 64 if ( integer64 ) { while (!done_flag) { if (_T('x') == comchr || _T('p') == comchr) if (_ISXDIGIT(ch)) { num64 <<= 4; ch = _hextodec(ch); } else ++done_flag; else if (_ISDIGIT(ch)) if (_T('o') == comchr) if (_T('8') > ch) num64 <<= 3; else { ++done_flag; } else /* _T('d') == comchr */ num64 = MUL10(num64); else ++done_flag; if (!done_flag) { ++started; num64 += ch - _T('0'); if (widthset && !--width) ++done_flag; else ch = INC(); } else UN_INC(ch); } /* end of WHILE loop */ if (negative) num64 = (uint64_t )(-(__int64)num64); } else { #endif /* _INTEGRAL_MAX_BITS >= 64 */ while (!done_flag) { if (_T('x') == comchr || _T('p') == comchr) if (_ISXDIGIT(ch)) { number = (number << 4); ch = _hextodec(ch); } else ++done_flag; else if (_ISDIGIT(ch)) if (_T('o') == comchr) if (_T('8') > ch) number = (number << 3); else { ++done_flag; } else /* _T('d') == comchr */ number = MUL10(number); else ++done_flag; if (!done_flag) { ++started; number += ch - _T('0'); if (widthset && !--width) ++done_flag; else ch = INC(); } else UN_INC(ch); } /* end of WHILE loop */ if (negative) number = (unsigned long)(-(long)number); #if _INTEGRAL_MAX_BITS >= 64 } #endif /* _INTEGRAL_MAX_BITS >= 64 */ if (_T('F')==comchr) /* expected ':' in long pointer */ started = 0; if (started) if (!suppress) { 
++count; assign_num: #if _INTEGRAL_MAX_BITS >= 64 if ( integer64 ) *(__int64 UNALIGNED *)pointer = ( uint64_t )num64; else #endif /* _INTEGRAL_MAX_BITS >= 64 */ if (longone) *(int UNALIGNED *)pointer = (unsigned int)number; else *(short UNALIGNED *)pointer = (unsigned short)number; } else /*NULL*/; else goto error_return; break; case _T('n') : /* char count, don't inc return value */ number = charcount; if(!suppress) goto assign_num; /* found in number code above */ break; case _T('e') : /* case _T('E') : */ case _T('f') : case _T('g') : /* scan a float */ /* case _T('G') : */ nFloatStrUsed=0; if (_T('-') == ch) { pFloatStr[nFloatStrUsed++] = _T('-'); goto f_incwidth; } else if (_T('+') == ch) { f_incwidth: --width; ch = INC(); } if (!widthset) /* must watch width */ width = -1; /* now get integral part */ while (_ISDIGIT(ch) && width--) { ++started; pFloatStr[nFloatStrUsed++] = (char)ch; if (__check_float_string(nFloatStrUsed, &nFloatStrSz, &pFloatStr, floatstring, &malloc_FloatStrFlag )==FALSE) { goto error_return; } ch = INC(); } #ifdef _UNICODE /* convert decimal point to wide-char */ /* if mbtowc fails (should never happen), we use L'.' 
*/ decimal = L'.'; _MBTOWC(&decimal, _INTRN_LOCALE_CONV(_loc_update)->decimal_point, MB_CUR_MAX); #else /* _UNICODE */ decimal=*((_INTRN_LOCALE_CONV(_loc_update))->decimal_point); #endif /* _UNICODE */ /* now check for decimal */ if (decimal == (char)ch && width--) { ch = INC(); pFloatStr[nFloatStrUsed++] = decimal; if (__check_float_string(nFloatStrUsed, &nFloatStrSz, &pFloatStr, floatstring, &malloc_FloatStrFlag )==FALSE) { goto error_return; } while (_ISDIGIT(ch) && width--) { ++started; pFloatStr[nFloatStrUsed++] = (_TCHAR)ch; if (__check_float_string(nFloatStrUsed, &nFloatStrSz, &pFloatStr, floatstring, &malloc_FloatStrFlag )==FALSE) { goto error_return; } ch = INC(); } } /* now check for exponent */ if (started && (_T('e') == ch || _T('E') == ch) && width--) { pFloatStr[nFloatStrUsed++] = _T('e'); if (__check_float_string(nFloatStrUsed, &nFloatStrSz, &pFloatStr, floatstring, &malloc_FloatStrFlag )==FALSE) { goto error_return; } if (_T('-') == (ch = INC())) { pFloatStr[nFloatStrUsed++] = _T('-'); if (__check_float_string(nFloatStrUsed, &nFloatStrSz, &pFloatStr, floatstring, &malloc_FloatStrFlag )==FALSE) { goto error_return; } goto f_incwidth2; } else if (_T('+') == ch) { f_incwidth2: if (!width--) ++width; else ch = INC(); } while (_ISDIGIT(ch) && width--) { ++started; pFloatStr[nFloatStrUsed++] = (_TCHAR)ch; if (__check_float_string(nFloatStrUsed, &nFloatStrSz, &pFloatStr, floatstring, &malloc_FloatStrFlag )==FALSE) { goto error_return; } ch = INC(); } } UN_INC(ch); if (started) if (!suppress) { ++count; pFloatStr[nFloatStrUsed]= _T('\0'); #ifdef _UNICODE _WFASSIGN( longone-1, pointer, pFloatStr, (char)decimal, _loc_update.GetLocaleT()); #else /* _UNICODE */ _FASSIGN( longone-1, pointer, pFloatStr, (char)decimal, _loc_update.GetLocaleT()); #endif /* _UNICODE */ } else /*NULL */; else goto error_return; break; default: /* either found '%' or something else */ if ((int)*format != (int)ch) { UN_INC(ch); #ifdef _SECURE_SCANF /* error_return ASSERT's if 
format_error is true */ format_error = TRUE; #endif /* _SECURE_SCANF */ goto error_return; } else match--; /* % found, compensate for inc below */ if (!suppress) va_copy(arglist, arglistsave); } /* SWITCH */ match++; /* matched a format field - set flag */ } /* WHILE (width) */ else { /* zero-width field in format string */ UN_INC(ch); /* check for input error */ goto error_return; } ++format; /* skip to next char */ } else /* ('%' != *format) */ { if ((int)*format++ != (int)(ch = INC())) { UN_INC(ch); goto error_return; } #if 0 // we are not supporting multibyte input strings #ifndef _UNICODE if (isleadbyte((unsigned char)ch)) { int ch2; if ((int)*format++ != (ch2=INC())) { UN_INC(ch2); UN_INC(ch); goto error_return; } --charcount; /* only count as one character read */ } #endif /* _UNICODE */ #endif } #ifndef CPRFLAG if ( (_TEOF == ch) && ((*format != _T('%')) || (*(format + 1) != _T('n'))) ) break; #endif /* CPRFLAG */ } /* WHILE (*format) */ error_return: #if ALLOC_TABLE if (malloc_flag == 1) { _free_crt(table); } #endif /* ALLOC_TABLE */ if (malloc_FloatStrFlag == 1) { _free_crt(pFloatStr); } #ifndef CPRFLAG if (_TEOF == ch) /* If any fields were matched or assigned, return count */ return ( (count || match) ? count : EOF); else #endif /* CPRFLAG */ #ifdef _SECURE_SCANF if(format_error == TRUE) { _VALIDATE_RETURN( ("Invalid Input Format" && 0), EINVAL, count); } #endif /* _SECURE_SCANF */ return count; } /* _hextodec() returns a value of 0-15 and expects a char 0-9, a-f, A-F */ /* _inc() is the one place where we put the actual getc code. */ /* _whiteout() returns the first non-blank character, as defined by isspace() */ static int __cdecl _hextodec ( _TCHAR chr) { return _ISDIGIT(chr) ? 
chr : (chr & ~(_T('a') - _T('A'))) - _T('A') + 10 + _T('0'); } #ifdef CPRFLAG static int __cdecl _inc(void) { return (_gettche_nolock()); } static void __cdecl _un_inc(int chr) { if (_TEOF != chr) { _ungettch_nolock(chr); } } static int __cdecl _whiteout(REG1 int* counter) { REG2 int ch; do { ++*counter; ch = _inc(); if (ch == _TEOF) { break; } } while(_istspace((_TUCHAR)ch)); return ch; } #else /* CPRFLAG */ static int __cdecl _inc(miniFILE* fileptr) { return (_gettc_nolock(fileptr)); } static void __cdecl _un_inc(int chr, miniFILE* fileptr) { if (_TEOF != chr) { _ungettc_nolock(chr,fileptr); } } static int __cdecl _whiteout(int* counter, miniFILE* fileptr) { int ch; do { ++*counter; ch = _inc(fileptr); if (ch == _TEOF) { break; } } while(_istspace((_TUCHAR)ch)); return ch; } #endif /* CPRFLAG */
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*** *input.c - C formatted input, used by scanf, etc. * * *Purpose: * defines _input() to do formatted input; called from scanf(), * etc. functions. This module defines _cscanf() instead when * CPRFLAG is defined. The file cscanf.c defines that symbol * and then includes this file in order to implement _cscanf(). * *Note: * this file is included in safecrt.lib build directly, plese refer * to safecrt_[w]input_s.c * *******************************************************************************/ #define ALLOW_RANGE /* enable "%[a-z]"-style scansets */ /* temporary work-around for compiler without 64-bit support */ #ifndef _INTEGRAL_MAX_BITS #define _INTEGRAL_MAX_BITS 64 #endif /* _INTEGRAL_MAX_BITS */ // typedef __int64_t __int64; #ifndef FALSE #define FALSE 0 #endif #ifndef TRUE #define TRUE 1 #endif #define UNALIGNED #define _CVTBUFSIZE (309+40) /* # of digits in max. 
dp value + slop */ #define _MBTOWC(x,y,z) _minimal_chartowchar( x, y ) #define _istspace(x) isspace((unsigned char)x) #define _malloc_crt PAL_malloc #define _realloc_crt PAL_realloc #define _free_crt PAL_free #define _FASSIGN(flag, argument, number, dec_point, locale) _safecrt_fassign((flag), (argument), (number)) #define _WFASSIGN(flag, argument, number, dec_point, locale) _safecrt_wfassign((flag), (argument), (number)) #if defined (UNICODE) #define ALLOC_TABLE 1 #else /* defined (UNICODE) */ #define ALLOC_TABLE 0 #endif /* defined (UNICODE) */ #define HEXTODEC(chr) _hextodec(chr) #define LEFT_BRACKET ('[' | ('a' - 'A')) /* 'lowercase' version */ static int __cdecl _hextodec(_TCHAR); #ifdef CPRFLAG #define INC() (++charcount, _inc()) #define UN_INC(chr) (--charcount, _un_inc(chr)) #define EAT_WHITE() _whiteout(&charcount) static int __cdecl _inc(void); static void __cdecl _un_inc(int); static int __cdecl _whiteout(int *); #else /* CPRFLAG */ #define INC() (++charcount, _inc(stream)) #define UN_INC(chr) (--charcount, _un_inc(chr, stream)) #define EAT_WHITE() _whiteout(&charcount, stream) static int __cdecl _inc(miniFILE *); static void __cdecl _un_inc(int, miniFILE *); static int __cdecl _whiteout(int *, miniFILE *); #endif /* CPRFLAG */ #undef _ISDIGIT #undef _ISXDIGIT #ifndef _UNICODE #define _ISDIGIT(chr) isdigit((unsigned char)chr) #define _ISXDIGIT(chr) isxdigit((unsigned char)chr) #else /* _UNICODE */ #define _ISDIGIT(chr) ( !(chr & 0xff00) && isdigit( ((chr) & 0x00ff) ) ) #define _ISXDIGIT(chr) ( !(chr & 0xff00) && isxdigit( ((chr) & 0x00ff) ) ) #endif /* _UNICODE */ #define MUL10(x) ( (((x)<<2) + (x))<<1 ) #define LONGLONG_IS_INT64 1 /* 1 means long long is same as int64 0 means long long is same as long */ /*** * int __check_float_string(size_t,size_t *, _TCHAR**, _TCHAR*, int*) * * Purpose: * Check if there is enough space insert onemore character in the given * block, if not then allocate more memory. 
* * Return: * FALSE if more memory needed and the reallocation failed. * *******************************************************************************/ static int __check_float_string(size_t nFloatStrUsed, size_t *pnFloatStrSz, _TCHAR **pFloatStr, _TCHAR *floatstring, int *pmalloc_FloatStrFlag) { void *tmpPointer; _ASSERTE(nFloatStrUsed<=(*pnFloatStrSz)); if (nFloatStrUsed==(*pnFloatStrSz)) { size_t newSize; // Will (*pnFloatStrSz) * 2 * sizeof(_TCHAR) overflow? if ( *pnFloatStrSz > (SIZE_T_MAX / 2 / sizeof(_TCHAR))) { return FALSE; } newSize = *pnFloatStrSz * 2 * sizeof(_TCHAR); if ((*pFloatStr)==floatstring) { if (((*pFloatStr)=(_TCHAR *)_malloc_crt(newSize))==NULL) { return FALSE; } (*pmalloc_FloatStrFlag)=1; memcpy((*pFloatStr),floatstring,(*pnFloatStrSz)*sizeof(_TCHAR)); (*pnFloatStrSz)*=2; } else { if ((tmpPointer=(_TCHAR *)_realloc_crt((*pFloatStr), newSize))==NULL) { return FALSE; } (*pFloatStr)=(_TCHAR *)(tmpPointer); (*pnFloatStrSz)*=2; } } return TRUE; } #define ASCII 32 /* # of bytes needed to hold 256 bits */ #define SCAN_SHORT 0 /* also for FLOAT */ #define SCAN_LONG 1 /* also for DOUBLE */ #define SCAN_L_DOUBLE 2 /* only for LONG DOUBLE */ #define SCAN_NEAR 0 #define SCAN_FAR 1 #ifndef _UNICODE #define TABLESIZE ASCII #else /* _UNICODE */ #define TABLESIZE (ASCII * 256) #endif /* _UNICODE */ /*** *int _input(stream, format, arglist), static int input(format, arglist) * *Purpose: * get input items (data items or literal matches) from the input stream * and assign them if appropriate to the items thru the arglist. this * function is intended for internal library use only, not for the user * * The _input entry point is for the normal scanf() functions * The input entry point is used when compiling for _cscanf() [CPRFLAF * defined] and is a static function called only by _cscanf() -- reads from * console. * * This code also defines _input_s, which works differently for %c, %s & %[. 
* For these, _input_s first picks up the next argument from the variable * argument list & uses it as the maximum size of the character array pointed * to by the next argument in the list. * *Entry: * FILE *stream - file to read from * char *format - format string to determine the data to read * arglist - list of pointer to data items * *Exit: * returns number of items assigned and fills in data items * returns EOF if error or EOF found on stream before 1st data item matched * *Exceptions: * *******************************************************************************/ #define _INTRN_LOCALE_CONV( x ) localeconv() #ifndef _UNICODE int __cdecl __tinput_s (miniFILE* stream, const _TUCHAR* format, va_list arglist) #else int __cdecl __twinput_s (miniFILE* stream, const _TUCHAR* format, va_list arglist) #endif /* _UNICODE */ { _TCHAR floatstring[_CVTBUFSIZE + 1]; _TCHAR *pFloatStr=floatstring; size_t nFloatStrUsed=0; size_t nFloatStrSz=sizeof(floatstring)/sizeof(floatstring[0]); int malloc_FloatStrFlag=0; unsigned long number; /* temp hold-value */ #if ALLOC_TABLE char *table = NULL; /* which chars allowed for %[] */ int malloc_flag = 0; /* is "table" allocated on the heap? */ #else /* ALLOC_TABLE */ char AsciiTable[TABLESIZE]; char *table = AsciiTable; #endif /* ALLOC_TABLE */ #if _INTEGRAL_MAX_BITS >= 64 uint64_t num64 = 0LL; /* temp for 64-bit integers */ #endif /* _INTEGRAL_MAX_BITS >= 64 */ void *pointer=NULL; /* points to user data receptacle */ void *start; /* indicate non-empty string */ #ifndef _UNICODE char16_t wctemp=L'\0'; #endif /* _UNICODE */ _TUCHAR *scanptr; /* for building "table" data */ int ch = 0; int charcount; /* total number of chars read */ int comchr; /* holds designator type */ int count; /* return value. 
# of assignments */ int started; /* indicate good number */ int width; /* width of field */ int widthset; /* user has specified width */ #ifdef _SECURE_SCANF size_t array_width = 0; size_t original_array_width = 0; int enomem = 0; int format_error = FALSE; #endif /* _SECURE_SCANF */ /* Neither coerceshort nor farone are need for the 386 */ char done_flag; /* general purpose loop monitor */ char longone; /* 0 = SHORT, 1 = LONG, 2 = L_DOUBLE */ #if _INTEGRAL_MAX_BITS >= 64 int integer64; /* 1 for 64-bit integer, 0 otherwise */ #endif /* _INTEGRAL_MAX_BITS >= 64 */ signed char widechar; /* -1 = char, 0 = ????, 1 = char16_t */ char reject; /* %[^ABC] instead of %[ABC] */ char negative; /* flag for '-' detected */ char suppress; /* don't assign anything */ char match; /* flag: !0 if any fields matched */ va_list arglistsave; /* save arglist value */ char fl_wchar_arg; /* flags wide char/string argument */ _TCHAR decimal; _TUCHAR rngch; _TUCHAR last; _TUCHAR prevchar; _TCHAR tch; _VALIDATE_RETURN( (format != NULL), EINVAL, EOF); #ifndef CPRFLAG _VALIDATE_RETURN( (stream != NULL), EINVAL, EOF); #endif /* CPRFLAG */ /* count = # fields assigned charcount = # chars read match = flag indicating if any fields were matched [Note that we need both count and match. For example, a field may match a format but have assignments suppressed. In this case, match will get set, but 'count' will still equal 0. We need to distinguish 'match vs no-match' when terminating due to EOF.] 
*/ count = charcount = match = 0; while (*format) { if (_istspace((_TUCHAR)*format)) { UN_INC(EAT_WHITE()); /* put first non-space char back */ do { tch = *++format; } while (_istspace((_TUCHAR)tch)); continue; } if (_T('%') == *format) { number = 0; prevchar = 0; width = widthset = started = 0; #ifdef _SECURE_SCANF original_array_width = array_width = 0; enomem = 0; #endif /* _SECURE_SCANF */ fl_wchar_arg = done_flag = suppress = negative = reject = 0; widechar = 0; longone = 1; #if _INTEGRAL_MAX_BITS >= 64 integer64 = 0; #endif /* _INTEGRAL_MAX_BITS >= 64 */ while (!done_flag) { comchr = *++format; if (_ISDIGIT((_TUCHAR)comchr)) { ++widthset; width = MUL10(width) + (comchr - _T('0')); } else switch (comchr) { case _T('F') : case _T('N') : /* no way to push NEAR in large model */ break; /* NEAR is default in small model */ case _T('h') : /* set longone to 0 */ --longone; --widechar; /* set widechar = -1 */ break; #if _INTEGRAL_MAX_BITS >= 64 case _T('I'): if ( (*(format + 1) == _T('6')) && (*(format + 2) == _T('4')) ) { format += 2; ++integer64; num64 = 0; break; } else if ( (*(format + 1) == _T('3')) && (*(format + 2) == _T('2')) ) { format += 2; break; } else if ( (*(format + 1) == _T('d')) || (*(format + 1) == _T('i')) || (*(format + 1) == _T('o')) || (*(format + 1) == _T('x')) || (*(format + 1) == _T('X')) ) { if (sizeof(void*) == sizeof(__int64)) { ++integer64; num64 = 0; } break; } if (sizeof(void*) == sizeof(__int64)) { ++integer64; num64 = 0; } goto DEFAULT_LABEL; #endif /* _INTEGRAL_MAX_BITS >= 64 */ case _T('L') : /* ++longone; */ ++longone; break; case _T('q'): ++integer64; num64 = 0; break; case _T('l') : if (*(format + 1) == _T('l')) { ++format; #ifdef LONGLONG_IS_INT64 ++integer64; num64 = 0; break; #else /* LONGLONG_IS_INT64 */ ++longone; /* NOBREAK */ #endif /* LONGLONG_IS_INT64 */ } else { ++longone; /* NOBREAK */ } FALLTHROUGH; case _T('w') : ++widechar; /* set widechar = 1 */ break; case _T('*') : ++suppress; break; default: DEFAULT_LABEL: 
++done_flag; break; } } if (!suppress) { va_copy(arglistsave, arglist); pointer = va_arg(arglist,void *); } else { pointer = NULL; // doesn't matter what value we use here - we're only using it as a flag } done_flag = 0; if (!widechar) { /* use case if not explicitly specified */ if ((*format == _T('S')) || (*format == _T('C'))) #ifdef _UNICODE --widechar; else ++widechar; #else /* _UNICODE */ ++widechar; else --widechar; #endif /* _UNICODE */ } /* switch to lowercase to allow %E,%G, and to keep the switch table small */ comchr = *format | (_T('a') - _T('A')); if (_T('n') != comchr) { if (_T('c') != comchr && LEFT_BRACKET != comchr) ch = EAT_WHITE(); else ch = INC(); } if (_T('n') != comchr) { if (_TEOF == ch) goto error_return; } if (!widthset || width) { #ifdef _SECURE_SCANF if(!suppress && (comchr == _T('c') || comchr == _T('s') || comchr == LEFT_BRACKET)) { va_copy(arglist, arglistsave); /* Reinitialize pointer to point to the array to which we write the input */ pointer = va_arg(arglist, void*); va_copy(arglistsave, arglist); /* Get the next argument - size of the array in characters */ #ifdef HOST_64BIT original_array_width = array_width = (size_t)(va_arg(arglist, unsigned int)); #else /* HOST_64BIT */ original_array_width = array_width = va_arg(arglist, size_t); #endif /* HOST_64BIT */ if(array_width < 1) { if (widechar > 0) *(char16_t UNALIGNED *)pointer = L'\0'; else *(char *)pointer = '\0'; errno = ENOMEM; goto error_return; } } #endif /* _SECURE_SCANF */ switch(comchr) { case _T('c'): /* case _T('C'): */ if (!widthset) { ++widthset; ++width; } if (widechar > 0) fl_wchar_arg++; goto scanit; case _T('s'): /* case _T('S'): */ if(widechar > 0) fl_wchar_arg++; goto scanit; case LEFT_BRACKET : /* scanset */ if (widechar>0) fl_wchar_arg++; scanptr = (_TUCHAR *)(++format); if (_T('^') == *scanptr) { ++scanptr; --reject; /* set reject to 255 */ } /* Allocate "table" on first %[] spec */ #if ALLOC_TABLE if (table == NULL) { table = (char*)_malloc_crt(TABLESIZE); 
if ( table == NULL) goto error_return; malloc_flag = 1; } #endif /* ALLOC_TABLE */ memset(table, 0, TABLESIZE); if (LEFT_BRACKET == comchr) if (_T(']') == *scanptr) { prevchar = _T(']'); ++scanptr; table[ _T(']') >> 3] = 1 << (_T(']') & 7); } while (_T(']') != *scanptr) { rngch = *scanptr++; if (_T('-') != rngch || !prevchar || /* first char */ _T(']') == *scanptr) /* last char */ table[(prevchar = rngch) >> 3] |= 1 << (rngch & 7); else { /* handle a-z type set */ rngch = *scanptr++; /* get end of range */ if (prevchar < rngch) /* %[a-z] */ last = rngch; else { /* %[z-a] */ last = prevchar; prevchar = rngch; } for (rngch = prevchar; rngch <= last; ++rngch) table[rngch >> 3] |= 1 << (rngch & 7); prevchar = 0; } } if (!*scanptr) goto error_return; /* trunc'd format string */ /* scanset completed. Now read string */ if (LEFT_BRACKET == comchr) format = scanptr; scanit: start = pointer; /* * execute the format directive. that is, scan input * characters until the directive is fulfilled, eof * is reached, or a non-matching character is * encountered. * * it is important not to get the next character * unless that character needs to be tested! other- * wise, reads from line-buffered devices (e.g., * scanf()) would require an extra, spurious, newline * if the first newline completes the current format * directive. 
*/ UN_INC(ch); #ifdef _SECURE_SCANF /* One element is needed for '\0' for %s & %[ */ if(comchr != _T('c')) { --array_width; } #endif /* _SECURE_SCANF */ while ( !widthset || width-- ) { ch = INC(); if ( #ifndef CPRFLAG (_TEOF != ch) && #endif /* CPRFLAG */ // char conditions ( ( comchr == _T('c')) || // string conditions !isspace() ( ( comchr == _T('s') && (!(ch >= _T('\t') && ch <= _T('\r')) && ch != _T(' ')))) || // BRACKET conditions ( (comchr == LEFT_BRACKET) && ((table[ch >> 3] ^ reject) & (1 << (ch & 7))) ) ) ) { if (!suppress) { #ifdef _SECURE_SCANF if(!array_width) { /* We have exhausted the user's buffer */ enomem = 1; break; } #endif /* _SECURE_SCANF */ #ifndef _UNICODE if (fl_wchar_arg) { wctemp = W('?'); char temp[2]; temp[0] = (char) ch; #if 0 // we are not supporting multibyte input strings if (isleadbyte((unsigned char)ch)) { temp[1] = (char) INC(); } #endif /* 0 */ _MBTOWC(&wctemp, temp, MB_CUR_MAX); *(char16_t UNALIGNED *)pointer = wctemp; /* just copy W('?') if mbtowc fails, errno is set by mbtowc */ pointer = (char16_t *)pointer + 1; #ifdef _SECURE_SCANF --array_width; #endif /* _SECURE_SCANF */ } else #else /* _UNICODE */ if (fl_wchar_arg) { *(char16_t UNALIGNED *)pointer = ch; pointer = (char16_t *)pointer + 1; #ifdef _SECURE_SCANF --array_width; #endif /* _SECURE_SCANF */ } else #endif /* _UNICODE */ { #ifndef _UNICODE *(char *)pointer = (char)ch; pointer = (char *)pointer + 1; #ifdef _SECURE_SCANF --array_width; #endif /* _SECURE_SCANF */ #else /* _UNICODE */ int temp = 0; #ifndef _SECURE_SCANF /* convert wide to multibyte */ if (_ERRCHECK_EINVAL_ERANGE(wctomb_s(&temp, (char *)pointer, MB_LEN_MAX, ch)) == 0) { /* do nothing if wctomb fails, errno will be set to EILSEQ */ pointer = (char *)pointer + temp; } #else /* _SECURE_SCANF */ /* convert wide to multibyte */ if (array_width >= ((size_t)MB_CUR_MAX)) { temp = wctomb((char *)pointer, ch); } else { char tmpbuf[MB_LEN_MAX]; temp = wctomb(tmpbuf, ch); if (temp > 0 && ((size_t)temp) > 
array_width) { /* We have exhausted the user's buffer */ enomem = 1; break; } memcpy(pointer, tmpbuf, temp); } if (temp > 0) { /* do nothing if wctomb fails, errno will be set to EILSEQ */ pointer = (char *)pointer + temp; array_width -= temp; } #endif /* _SECURE_SCANF */ #endif /* _UNICODE */ } } /* suppress */ else { /* just indicate a match */ start = (_TCHAR *)start + 1; } } else { UN_INC(ch); break; } } /* make sure something has been matched and, if assignment is not suppressed, null-terminate output string if comchr != c */ #ifdef _SECURE_SCANF if(enomem) { errno = ENOMEM; /* In case of error, blank out the input buffer */ if (fl_wchar_arg) { _RESET_STRING(((char16_t UNALIGNED *)start), original_array_width); } else { _RESET_STRING(((char *)start), original_array_width); } goto error_return; } #endif /* _SECURE_SCANF */ if (start != pointer) { if (!suppress) { ++count; if ('c' != comchr) /* null-terminate strings */ { if (fl_wchar_arg) { *(char16_t UNALIGNED *)pointer = L'\0'; #ifdef _SECURE_SCANF _FILL_STRING(((char16_t UNALIGNED *)start), original_array_width, ((char16_t UNALIGNED *)pointer - (char16_t UNALIGNED *)start + 1)) #endif /* _SECURE_SCANF */ } else { *(char *)pointer = '\0'; #ifdef _SECURE_SCANF _FILL_STRING(((char *)start), original_array_width, ((char *)pointer - (char *)start + 1)) #endif /* _SECURE_SCANF */ } } } else { // supress set, do nothing } } else goto error_return; break; case _T('i') : /* could be d, o, or x */ comchr = _T('d'); /* use as default */ FALLTHROUGH; case _T('x'): if (_T('-') == ch) { ++negative; goto x_incwidth; } else if (_T('+') == ch) { x_incwidth: if (!--width && widthset) ++done_flag; else ch = INC(); } if (_T('0') == ch) { if (_T('x') == (_TCHAR)(ch = INC()) || _T('X') == (_TCHAR)ch) { ch = INC(); if (widthset) { width -= 2; if (width < 1) ++done_flag; } comchr = _T('x'); } else { ++started; if (_T('x') != comchr) { if (widthset && !--width) ++done_flag; comchr = _T('o'); } else { /* scanning a hex number that 
starts */ /* with a 0. push back the character */ /* currently in ch and restore the 0 */ UN_INC(ch); ch = _T('0'); } } } goto getnum; /* NOTREACHED */ case _T('p') : /* force %hp to be treated as %p */ longone = 1; #ifdef HOST_64BIT /* force %p to be 64 bit in WIN64 */ ++integer64; num64 = 0; #endif /* HOST_64BIT */ FALLTHROUGH; case _T('o') : case _T('u') : case _T('d') : if (_T('-') == ch) { ++negative; goto d_incwidth; } else if (_T('+') == ch) { d_incwidth: if (!--width && widthset) ++done_flag; else ch = INC(); } getnum: #if _INTEGRAL_MAX_BITS >= 64 if ( integer64 ) { while (!done_flag) { if (_T('x') == comchr || _T('p') == comchr) if (_ISXDIGIT(ch)) { num64 <<= 4; ch = _hextodec(ch); } else ++done_flag; else if (_ISDIGIT(ch)) if (_T('o') == comchr) if (_T('8') > ch) num64 <<= 3; else { ++done_flag; } else /* _T('d') == comchr */ num64 = MUL10(num64); else ++done_flag; if (!done_flag) { ++started; num64 += ch - _T('0'); if (widthset && !--width) ++done_flag; else ch = INC(); } else UN_INC(ch); } /* end of WHILE loop */ if (negative) num64 = (uint64_t )(-(__int64)num64); } else { #endif /* _INTEGRAL_MAX_BITS >= 64 */ while (!done_flag) { if (_T('x') == comchr || _T('p') == comchr) if (_ISXDIGIT(ch)) { number = (number << 4); ch = _hextodec(ch); } else ++done_flag; else if (_ISDIGIT(ch)) if (_T('o') == comchr) if (_T('8') > ch) number = (number << 3); else { ++done_flag; } else /* _T('d') == comchr */ number = MUL10(number); else ++done_flag; if (!done_flag) { ++started; number += ch - _T('0'); if (widthset && !--width) ++done_flag; else ch = INC(); } else UN_INC(ch); } /* end of WHILE loop */ if (negative) number = (unsigned long)(-(long)number); #if _INTEGRAL_MAX_BITS >= 64 } #endif /* _INTEGRAL_MAX_BITS >= 64 */ if (_T('F')==comchr) /* expected ':' in long pointer */ started = 0; if (started) if (!suppress) { ++count; assign_num: #if _INTEGRAL_MAX_BITS >= 64 if ( integer64 ) *(__int64 UNALIGNED *)pointer = ( uint64_t )num64; else #endif /* _INTEGRAL_MAX_BITS 
>= 64 */ if (longone) *(int UNALIGNED *)pointer = (unsigned int)number; else *(short UNALIGNED *)pointer = (unsigned short)number; } else /*NULL*/; else goto error_return; break; case _T('n') : /* char count, don't inc return value */ number = charcount; if(!suppress) goto assign_num; /* found in number code above */ break; case _T('e') : /* case _T('E') : */ case _T('f') : case _T('g') : /* scan a float */ /* case _T('G') : */ nFloatStrUsed=0; if (_T('-') == ch) { pFloatStr[nFloatStrUsed++] = _T('-'); goto f_incwidth; } else if (_T('+') == ch) { f_incwidth: --width; ch = INC(); } if (!widthset) /* must watch width */ width = -1; /* now get integral part */ while (_ISDIGIT(ch) && width--) { ++started; pFloatStr[nFloatStrUsed++] = (char)ch; if (__check_float_string(nFloatStrUsed, &nFloatStrSz, &pFloatStr, floatstring, &malloc_FloatStrFlag )==FALSE) { goto error_return; } ch = INC(); } #ifdef _UNICODE /* convert decimal point to wide-char */ /* if mbtowc fails (should never happen), we use L'.' 
*/ decimal = L'.'; _MBTOWC(&decimal, _INTRN_LOCALE_CONV(_loc_update)->decimal_point, MB_CUR_MAX); #else /* _UNICODE */ decimal=*((_INTRN_LOCALE_CONV(_loc_update))->decimal_point); #endif /* _UNICODE */ /* now check for decimal */ if (decimal == (char)ch && width--) { ch = INC(); pFloatStr[nFloatStrUsed++] = decimal; if (__check_float_string(nFloatStrUsed, &nFloatStrSz, &pFloatStr, floatstring, &malloc_FloatStrFlag )==FALSE) { goto error_return; } while (_ISDIGIT(ch) && width--) { ++started; pFloatStr[nFloatStrUsed++] = (_TCHAR)ch; if (__check_float_string(nFloatStrUsed, &nFloatStrSz, &pFloatStr, floatstring, &malloc_FloatStrFlag )==FALSE) { goto error_return; } ch = INC(); } } /* now check for exponent */ if (started && (_T('e') == ch || _T('E') == ch) && width--) { pFloatStr[nFloatStrUsed++] = _T('e'); if (__check_float_string(nFloatStrUsed, &nFloatStrSz, &pFloatStr, floatstring, &malloc_FloatStrFlag )==FALSE) { goto error_return; } if (_T('-') == (ch = INC())) { pFloatStr[nFloatStrUsed++] = _T('-'); if (__check_float_string(nFloatStrUsed, &nFloatStrSz, &pFloatStr, floatstring, &malloc_FloatStrFlag )==FALSE) { goto error_return; } goto f_incwidth2; } else if (_T('+') == ch) { f_incwidth2: if (!width--) ++width; else ch = INC(); } while (_ISDIGIT(ch) && width--) { ++started; pFloatStr[nFloatStrUsed++] = (_TCHAR)ch; if (__check_float_string(nFloatStrUsed, &nFloatStrSz, &pFloatStr, floatstring, &malloc_FloatStrFlag )==FALSE) { goto error_return; } ch = INC(); } } UN_INC(ch); if (started) if (!suppress) { ++count; pFloatStr[nFloatStrUsed]= _T('\0'); #ifdef _UNICODE _WFASSIGN( longone-1, pointer, pFloatStr, (char)decimal, _loc_update.GetLocaleT()); #else /* _UNICODE */ _FASSIGN( longone-1, pointer, pFloatStr, (char)decimal, _loc_update.GetLocaleT()); #endif /* _UNICODE */ } else /*NULL */; else goto error_return; break; default: /* either found '%' or something else */ if ((int)*format != (int)ch) { UN_INC(ch); #ifdef _SECURE_SCANF /* error_return ASSERT's if 
format_error is true */ format_error = TRUE; #endif /* _SECURE_SCANF */ goto error_return; } else match--; /* % found, compensate for inc below */ if (!suppress) va_copy(arglist, arglistsave); } /* SWITCH */ match++; /* matched a format field - set flag */ } /* WHILE (width) */ else { /* zero-width field in format string */ UN_INC(ch); /* check for input error */ goto error_return; } ++format; /* skip to next char */ } else /* ('%' != *format) */ { if ((int)*format++ != (int)(ch = INC())) { UN_INC(ch); goto error_return; } #if 0 // we are not supporting multibyte input strings #ifndef _UNICODE if (isleadbyte((unsigned char)ch)) { int ch2; if ((int)*format++ != (ch2=INC())) { UN_INC(ch2); UN_INC(ch); goto error_return; } --charcount; /* only count as one character read */ } #endif /* _UNICODE */ #endif } #ifndef CPRFLAG if ( (_TEOF == ch) && ((*format != _T('%')) || (*(format + 1) != _T('n'))) ) break; #endif /* CPRFLAG */ } /* WHILE (*format) */ error_return: #if ALLOC_TABLE if (malloc_flag == 1) { _free_crt(table); } #endif /* ALLOC_TABLE */ if (malloc_FloatStrFlag == 1) { _free_crt(pFloatStr); } #ifndef CPRFLAG if (_TEOF == ch) /* If any fields were matched or assigned, return count */ return ( (count || match) ? count : EOF); else #endif /* CPRFLAG */ #ifdef _SECURE_SCANF if(format_error == TRUE) { _VALIDATE_RETURN( ("Invalid Input Format" && 0), EINVAL, count); } #endif /* _SECURE_SCANF */ return count; } /* _hextodec() returns a value of 0-15 and expects a char 0-9, a-f, A-F */ /* _inc() is the one place where we put the actual getc code. */ /* _whiteout() returns the first non-blank character, as defined by isspace() */ static int __cdecl _hextodec ( _TCHAR chr) { return _ISDIGIT(chr) ? 
chr : (chr & ~(_T('a') - _T('A'))) - _T('A') + 10 + _T('0'); } #ifdef CPRFLAG static int __cdecl _inc(void) { return (_gettche_nolock()); } static void __cdecl _un_inc(int chr) { if (_TEOF != chr) { _ungettch_nolock(chr); } } static int __cdecl _whiteout(REG1 int* counter) { REG2 int ch; do { ++*counter; ch = _inc(); if (ch == _TEOF) { break; } } while(_istspace((_TUCHAR)ch)); return ch; } #else /* CPRFLAG */ static int __cdecl _inc(miniFILE* fileptr) { return (_gettc_nolock(fileptr)); } static void __cdecl _un_inc(int chr, miniFILE* fileptr) { if (_TEOF != chr) { _ungettc_nolock(chr,fileptr); } } static int __cdecl _whiteout(int* counter, miniFILE* fileptr) { int ch; do { ++*counter; ch = _inc(fileptr); if (ch == _TEOF) { break; } } while(_istspace((_TUCHAR)ch)); return ch; } #endif /* CPRFLAG */
1
dotnet/runtime
66,234
Remove compiler warning suppression
Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
AaronRobinsonMSFT
2022-03-05T03:45:49Z
2022-03-09T00:57:12Z
220e67755ae6323a01011b83070dff6f84b02519
853e494abd1c1e12d28a76829b4680af7f694afa
Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
./src/coreclr/pal/src/safecrt/internal.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*** *internal.h - contains declarations of internal routines and variables * * *Purpose: * Declares routines and variables used internally by the C run-time. * * [Internal] * ****/ #if _MSC_VER > 1000 #pragma once #endif /* _MSC_VER > 1000 */ #ifndef _INC_INTERNAL #define _INC_INTERNAL #include <crtdefs.h> #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ #include <cruntime.h> #include <limits.h> /* * Conditionally include windows.h to pick up the definition of * CRITICAL_SECTION. */ #include <windows.h> #ifdef _MSC_VER #pragma pack(push,_CRT_PACKING) #endif /* _MSC_VER */ /* Define function types used in several startup sources */ typedef void (__cdecl *_PVFV)(void); typedef int (__cdecl *_PIFV)(void); typedef void (__cdecl *_PVFI)(int); #if _MSC_VER >= 1400 && defined(_M_CEE) typedef const void* (__clrcall *_PVFVM)(void); typedef int (__clrcall *_PIFVM)(void); typedef void (__clrcall *_CPVFV)(void); #endif /* _MSC_VER >= 1400 && defined(_M_CEE) */ #if defined (_M_CEE_PURE) || (defined (_DLL) && defined (_M_IX86)) /* Retained for compatibility with VC++ 5.0 and earlier versions */ _CRTIMP int * __cdecl __p__commode(void); #endif /* defined (_M_CEE_PURE) || (defined (_DLL) && defined (_M_IX86)) */ #if defined (SPECIAL_CRTEXE) && defined (_DLL) extern int _commode; #else /* defined (SPECIAL_CRTEXE) && defined (_DLL) */ #ifndef _M_CEE_PURE _CRTIMP extern int _commode; #else /* _M_CEE_PURE */ #define _commode (*__p___commode()) #endif /* _M_CEE_PURE */ #endif /* defined (SPECIAL_CRTEXE) && defined (_DLL) */ #define __IOINFO_TM_ANSI 0 /* Regular Text */ #define __IOINFO_TM_UTF8 1 /* UTF8 Encoded */ #define __IOINFO_TM_UTF16LE 2 /* UTF16 Little Endian Encoded */ /* * Control structure for lowio file handles */ typedef struct { intptr_t osfhnd; /* underlying OS file HANDLE */ char osfile; /* attributes of file (e.g., open in 
text mode?) */ char pipech; /* one char buffer for handles opened on pipes */ int lockinitflag; CRITICAL_SECTION lock; #ifndef _SAFECRT_IMPL /* Not used in the safecrt downlevel. We do not define them, so we cannot use them accidentally */ char textmode : 7; /* __IOINFO_TM_ANSI or __IOINFO_TM_UTF8 or __IOINFO_TM_UTF16LE */ char unicode : 1; /* Was the file opened as unicode? */ char pipech2[2]; /* 2 more peak ahead chars for UNICODE mode */ #endif /* _SAFECRT_IMPL */ } ioinfo; /* * Definition of IOINFO_L2E, the log base 2 of the number of elements in each * array of ioinfo structs. */ #define IOINFO_L2E 5 /* * Definition of IOINFO_ARRAY_ELTS, the number of elements in ioinfo array */ #define IOINFO_ARRAY_ELTS (1 << IOINFO_L2E) /* * Definition of IOINFO_ARRAYS, maximum number of supported ioinfo arrays. */ #define IOINFO_ARRAYS 64 #define _NHANDLE_ (IOINFO_ARRAYS * IOINFO_ARRAY_ELTS) #define _TZ_STRINGS_SIZE 64 /* * Access macros for getting at an ioinfo struct and its fields from a * file handle */ #define _pioinfo(i) ( __pioinfo[(i) >> IOINFO_L2E] + ((i) & (IOINFO_ARRAY_ELTS - \ 1)) ) #define _osfhnd(i) ( _pioinfo(i)->osfhnd ) #define _osfile(i) ( _pioinfo(i)->osfile ) #define _pipech(i) ( _pioinfo(i)->pipech ) #define _pipech2(i) ( _pioinfo(i)->pipech2 ) #define _textmode(i) ( _pioinfo(i)->textmode ) #define _tm_unicode(i) ( _pioinfo(i)->unicode ) /* * Safer versions of the above macros. Currently, only _osfile_safe is * used. */ #define _pioinfo_safe(i) ( (((i) != -1) && ((i) != -2)) ? 
_pioinfo(i) : &__badioinfo ) #define _osfhnd_safe(i) ( _pioinfo_safe(i)->osfhnd ) #define _osfile_safe(i) ( _pioinfo_safe(i)->osfile ) #define _pipech_safe(i) ( _pioinfo_safe(i)->pipech ) #define _pipech2_safe(i) ( _pioinfo_safe(i)->pipech2 ) #ifdef _SAFECRT_IMPL /* safecrt does not have support for textmode, so we always return __IOINFO_TM_ANSI */ #define _textmode_safe(i) __IOINFO_TM_ANSI #define _tm_unicode_safe(i) 0 #else /* _SAFECRT_IMPL */ #define _textmode_safe(i) ( _pioinfo_safe(i)->textmode ) #define _tm_unicode_safe(i) ( _pioinfo_safe(i)->unicode ) #endif /* _SAFECRT_IMPL */ #ifndef _M_CEE_PURE #ifdef _SAFECRT_IMPL /* We need to get this from the downlevel DLL, even when we build safecrt.lib */ extern __declspec(dllimport) ioinfo __badioinfo; extern __declspec(dllimport) ioinfo * __pioinfo[]; #else /* _SAFECRT_IMPL */ /* * Special, static ioinfo structure used only for more graceful handling * of a C file handle value of -1 (results from common errors at the stdio * level). */ extern _CRTIMP ioinfo __badioinfo; /* * Array of arrays of control structures for lowio files. */ extern _CRTIMP ioinfo * __pioinfo[]; #endif /* _SAFECRT_IMPL */ #endif /* _M_CEE_PURE */ /* * Current number of allocated ioinfo structures (_NHANDLE_ is the upper * limit). 
*/ extern int _nhandle; int __cdecl _alloc_osfhnd(void); int __cdecl _free_osfhnd(int); int __cdecl _set_osfhnd(int, intptr_t); /* fileno for stdout, stdin & stderr when there is no console */ #define _NO_CONSOLE_FILENO (intptr_t)-2 extern const char __dnames[]; extern const char __mnames[]; extern int _days[]; extern int _lpdays[]; extern __time32_t __cdecl __loctotime32_t(int, int, int, int, int, int, int); extern __time64_t __cdecl __loctotime64_t(int, int, int, int, int, int, int); extern void __cdecl __tzset(void); extern int __cdecl _validdrive(unsigned); /* * If we are only interested in years between 1901 and 2099, we could use this: * * #define IS_LEAP_YEAR(y) (y % 4 == 0) */ #define IS_LEAP_YEAR(y) (((y) % 4 == 0 && (y) % 100 != 0) || (y) % 400 == 0) /* * This variable is in the C start-up; the length must be kept synchronized * It is used by the *cenvarg.c modules */ extern char _acfinfo[]; /* "_C_FILE_INFO=" */ #define CFI_LENGTH 12 /* "_C_FILE_INFO" is 12 bytes long */ /* * stdio internals */ #ifndef _FILE_DEFINED struct _iobuf { char *_ptr; int _cnt; char *_base; int _flag; int _file; int _charbuf; int _bufsiz; char *_tmpfname; }; typedef struct _iobuf FILE; #define _FILE_DEFINED #endif /* _FILE_DEFINED */ #if !defined (_FILEX_DEFINED) && defined (_WINDOWS_) /* * Variation of FILE type used for the dynamically allocated portion of * __piob[]. For single thread, _FILEX is the same as FILE. For multithread * models, _FILEX has two fields: the FILE struct and the CRITICAL_SECTION * struct used to serialize access to the FILE. */ typedef struct { FILE f; CRITICAL_SECTION lock; } _FILEX; #define _FILEX_DEFINED #endif /* !defined (_FILEX_DEFINED) && defined (_WINDOWS_) */ /* * Number of entries supported in the array pointed to by __piob[]. That is, * the number of stdio-level files which may be open simultaneously. This * is normally set to _NSTREAM_ by the stdio initialization code. 
*/ extern int _nstream; /* * Pointer to the array of pointers to FILE/_FILEX structures that are used * to manage stdio-level files. */ extern void **__piob; FILE * __cdecl _getstream(void); FILE * __cdecl _openfile(_In_z_ const char * _Filename, _In_z_ const char * _Mode, _In_ int _ShFlag, _Out_ FILE * _File); FILE * __cdecl _wopenfile(_In_z_ const char16_t * _Filename, _In_z_ const char16_t * _Mode, _In_ int _ShFlag, _Out_ FILE * _File); void __cdecl _getbuf(_Out_ FILE * _File); int __cdecl _filwbuf (__inout FILE * _File); int __cdecl _flswbuf(_In_ int _Ch, __inout FILE * _File); void __cdecl _freebuf(__inout FILE * _File); int __cdecl _stbuf(__inout FILE * _File); void __cdecl _ftbuf(int _Flag, __inout FILE * _File); #ifdef _SAFECRT_IMPL int __cdecl _output(__inout FILE * _File, _In_z_ __format_string const char *_Format, va_list _ArgList); int __cdecl _woutput(__inout FILE * _File, _In_z_ __format_string const char16_t *_Format, va_list _ArgList); int __cdecl _output_s(__inout FILE * _File, _In_z_ __format_string const char *_Format, va_list _ArgList); int __cdecl _output_p(__inout FILE * _File, _In_z_ __format_string const char *_Format, va_list _ArgList); int __cdecl _woutput_s(__inout FILE * _File, _In_z_ __format_string const char16_t *_Format, va_list _ArgList); int __cdecl _woutput_p(__inout FILE * _File, _In_z_ __format_string const char16_t *_Format, va_list _ArgList); typedef int (*OUTPUTFN)(FILE *, const char *, va_list); typedef int (*WOUTPUTFN)(FILE *, const char16_t *, va_list); #else /* _SAFECRT_IMPL */ int __cdecl _output_l(__inout FILE * _File, _In_z_ __format_string const char *_Format, _In_opt_ _locale_t _Locale, va_list _ArgList); int __cdecl _woutput_l(__inout FILE * _File, _In_z_ __format_string const char16_t *_Format, _In_opt_ _locale_t _Locale, va_list _ArgList); int __cdecl _output_s_l(__inout FILE * _File, _In_z_ __format_string const char *_Format, _In_opt_ _locale_t _Locale, va_list _ArgList); int __cdecl _output_p_l(__inout FILE * 
_File, _In_z_ __format_string const char *_Format, _In_opt_ _locale_t _Locale, va_list _ArgList); int __cdecl _woutput_s_l(__inout FILE * _File, _In_z_ __format_string const char16_t *_Format, _In_opt_ _locale_t _Locale, va_list _ArgList); int __cdecl _woutput_p_l(__inout FILE * _File, _In_z_ __format_string const char16_t *_Format, _In_opt_ _locale_t _Locale, va_list _ArgList); typedef int (*OUTPUTFN)(__inout FILE * _File, const char *, _locale_t, va_list); typedef int (*WOUTPUTFN)(__inout FILE * _File, const char16_t *, _locale_t, va_list); #endif /* _SAFECRT_IMPL */ #ifdef _SAFECRT_IMPL int __cdecl _input(_In_ FILE * _File, _In_z_ __format_string const unsigned char * _Format, va_list _ArgList); int __cdecl _winput(_In_ FILE * _File, _In_z_ __format_string const char16_t * _Format, va_list _ArgList); int __cdecl _input_s(_In_ FILE * _File, _In_z_ __format_string const unsigned char * _Format, va_list _ArgList); int __cdecl _winput_s(_In_ FILE * _File, _In_z_ __format_string const char16_t * _Format, va_list _ArgList); typedef int (*INPUTFN)(FILE *, const unsigned char *, va_list); typedef int (*WINPUTFN)(FILE *, const char16_t *, va_list); #else /* _SAFECRT_IMPL */ int __cdecl _input_l(__inout FILE * _File, _In_z_ __format_string const unsigned char *, _In_opt_ _locale_t _Locale, va_list _ArgList); int __cdecl _winput_l(__inout FILE * _File, _In_z_ __format_string const char16_t *, _In_opt_ _locale_t _Locale, va_list _ArgList); int __cdecl _input_s_l(__inout FILE * _File, _In_z_ __format_string const unsigned char *, _In_opt_ _locale_t _Locale, va_list _ArgList); int __cdecl _winput_s_l(__inout FILE * _File, _In_z_ __format_string const char16_t *, _In_opt_ _locale_t _Locale, va_list _ArgList); typedef int (*INPUTFN)(FILE *, const unsigned char *, _locale_t, va_list); typedef int (*WINPUTFN)(FILE *, const char16_t *, _locale_t, va_list); #ifdef _UNICODE #define TINPUTFN WINPUTFN #else /* _UNICODE */ #define TINPUTFN INPUTFN #endif /* _UNICODE */ #endif /* 
_SAFECRT_IMPL */ int __cdecl _flush(__inout FILE * _File); void __cdecl _endstdio(void); errno_t __cdecl _sopen_helper(_In_z_ const char * _Filename, _In_ int _OFlag, _In_ int _ShFlag, _In_ int _PMode, _Out_ int * _PFileHandle, int _BSecure); errno_t __cdecl _wsopen_helper(_In_z_ const char16_t * _Filename, _In_ int _OFlag, _In_ int _ShFlag, _In_ int _PMode, _Out_ int * _PFileHandle, int _BSecure); #ifndef CRTDLL extern int _cflush; #endif /* CRTDLL */ extern unsigned int _tempoff; extern unsigned int _old_pfxlen; extern int _umaskval; /* the umask value */ extern char _pipech[]; /* pipe lookahead */ extern char _exitflag; /* callable termination flag */ extern int _C_Termination_Done; /* termination done flag */ char * __cdecl _getpath(_In_z_ const char * _Src, _Out_writes_z_(_SizeInChars) char * _Dst, _In_ size_t _SizeInChars); char16_t * __cdecl _wgetpath(_In_z_ const char16_t * _Src, _Out_writes_z_(_SizeInWords) char16_t * _Dst, _In_ size_t _SizeInWords); extern int _dowildcard; /* flag to enable argv[] wildcard expansion */ #ifndef _PNH_DEFINED typedef int (__cdecl * _PNH)( size_t ); #define _PNH_DEFINED #endif /* _PNH_DEFINED */ #if _MSC_VER >= 1400 && defined(_M_CEE) #ifndef __MPNH_DEFINED typedef int (__clrcall * __MPNH)( size_t ); #define __MPNH_DEFINED #endif /* __MPNH_DEFINED */ #endif /* _MSC_VER >= 1400 && defined(_M_CEE) */ /* calls the currently installed new handler */ int __cdecl _callnewh(_In_ size_t _Size); extern int _newmode; /* malloc new() handler mode */ /* pointer to initial environment block that is passed to [w]main */ #ifndef _M_CEE_PURE extern _CRTIMP char16_t **__winitenv; extern _CRTIMP char **__initenv; #endif /* _M_CEE_PURE */ /* _calloca helper */ #define _calloca(count, size) ((count<=0 || size<=0 || ((((size_t)_HEAP_MAXREQ) / ((size_t)count)) < ((size_t)size)))? 
NULL : _malloca(count * size)) /* startup set values */ extern char *_aenvptr; /* environment ptr */ extern char16_t *_wenvptr; /* wide environment ptr */ /* command line */ #if defined (_DLL) _CRTIMP char ** __cdecl __p__acmdln(void); _CRTIMP char16_t ** __cdecl __p__wcmdln(void); #endif /* defined (_DLL) */ #ifndef _M_CEE_PURE _CRTIMP extern char *_acmdln; _CRTIMP extern char16_t *_wcmdln; #else /* _M_CEE_PURE */ #define _acmdln (*__p__acmdln()) #define _wcmdln (*__p__wcmdln()) #endif /* _M_CEE_PURE */ /* * prototypes for internal startup functions */ int __cdecl _cwild(void); /* wild.c */ int __cdecl _wcwild(void); /* wwild.c */ int __cdecl _mtinit(void); /* tidtable.c */ void __cdecl _mtterm(void); /* tidtable.c */ int __cdecl _mtinitlocks(void); /* mlock.c */ void __cdecl _mtdeletelocks(void); /* mlock.c */ int __cdecl _mtinitlocknum(int); /* mlock.c */ /* Wrapper for InitializeCriticalSection API, with default spin count */ int __cdecl __crtInitCritSecAndSpinCount(PCRITICAL_SECTION, DWORD); #define _CRT_SPINCOUNT 4000 /* * C source build only!!!! 
* * more prototypes for internal startup functions */ void __cdecl _amsg_exit(int); /* crt0.c */ void __cdecl __crtExitProcess(int); /* crt0dat.c */ void __cdecl __crtCorExitProcess(int); /* crt0dat.c */ void __cdecl __crtdll_callstaticterminators(void); /* crt0dat.c */ /* _cinit now allows the caller to suppress floating point precision init This allows the DLLs that use the CRT to not initialise FP precision, allowing the EXE's setting to persist even when a DLL is loaded */ int __cdecl _cinit(int /* initFloatingPrecision */); /* crt0dat.c */ void __cdecl __doinits(void); /* astart.asm */ void __cdecl __doterms(void); /* astart.asm */ void __cdecl __dopreterms(void); /* astart.asm */ void __cdecl _FF_MSGBANNER(void); void __cdecl _fpmath(int /*initPrecision*/); void __cdecl _fpclear(void); void __cdecl _fptrap(void); /* crt0fp.c */ int __cdecl _heap_init(int); void __cdecl _heap_term(void); void __cdecl _heap_abort(void); void __cdecl __initconin(void); /* initcon.c */ void __cdecl __initconout(void); /* initcon.c */ int __cdecl _ioinit(void); /* crt0.c, crtlib.c */ void __cdecl _ioterm(void); /* crt0.c, crtlib.c */ char * __cdecl _GET_RTERRMSG(int); void __cdecl _NMSG_WRITE(int); int __CRTDECL _setargv(void); /* setargv.c, stdargv.c */ int __CRTDECL __setargv(void); /* stdargv.c */ int __CRTDECL _wsetargv(void); /* wsetargv.c, wstdargv.c */ int __CRTDECL __wsetargv(void); /* wstdargv.c */ int __cdecl _setenvp(void); /* stdenvp.c */ int __cdecl _wsetenvp(void); /* wstdenvp.c */ void __cdecl __setmbctable(unsigned int); /* mbctype.c */ #ifdef MRTDLL _MRTIMP int __cdecl _onexit_process(_CPVFV); _MRTIMP int __cdecl _onexit_app_domain(_CPVFV); #endif /* MRTDLL */ #ifndef _MANAGED_MAIN int __CRTDECL main(_In_ int _Argc, _In_reads_z_(_Argc) char ** _Argv, _In_z_ char ** _Env); int __CRTDECL wmain(_In_ int _Argc, _In_reads_z_(_Argc) char16_t ** _Argv, _In_z_ char16_t ** _Env); #endif /* _MANAGED_MAIN */ /* helper functions for wide/multibyte environment conversion */ 
int __cdecl __mbtow_environ (void); int __cdecl __wtomb_environ (void); /* These two functions take a char ** for the environment option At some point during their execution, they take ownership of the memory block passed in using option. At this point, they NULL out the incoming char * / char16_t * to ensure there is no double-free */ int __cdecl __crtsetenv(_Outptr_opt_ char ** _POption, _In_ const int _Primary); int __cdecl __crtwsetenv(_Outptr_opt_ char16_t ** _POption, _In_ const int _Primary); #ifndef _M_CEE_PURE _CRTIMP extern void (__cdecl * _aexit_rtn)(int); #endif /* _M_CEE_PURE */ #if defined (_DLL) || defined (CRTDLL) #ifndef _STARTUP_INFO_DEFINED typedef struct { int newmode; } _startupinfo; #define _STARTUP_INFO_DEFINED #endif /* _STARTUP_INFO_DEFINED */ _CRTIMP int __cdecl __getmainargs(_Out_ int * _Argc, _Outptr_result_buffer_(*_Argc) char *** _Argv, _Outptr_opt_ char *** _Env, _In_ int _DoWildCard, _In_ _startupinfo * _StartInfo); _CRTIMP int __cdecl __wgetmainargs(_Out_ int * _Argc, _Outptr_result_buffer_(*_Argc)char16_t *** _Argv, _Outptr_opt_ char16_t *** _Env, _In_ int _DoWildCard, _In_ _startupinfo * _StartInfo); #endif /* defined (_DLL) || defined (CRTDLL) */ /* * Prototype, variables and constants which determine how error messages are * written out. 
*/ #define _UNKNOWN_APP 0 #define _CONSOLE_APP 1 #define _GUI_APP 2 extern int __app_type; #if !defined (_M_CEE_PURE) extern Volatile<void*> __native_startup_lock; #define __NO_REASON UINT_MAX extern Volatile<unsigned int> __native_dllmain_reason; extern Volatile<unsigned int> __native_vcclrit_reason; #if defined (__cplusplus) #pragma warning(push) #pragma warning(disable: 4483) #if _MSC_FULL_VER >= 140050415 #define _NATIVE_STARTUP_NAMESPACE __identifier("<CrtImplementationDetails>") #else /* _MSC_FULL_VER >= 140050415 */ #define _NATIVE_STARTUP_NAMESPACE __CrtImplementationDetails #endif /* _MSC_FULL_VER >= 140050415 */ namespace _NATIVE_STARTUP_NAMESPACE { class NativeDll { private: static const unsigned int ProcessDetach = 0; static const unsigned int ProcessAttach = 1; static const unsigned int ThreadAttach = 2; static const unsigned int ThreadDetach = 3; static const unsigned int ProcessVerifier = 4; public: inline static bool IsInDllMain() { return (__native_dllmain_reason != __NO_REASON); } inline static bool IsInProcessAttach() { return (__native_dllmain_reason == ProcessAttach); } inline static bool IsInProcessDetach() { return (__native_dllmain_reason == ProcessDetach); } inline static bool IsInVcclrit() { return (__native_vcclrit_reason != __NO_REASON); } inline static bool IsSafeForManagedCode() { if (!IsInDllMain()) { return true; } if (IsInVcclrit()) { return true; } return !IsInProcessAttach() && !IsInProcessDetach(); } }; } #pragma warning(pop) #endif /* defined (__cplusplus) */ #endif /* !defined (_M_CEE_PURE) */ extern int __error_mode; _CRTIMP void __cdecl __set_app_type(int); #if defined (CRTDLL) && !defined (_SYSCRT) /* * All these function pointer are used for creating global state of CRT * functions. 
Either all of them will be set or all of them will be NULL */ typedef void (__cdecl *_set_app_type_function)(int); typedef int (__cdecl *_get_app_type_function)(); extern _set_app_type_function __set_app_type_server; extern _get_app_type_function __get_app_type_server; #endif /* defined (CRTDLL) && !defined (_SYSCRT) */ /* * C source build only!!!! * * map Win32 errors into Xenix errno values -- for modules written in C */ _CRTIMP void __cdecl _dosmaperr(unsigned long); extern int __cdecl _get_errno_from_oserr(unsigned long); /* * internal routines used by the exec/spawn functions */ extern intptr_t __cdecl _dospawn(_In_ int _Mode, _In_opt_z_ const char * _Name, __inout_z char * _Cmd, _In_opt_z_ char * _Env); extern intptr_t __cdecl _wdospawn(_In_ int _Mode, _In_opt_z_ const char16_t * _Name, __inout_z char16_t * _Cmd, _In_opt_z_ char16_t * _Env); extern int __cdecl _cenvarg(_In_z_ const char * const * _Argv, _In_opt_z_ const char * const * _Env, _Outptr_opt_ char ** _ArgBlk, _Outptr_opt_ char ** _EnvBlk, _In_z_ const char *_Name); extern int __cdecl _wcenvarg(_In_z_ const char16_t * const * _Argv, _In_opt_z_ const char16_t * const * _Env, _Outptr_opt_ char16_t ** _ArgBlk, _Outptr_opt_ char16_t ** _EnvBlk, _In_z_ const char16_t * _Name); #ifndef _M_IX86 extern char ** _capture_argv(_In_ va_list *, _In_z_ const char * _FirstArg, _Out_writes_z_(_MaxCount) char ** _Static_argv, _In_ size_t _MaxCount); extern char16_t ** _wcapture_argv(_In_ va_list *, _In_z_ const char16_t * _FirstArg, _Out_writes_z_(_MaxCount) char16_t ** _Static_argv, _In_ size_t _MaxCount); #endif /* _M_IX86 */ /* * internal routine used by the abort */ extern _PHNDLR __cdecl __get_sigabrt(void); /* * Type from ntdef.h */ typedef LONG NTSTATUS; /* * Exception code used in _invalid_parameter */ #ifndef STATUS_INVALID_PARAMETER #define STATUS_INVALID_PARAMETER ((NTSTATUS)0xC000000DL) #endif /* STATUS_INVALID_PARAMETER */ /* * Exception code used for abort and _CALL_REPORTFAULT */ #ifndef 
STATUS_FATAL_APP_EXIT #define STATUS_FATAL_APP_EXIT ((NTSTATUS)0x40000015L) #endif /* STATUS_FATAL_APP_EXIT */ /* * Validate functions */ #include <crtdbg.h> /* _ASSERTE */ #include <errno.h> #define __STR2WSTR(str) L##str #define _STR2WSTR(str) __STR2WSTR(str) #define __FILEW__ _STR2WSTR(__FILE__) #define __FUNCTIONW__ _STR2WSTR(__FUNCTION__) /* We completely fill the buffer only in debug (see _SECURECRT__FILL_STRING * and _SECURECRT__FILL_BYTE macros). */ #if !defined (_SECURECRT_FILL_BUFFER) #ifdef _DEBUG #define _SECURECRT_FILL_BUFFER 1 #else /* _DEBUG */ #define _SECURECRT_FILL_BUFFER 0 #endif /* _DEBUG */ #endif /* !defined (_SECURECRT_FILL_BUFFER) */ #ifndef _SAFECRT_IMPL /* _invalid_parameter is already defined in safecrt.h and safecrt.lib */ #if !defined (_NATIVE_char16_t_DEFINED) && defined (_M_CEE_PURE) extern "C++" #endif /* !defined (_NATIVE_char16_t_DEFINED) && defined (_M_CEE_PURE) */ _CRTIMP #endif /* _SAFECRT_IMPL */ void __cdecl _invalid_parameter(_In_opt_z_ const char16_t *, _In_opt_z_ const char16_t *, _In_opt_z_ const char16_t *, unsigned int, uintptr_t); #if !defined (_NATIVE_char16_t_DEFINED) && defined (_M_CEE_PURE) extern "C++" #endif /* !defined (_NATIVE_char16_t_DEFINED) && defined (_M_CEE_PURE) */ _CRTIMP void __cdecl _invoke_watson(_In_opt_z_ const char16_t *, _In_opt_z_ const char16_t *, _In_opt_z_ const char16_t *, unsigned int, uintptr_t); #ifndef _DEBUG #if !defined (_NATIVE_char16_t_DEFINED) && defined (_M_CEE_PURE) extern "C++" #endif /* !defined (_NATIVE_char16_t_DEFINED) && defined (_M_CEE_PURE) */ _CRTIMP void __cdecl _invalid_parameter_noinfo(void); #endif /* _DEBUG */ /* Invoke Watson if _ExpressionError is not 0; otherwise simply return _EspressionError */ __forceinline void _invoke_watson_if_error( errno_t _ExpressionError, const char16_t *_Expression, const char16_t *_Function, const char16_t *_File, unsigned int _Line, uintptr_t _Reserved ) { if (_ExpressionError == 0) { return; } _invoke_watson(_Expression, _Function, 
_File, _Line, _Reserved); } /* Invoke Watson if _ExpressionError is not 0 and equal to _ErrorValue1 or _ErrorValue2; otherwise simply return _EspressionError */ __forceinline errno_t _invoke_watson_if_oneof( errno_t _ExpressionError, errno_t _ErrorValue1, errno_t _ErrorValue2, const char16_t *_Expression, const char16_t *_Function, const char16_t *_File, unsigned int _Line, uintptr_t _Reserved ) { if (_ExpressionError == 0 || (_ExpressionError != _ErrorValue1 && _ExpressionError != _ErrorValue2)) { return _ExpressionError; } _invoke_watson(_Expression, _Function, _File, _Line, _Reserved); return _ExpressionError; } /* * Assert in debug builds. * set errno and return * */ #ifdef _DEBUG #define _CALL_INVALID_PARAMETER_FUNC(funcname, expr) funcname(expr, __FUNCTIONW__, __FILEW__, __LINE__, 0) #define _INVOKE_WATSON_IF_ERROR(expr) _invoke_watson_if_error((expr), __STR2WSTR(#expr), __FUNCTIONW__, __FILEW__, __LINE__, 0) #define _INVOKE_WATSON_IF_ONEOF(expr, errvalue1, errvalue2) _invoke_watson_if_oneof(expr, (errvalue1), (errvalue2), __STR2WSTR(#expr), __FUNCTIONW__, __FILEW__, __LINE__, 0) #else /* _DEBUG */ #define _CALL_INVALID_PARAMETER_FUNC(funcname, expr) funcname(NULL, NULL, NULL, 0, 0) #define _INVOKE_WATSON_IF_ERROR(expr) _invoke_watson_if_error(expr, NULL, NULL, NULL, 0, 0) #define _INVOKE_WATSON_IF_ONEOF(expr, errvalue1, errvalue2) _invoke_watson_if_oneof((expr), (errvalue1), (errvalue2), NULL, NULL, NULL, 0, 0) #endif /* _DEBUG */ #define _INVALID_PARAMETER(expr) _CALL_INVALID_PARAMETER_FUNC(_invalid_parameter, expr) #define _VALIDATE_RETURN_VOID( expr, errorcode ) \ { \ int _Expr_val=!!(expr); \ _ASSERT_EXPR( ( _Expr_val ), _CRT_WIDE(#expr) ); \ if ( !( _Expr_val ) ) \ { \ errno = errorcode; \ _INVALID_PARAMETER(_CRT_WIDE(#expr)); \ return; \ } \ } /* * Assert in debug builds. 
* set errno and return value */ #ifndef _VALIDATE_RETURN #define _VALIDATE_RETURN( expr, errorcode, retexpr ) \ { \ int _Expr_val=!!(expr); \ _ASSERT_EXPR( ( _Expr_val ), _CRT_WIDE(#expr) ); \ if ( !( _Expr_val ) ) \ { \ errno = errorcode; \ _INVALID_PARAMETER(_CRT_WIDE(#expr) ); \ return ( retexpr ); \ } \ } #endif /* _VALIDATE_RETURN */ #ifndef _VALIDATE_RETURN_NOEXC #define _VALIDATE_RETURN_NOEXC( expr, errorcode, retexpr ) \ { \ if ( !(expr) ) \ { \ errno = errorcode; \ return ( retexpr ); \ } \ } #endif /* _VALIDATE_RETURN_NOEXC */ /* * Assert in debug builds. * set errno and set retval for later usage */ #define _VALIDATE_SETRET( expr, errorcode, retval, retexpr ) \ { \ int _Expr_val=!!(expr); \ _ASSERT_EXPR( ( _Expr_val ), _CRT_WIDE(#expr) ); \ if ( !( _Expr_val ) ) \ { \ errno = errorcode; \ _INVALID_PARAMETER(_CRT_WIDE(#expr)); \ retval=( retexpr ); \ } \ } #define _CHECK_FH_RETURN( handle, errorcode, retexpr ) \ { \ if(handle == _NO_CONSOLE_FILENO) \ { \ errno = errorcode; \ return ( retexpr ); \ } \ } /* We use _VALIDATE_STREAM_ANSI_RETURN to ensure that ANSI file operations( fprintf etc) aren't called on files opened as UNICODE. We do this check only if it's an actual FILE pointer & not a string */ #define _VALIDATE_STREAM_ANSI_RETURN( stream, errorcode, retexpr ) \ { \ FILE *_Stream=stream; \ _VALIDATE_RETURN(( (_Stream->_flag & _IOSTRG) || \ ( (_textmode_safe(_fileno(_Stream)) == __IOINFO_TM_ANSI) && \ !_tm_unicode_safe(_fileno(_Stream)))), \ errorcode, retexpr) \ } /* We use _VALIDATE_STREAM_ANSI_SETRET to ensure that ANSI file operations( fprintf etc) aren't called on files opened as UNICODE. We do this check only if it's an actual FILE pointer & not a string. 
It doesn't actually return immediately */ #define _VALIDATE_STREAM_ANSI_SETRET( stream, errorcode, retval, retexpr) \ { \ FILE *_Stream=stream; \ _VALIDATE_SETRET(( (_Stream->_flag & _IOSTRG) || \ ( (_textmode_safe(_fileno(_Stream)) == __IOINFO_TM_ANSI) && \ !_tm_unicode_safe(_fileno(_Stream)))), \ errorcode, retval, retexpr) \ } /* * Assert in debug builds. * Return value (do not set errno) */ #define _VALIDATE_RETURN_NOERRNO( expr, retexpr ) \ { \ int _Expr_val=!!(expr); \ _ASSERT_EXPR( ( _Expr_val ), _CRT_WIDE(#expr) ); \ if ( !( _Expr_val ) ) \ { \ _INVALID_PARAMETER(_CRT_WIDE(#expr)); \ return ( retexpr ); \ } \ } /* * Assert in debug builds. * set errno and return errorcode */ #define _VALIDATE_RETURN_ERRCODE( expr, errorcode ) \ { \ int _Expr_val=!!(expr); \ _ASSERT_EXPR( ( _Expr_val ), _CRT_WIDE(#expr) ); \ if ( !( _Expr_val ) ) \ { \ errno = errorcode; \ _INVALID_PARAMETER(_CRT_WIDE(#expr)); \ return ( errorcode ); \ } \ } #define _VALIDATE_RETURN_ERRCODE_NOEXC( expr, errorcode ) \ { \ if (!(expr)) \ { \ errno = errorcode; \ return ( errorcode ); \ } \ } #define _VALIDATE_CLEAR_OSSERR_RETURN( expr, errorcode, retexpr ) \ { \ int _Expr_val=!!(expr); \ _ASSERT_EXPR( ( _Expr_val ), _CRT_WIDE(#expr) ); \ if ( !( _Expr_val ) ) \ { \ _doserrno = 0L; \ errno = errorcode; \ _INVALID_PARAMETER(_CRT_WIDE(#expr) ); \ return ( retexpr ); \ } \ } #define _CHECK_FH_CLEAR_OSSERR_RETURN( handle, errorcode, retexpr ) \ { \ if(handle == _NO_CONSOLE_FILENO) \ { \ _doserrno = 0L; \ errno = errorcode; \ return ( retexpr ); \ } \ } #define _VALIDATE_CLEAR_OSSERR_RETURN_ERRCODE( expr, errorcode ) \ { \ int _Expr_val=!!(expr); \ _ASSERT_EXPR( ( _Expr_val ), _CRT_WIDE(#expr) ); \ if ( !( _Expr_val ) ) \ { \ _doserrno = 0L; \ errno = errorcode; \ _INVALID_PARAMETER(_CRT_WIDE(#expr)); \ return ( errorcode ); \ } \ } #define _CHECK_FH_CLEAR_OSSERR_RETURN_ERRCODE( handle, retexpr ) \ { \ if(handle == _NO_CONSOLE_FILENO) \ { \ _doserrno = 0L; \ return ( retexpr ); \ } \ } #ifdef _DEBUG 
extern size_t __crtDebugFillThreshold; #endif /* _DEBUG */ #if !defined (_SECURECRT_FILL_BUFFER_THRESHOLD) #ifdef _DEBUG #define _SECURECRT_FILL_BUFFER_THRESHOLD __crtDebugFillThreshold #else /* _DEBUG */ #define _SECURECRT_FILL_BUFFER_THRESHOLD ((size_t)0) #endif /* _DEBUG */ #endif /* !defined (_SECURECRT_FILL_BUFFER_THRESHOLD) */ #if _SECURECRT_FILL_BUFFER #define _SECURECRT__FILL_STRING(_String, _Size, _Offset) \ if ((_Size) != ((size_t)-1) && (_Size) != INT_MAX && \ ((size_t)(_Offset)) < (_Size)) \ { \ memset((_String) + (_Offset), \ _SECURECRT_FILL_BUFFER_PATTERN, \ (_SECURECRT_FILL_BUFFER_THRESHOLD < ((size_t)((_Size) - (_Offset))) ? \ _SECURECRT_FILL_BUFFER_THRESHOLD : \ ((_Size) - (_Offset))) * sizeof(*(_String))); \ } #else /* _SECURECRT_FILL_BUFFER */ #define _SECURECRT__FILL_STRING(_String, _Size, _Offset) #endif /* _SECURECRT_FILL_BUFFER */ #if _SECURECRT_FILL_BUFFER #define _SECURECRT__FILL_BYTE(_Position) \ if (_SECURECRT_FILL_BUFFER_THRESHOLD > 0) \ { \ (_Position) = _SECURECRT_FILL_BUFFER_PATTERN; \ } #else /* _SECURECRT_FILL_BUFFER */ #define _SECURECRT__FILL_BYTE(_Position) #endif /* _SECURECRT_FILL_BUFFER */ #ifdef __cplusplus #define _REDIRECT_TO_L_VERSION_FUNC_PROLOGUE extern "C" #else /* __cplusplus */ #define _REDIRECT_TO_L_VERSION_FUNC_PROLOGUE #endif /* __cplusplus */ /* helper macros to redirect an mbs function to the corresponding _l version */ #define _REDIRECT_TO_L_VERSION_1(_ReturnType, _FunctionName, _Type1) \ _REDIRECT_TO_L_VERSION_FUNC_PROLOGUE \ _ReturnType __cdecl _FunctionName(_Type1 _Arg1) \ { \ return _FunctionName##_l(_Arg1, NULL); \ } #define _REDIRECT_TO_L_VERSION_2(_ReturnType, _FunctionName, _Type1, _Type2) \ _REDIRECT_TO_L_VERSION_FUNC_PROLOGUE \ _ReturnType __cdecl _FunctionName(_Type1 _Arg1, _Type2 _Arg2) \ { \ return _FunctionName##_l(_Arg1, _Arg2, NULL); \ } #define _REDIRECT_TO_L_VERSION_3(_ReturnType, _FunctionName, _Type1, _Type2, _Type3) \ _REDIRECT_TO_L_VERSION_FUNC_PROLOGUE \ _ReturnType __cdecl 
_FunctionName(_Type1 _Arg1, _Type2 _Arg2, _Type3 _Arg3) \ { \ return _FunctionName##_l(_Arg1, _Arg2, _Arg3, NULL); \ } #define _REDIRECT_TO_L_VERSION_4(_ReturnType, _FunctionName, _Type1, _Type2, _Type3, _Type4) \ _REDIRECT_TO_L_VERSION_FUNC_PROLOGUE \ _ReturnType __cdecl _FunctionName(_Type1 _Arg1, _Type2 _Arg2, _Type3 _Arg3, _Type4 _Arg4) \ { \ return _FunctionName##_l(_Arg1, _Arg2, _Arg3, _Arg4, NULL); \ } #define _REDIRECT_TO_L_VERSION_5(_ReturnType, _FunctionName, _Type1, _Type2, _Type3, _Type4, _Type5) \ _REDIRECT_TO_L_VERSION_FUNC_PROLOGUE \ _ReturnType __cdecl _FunctionName(_Type1 _Arg1, _Type2 _Arg2, _Type3 _Arg3, _Type4 _Arg4, _Type5 _Arg5) \ { \ return _FunctionName##_l(_Arg1, _Arg2, _Arg3, _Arg4, _Arg5, NULL); \ } #define _REDIRECT_TO_L_VERSION_6(_ReturnType, _FunctionName, _Type1, _Type2, _Type3, _Type4, _Type5, _Type6) \ _REDIRECT_TO_L_VERSION_FUNC_PROLOGUE \ _ReturnType __cdecl _FunctionName(_Type1 _Arg1, _Type2 _Arg2, _Type3 _Arg3, _Type4 _Arg4, _Type5 _Arg5, _Type6 _Arg6) \ { \ return _FunctionName##_l(_Arg1, _Arg2, _Arg3, _Arg4, _Arg5, _Arg6, NULL); \ } /* internal helper functions for encoding and decoding pointers */ void __cdecl _init_pointers(); _CRTIMP void * __cdecl _encode_pointer(void *); _CRTIMP void * __cdecl _encoded_null(); _CRTIMP void * __cdecl _decode_pointer(void *); /* internal helper function for communicating with the debugger */ BOOL DebuggerKnownHandle(); /* Macros to simplify the use of Secure CRT in the CRT itself. * We should use [_BEGIN/_END]_SECURE_CRT_DEPRECATION_DISABLE sparingly. 
*/ #define _BEGIN_SECURE_CRT_DEPRECATION_DISABLE \ __pragma(warning(push)) \ __pragma(warning(disable:4996)) #define _END_SECURE_CRT_DEPRECATION_DISABLE \ __pragma(warning(pop)) #define _ERRCHECK(e) \ _INVOKE_WATSON_IF_ERROR(e) #define _ERRCHECK_EINVAL(e) \ _INVOKE_WATSON_IF_ONEOF(e, EINVAL, EINVAL) #define _ERRCHECK_EINVAL_ERANGE(e) \ _INVOKE_WATSON_IF_ONEOF(e, EINVAL, ERANGE) #define _ERRCHECK_SPRINTF(_PrintfCall) \ { \ errno_t _SaveErrno = errno; \ errno = 0; \ if ( ( _PrintfCall ) < 0) \ { \ _ERRCHECK_EINVAL_ERANGE(errno); \ } \ errno = _SaveErrno; \ } /* internal helper function to access environment variable in read-only mode */ const char16_t * __cdecl _wgetenv_helper_nolock(const char16_t *); const char * __cdecl _getenv_helper_nolock(const char *); /* internal helper routines used to query a PE image header. */ BOOL __cdecl _ValidateImageBase(PBYTE pImageBase); PIMAGE_SECTION_HEADER __cdecl _FindPESection(PBYTE pImageBase, DWORD_PTR rva); BOOL __cdecl _IsNonwritableInCurrentImage(PBYTE pTarget); #ifdef __cplusplus } #endif /* __cplusplus */ #ifdef _MSC_VER #pragma pack(pop) #endif /* _MSC_VER */ #endif /* _INC_INTERNAL */
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*** *internal.h - contains declarations of internal routines and variables * * *Purpose: * Declares routines and variables used internally by the C run-time. * * [Internal] * ****/ #if _MSC_VER > 1000 #pragma once #endif /* _MSC_VER > 1000 */ #ifndef _INC_INTERNAL #define _INC_INTERNAL #include <crtdefs.h> #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ #include <cruntime.h> #include <limits.h> /* * Conditionally include windows.h to pick up the definition of * CRITICAL_SECTION. */ #include <windows.h> #ifdef _MSC_VER #pragma pack(push,_CRT_PACKING) #endif /* _MSC_VER */ /* Define function types used in several startup sources */ typedef void (__cdecl *_PVFV)(void); typedef int (__cdecl *_PIFV)(void); typedef void (__cdecl *_PVFI)(int); #if _MSC_VER >= 1400 && defined(_M_CEE) typedef const void* (__clrcall *_PVFVM)(void); typedef int (__clrcall *_PIFVM)(void); typedef void (__clrcall *_CPVFV)(void); #endif /* _MSC_VER >= 1400 && defined(_M_CEE) */ #if defined (_M_CEE_PURE) || (defined (_DLL) && defined (_M_IX86)) /* Retained for compatibility with VC++ 5.0 and earlier versions */ _CRTIMP int * __cdecl __p__commode(void); #endif /* defined (_M_CEE_PURE) || (defined (_DLL) && defined (_M_IX86)) */ #if defined (SPECIAL_CRTEXE) && defined (_DLL) extern int _commode; #else /* defined (SPECIAL_CRTEXE) && defined (_DLL) */ #ifndef _M_CEE_PURE _CRTIMP extern int _commode; #else /* _M_CEE_PURE */ #define _commode (*__p___commode()) #endif /* _M_CEE_PURE */ #endif /* defined (SPECIAL_CRTEXE) && defined (_DLL) */ #define __IOINFO_TM_ANSI 0 /* Regular Text */ #define __IOINFO_TM_UTF8 1 /* UTF8 Encoded */ #define __IOINFO_TM_UTF16LE 2 /* UTF16 Little Endian Encoded */ /* * Control structure for lowio file handles */ typedef struct { intptr_t osfhnd; /* underlying OS file HANDLE */ char osfile; /* attributes of file (e.g., open in 
text mode?) */ char pipech; /* one char buffer for handles opened on pipes */ int lockinitflag; CRITICAL_SECTION lock; #ifndef _SAFECRT_IMPL /* Not used in the safecrt downlevel. We do not define them, so we cannot use them accidentally */ char textmode : 7; /* __IOINFO_TM_ANSI or __IOINFO_TM_UTF8 or __IOINFO_TM_UTF16LE */ char unicode : 1; /* Was the file opened as unicode? */ char pipech2[2]; /* 2 more peak ahead chars for UNICODE mode */ #endif /* _SAFECRT_IMPL */ } ioinfo; /* * Definition of IOINFO_L2E, the log base 2 of the number of elements in each * array of ioinfo structs. */ #define IOINFO_L2E 5 /* * Definition of IOINFO_ARRAY_ELTS, the number of elements in ioinfo array */ #define IOINFO_ARRAY_ELTS (1 << IOINFO_L2E) /* * Definition of IOINFO_ARRAYS, maximum number of supported ioinfo arrays. */ #define IOINFO_ARRAYS 64 #define _NHANDLE_ (IOINFO_ARRAYS * IOINFO_ARRAY_ELTS) #define _TZ_STRINGS_SIZE 64 /* * Access macros for getting at an ioinfo struct and its fields from a * file handle */ #define _pioinfo(i) ( __pioinfo[(i) >> IOINFO_L2E] + ((i) & (IOINFO_ARRAY_ELTS - \ 1)) ) #define _osfhnd(i) ( _pioinfo(i)->osfhnd ) #define _osfile(i) ( _pioinfo(i)->osfile ) #define _pipech(i) ( _pioinfo(i)->pipech ) #define _pipech2(i) ( _pioinfo(i)->pipech2 ) #define _textmode(i) ( _pioinfo(i)->textmode ) #define _tm_unicode(i) ( _pioinfo(i)->unicode ) /* * Safer versions of the above macros. Currently, only _osfile_safe is * used. */ #define _pioinfo_safe(i) ( (((i) != -1) && ((i) != -2)) ? 
_pioinfo(i) : &__badioinfo ) #define _osfhnd_safe(i) ( _pioinfo_safe(i)->osfhnd ) #define _osfile_safe(i) ( _pioinfo_safe(i)->osfile ) #define _pipech_safe(i) ( _pioinfo_safe(i)->pipech ) #define _pipech2_safe(i) ( _pioinfo_safe(i)->pipech2 ) #ifdef _SAFECRT_IMPL /* safecrt does not have support for textmode, so we always return __IOINFO_TM_ANSI */ #define _textmode_safe(i) __IOINFO_TM_ANSI #define _tm_unicode_safe(i) 0 #else /* _SAFECRT_IMPL */ #define _textmode_safe(i) ( _pioinfo_safe(i)->textmode ) #define _tm_unicode_safe(i) ( _pioinfo_safe(i)->unicode ) #endif /* _SAFECRT_IMPL */ #ifndef _M_CEE_PURE #ifdef _SAFECRT_IMPL /* We need to get this from the downlevel DLL, even when we build safecrt.lib */ extern __declspec(dllimport) ioinfo __badioinfo; extern __declspec(dllimport) ioinfo * __pioinfo[]; #else /* _SAFECRT_IMPL */ /* * Special, static ioinfo structure used only for more graceful handling * of a C file handle value of -1 (results from common errors at the stdio * level). */ extern _CRTIMP ioinfo __badioinfo; /* * Array of arrays of control structures for lowio files. */ extern _CRTIMP ioinfo * __pioinfo[]; #endif /* _SAFECRT_IMPL */ #endif /* _M_CEE_PURE */ /* * Current number of allocated ioinfo structures (_NHANDLE_ is the upper * limit). 
*/ extern int _nhandle; int __cdecl _alloc_osfhnd(void); int __cdecl _free_osfhnd(int); int __cdecl _set_osfhnd(int, intptr_t); /* fileno for stdout, stdin & stderr when there is no console */ #define _NO_CONSOLE_FILENO (intptr_t)-2 extern const char __dnames[]; extern const char __mnames[]; extern int _days[]; extern int _lpdays[]; extern __time32_t __cdecl __loctotime32_t(int, int, int, int, int, int, int); extern __time64_t __cdecl __loctotime64_t(int, int, int, int, int, int, int); extern void __cdecl __tzset(void); extern int __cdecl _validdrive(unsigned); /* * If we are only interested in years between 1901 and 2099, we could use this: * * #define IS_LEAP_YEAR(y) (y % 4 == 0) */ #define IS_LEAP_YEAR(y) (((y) % 4 == 0 && (y) % 100 != 0) || (y) % 400 == 0) /* * This variable is in the C start-up; the length must be kept synchronized * It is used by the *cenvarg.c modules */ extern char _acfinfo[]; /* "_C_FILE_INFO=" */ #define CFI_LENGTH 12 /* "_C_FILE_INFO" is 12 bytes long */ /* * stdio internals */ #ifndef _FILE_DEFINED struct _iobuf { char *_ptr; int _cnt; char *_base; int _flag; int _file; int _charbuf; int _bufsiz; char *_tmpfname; }; typedef struct _iobuf FILE; #define _FILE_DEFINED #endif /* _FILE_DEFINED */ #if !defined (_FILEX_DEFINED) && defined (_WINDOWS_) /* * Variation of FILE type used for the dynamically allocated portion of * __piob[]. For single thread, _FILEX is the same as FILE. For multithread * models, _FILEX has two fields: the FILE struct and the CRITICAL_SECTION * struct used to serialize access to the FILE. */ typedef struct { FILE f; CRITICAL_SECTION lock; } _FILEX; #define _FILEX_DEFINED #endif /* !defined (_FILEX_DEFINED) && defined (_WINDOWS_) */ /* * Number of entries supported in the array pointed to by __piob[]. That is, * the number of stdio-level files which may be open simultaneously. This * is normally set to _NSTREAM_ by the stdio initialization code. 
*/ extern int _nstream; /* * Pointer to the array of pointers to FILE/_FILEX structures that are used * to manage stdio-level files. */ extern void **__piob; FILE * __cdecl _getstream(void); FILE * __cdecl _openfile(_In_z_ const char * _Filename, _In_z_ const char * _Mode, _In_ int _ShFlag, _Out_ FILE * _File); FILE * __cdecl _wopenfile(_In_z_ const char16_t * _Filename, _In_z_ const char16_t * _Mode, _In_ int _ShFlag, _Out_ FILE * _File); void __cdecl _getbuf(_Out_ FILE * _File); int __cdecl _filwbuf (__inout FILE * _File); int __cdecl _flswbuf(_In_ int _Ch, __inout FILE * _File); void __cdecl _freebuf(__inout FILE * _File); int __cdecl _stbuf(__inout FILE * _File); void __cdecl _ftbuf(int _Flag, __inout FILE * _File); #ifdef _SAFECRT_IMPL int __cdecl _output(__inout FILE * _File, _In_z_ __format_string const char *_Format, va_list _ArgList); int __cdecl _woutput(__inout FILE * _File, _In_z_ __format_string const char16_t *_Format, va_list _ArgList); int __cdecl _output_s(__inout FILE * _File, _In_z_ __format_string const char *_Format, va_list _ArgList); int __cdecl _output_p(__inout FILE * _File, _In_z_ __format_string const char *_Format, va_list _ArgList); int __cdecl _woutput_s(__inout FILE * _File, _In_z_ __format_string const char16_t *_Format, va_list _ArgList); int __cdecl _woutput_p(__inout FILE * _File, _In_z_ __format_string const char16_t *_Format, va_list _ArgList); typedef int (*OUTPUTFN)(FILE *, const char *, va_list); typedef int (*WOUTPUTFN)(FILE *, const char16_t *, va_list); #else /* _SAFECRT_IMPL */ int __cdecl _output_l(__inout FILE * _File, _In_z_ __format_string const char *_Format, _In_opt_ _locale_t _Locale, va_list _ArgList); int __cdecl _woutput_l(__inout FILE * _File, _In_z_ __format_string const char16_t *_Format, _In_opt_ _locale_t _Locale, va_list _ArgList); int __cdecl _output_s_l(__inout FILE * _File, _In_z_ __format_string const char *_Format, _In_opt_ _locale_t _Locale, va_list _ArgList); int __cdecl _output_p_l(__inout FILE * 
_File, _In_z_ __format_string const char *_Format, _In_opt_ _locale_t _Locale, va_list _ArgList); int __cdecl _woutput_s_l(__inout FILE * _File, _In_z_ __format_string const char16_t *_Format, _In_opt_ _locale_t _Locale, va_list _ArgList); int __cdecl _woutput_p_l(__inout FILE * _File, _In_z_ __format_string const char16_t *_Format, _In_opt_ _locale_t _Locale, va_list _ArgList); typedef int (*OUTPUTFN)(__inout FILE * _File, const char *, _locale_t, va_list); typedef int (*WOUTPUTFN)(__inout FILE * _File, const char16_t *, _locale_t, va_list); #endif /* _SAFECRT_IMPL */ #ifdef _SAFECRT_IMPL int __cdecl _input(_In_ FILE * _File, _In_z_ __format_string const unsigned char * _Format, va_list _ArgList); int __cdecl _winput(_In_ FILE * _File, _In_z_ __format_string const char16_t * _Format, va_list _ArgList); int __cdecl _input_s(_In_ FILE * _File, _In_z_ __format_string const unsigned char * _Format, va_list _ArgList); int __cdecl _winput_s(_In_ FILE * _File, _In_z_ __format_string const char16_t * _Format, va_list _ArgList); typedef int (*INPUTFN)(FILE *, const unsigned char *, va_list); typedef int (*WINPUTFN)(FILE *, const char16_t *, va_list); #else /* _SAFECRT_IMPL */ int __cdecl _input_l(__inout FILE * _File, _In_z_ __format_string const unsigned char *, _In_opt_ _locale_t _Locale, va_list _ArgList); int __cdecl _winput_l(__inout FILE * _File, _In_z_ __format_string const char16_t *, _In_opt_ _locale_t _Locale, va_list _ArgList); int __cdecl _input_s_l(__inout FILE * _File, _In_z_ __format_string const unsigned char *, _In_opt_ _locale_t _Locale, va_list _ArgList); int __cdecl _winput_s_l(__inout FILE * _File, _In_z_ __format_string const char16_t *, _In_opt_ _locale_t _Locale, va_list _ArgList); typedef int (*INPUTFN)(FILE *, const unsigned char *, _locale_t, va_list); typedef int (*WINPUTFN)(FILE *, const char16_t *, _locale_t, va_list); #ifdef _UNICODE #define TINPUTFN WINPUTFN #else /* _UNICODE */ #define TINPUTFN INPUTFN #endif /* _UNICODE */ #endif /* 
_SAFECRT_IMPL */ int __cdecl _flush(__inout FILE * _File); void __cdecl _endstdio(void); errno_t __cdecl _sopen_helper(_In_z_ const char * _Filename, _In_ int _OFlag, _In_ int _ShFlag, _In_ int _PMode, _Out_ int * _PFileHandle, int _BSecure); errno_t __cdecl _wsopen_helper(_In_z_ const char16_t * _Filename, _In_ int _OFlag, _In_ int _ShFlag, _In_ int _PMode, _Out_ int * _PFileHandle, int _BSecure); #ifndef CRTDLL extern int _cflush; #endif /* CRTDLL */ extern unsigned int _tempoff; extern unsigned int _old_pfxlen; extern int _umaskval; /* the umask value */ extern char _pipech[]; /* pipe lookahead */ extern char _exitflag; /* callable termination flag */ extern int _C_Termination_Done; /* termination done flag */ char * __cdecl _getpath(_In_z_ const char * _Src, _Out_writes_z_(_SizeInChars) char * _Dst, _In_ size_t _SizeInChars); char16_t * __cdecl _wgetpath(_In_z_ const char16_t * _Src, _Out_writes_z_(_SizeInWords) char16_t * _Dst, _In_ size_t _SizeInWords); extern int _dowildcard; /* flag to enable argv[] wildcard expansion */ #ifndef _PNH_DEFINED typedef int (__cdecl * _PNH)( size_t ); #define _PNH_DEFINED #endif /* _PNH_DEFINED */ #if _MSC_VER >= 1400 && defined(_M_CEE) #ifndef __MPNH_DEFINED typedef int (__clrcall * __MPNH)( size_t ); #define __MPNH_DEFINED #endif /* __MPNH_DEFINED */ #endif /* _MSC_VER >= 1400 && defined(_M_CEE) */ /* calls the currently installed new handler */ int __cdecl _callnewh(_In_ size_t _Size); extern int _newmode; /* malloc new() handler mode */ /* pointer to initial environment block that is passed to [w]main */ #ifndef _M_CEE_PURE extern _CRTIMP char16_t **__winitenv; extern _CRTIMP char **__initenv; #endif /* _M_CEE_PURE */ /* _calloca helper */ #define _calloca(count, size) ((count<=0 || size<=0 || ((((size_t)_HEAP_MAXREQ) / ((size_t)count)) < ((size_t)size)))? 
NULL : _malloca(count * size)) /* startup set values */ extern char *_aenvptr; /* environment ptr */ extern char16_t *_wenvptr; /* wide environment ptr */ /* command line */ #if defined (_DLL) _CRTIMP char ** __cdecl __p__acmdln(void); _CRTIMP char16_t ** __cdecl __p__wcmdln(void); #endif /* defined (_DLL) */ #ifndef _M_CEE_PURE _CRTIMP extern char *_acmdln; _CRTIMP extern char16_t *_wcmdln; #else /* _M_CEE_PURE */ #define _acmdln (*__p__acmdln()) #define _wcmdln (*__p__wcmdln()) #endif /* _M_CEE_PURE */ /* * prototypes for internal startup functions */ int __cdecl _cwild(void); /* wild.c */ int __cdecl _wcwild(void); /* wwild.c */ int __cdecl _mtinit(void); /* tidtable.c */ void __cdecl _mtterm(void); /* tidtable.c */ int __cdecl _mtinitlocks(void); /* mlock.c */ void __cdecl _mtdeletelocks(void); /* mlock.c */ int __cdecl _mtinitlocknum(int); /* mlock.c */ /* Wrapper for InitializeCriticalSection API, with default spin count */ int __cdecl __crtInitCritSecAndSpinCount(PCRITICAL_SECTION, DWORD); #define _CRT_SPINCOUNT 4000 /* * C source build only!!!! 
* * more prototypes for internal startup functions */ void __cdecl _amsg_exit(int); /* crt0.c */ void __cdecl __crtExitProcess(int); /* crt0dat.c */ void __cdecl __crtCorExitProcess(int); /* crt0dat.c */ void __cdecl __crtdll_callstaticterminators(void); /* crt0dat.c */ /* _cinit now allows the caller to suppress floating point precision init This allows the DLLs that use the CRT to not initialise FP precision, allowing the EXE's setting to persist even when a DLL is loaded */ int __cdecl _cinit(int /* initFloatingPrecision */); /* crt0dat.c */ void __cdecl __doinits(void); /* astart.asm */ void __cdecl __doterms(void); /* astart.asm */ void __cdecl __dopreterms(void); /* astart.asm */ void __cdecl _FF_MSGBANNER(void); void __cdecl _fpmath(int /*initPrecision*/); void __cdecl _fpclear(void); void __cdecl _fptrap(void); /* crt0fp.c */ int __cdecl _heap_init(int); void __cdecl _heap_term(void); void __cdecl _heap_abort(void); void __cdecl __initconin(void); /* initcon.c */ void __cdecl __initconout(void); /* initcon.c */ int __cdecl _ioinit(void); /* crt0.c, crtlib.c */ void __cdecl _ioterm(void); /* crt0.c, crtlib.c */ char * __cdecl _GET_RTERRMSG(int); void __cdecl _NMSG_WRITE(int); int __CRTDECL _setargv(void); /* setargv.c, stdargv.c */ int __CRTDECL __setargv(void); /* stdargv.c */ int __CRTDECL _wsetargv(void); /* wsetargv.c, wstdargv.c */ int __CRTDECL __wsetargv(void); /* wstdargv.c */ int __cdecl _setenvp(void); /* stdenvp.c */ int __cdecl _wsetenvp(void); /* wstdenvp.c */ void __cdecl __setmbctable(unsigned int); /* mbctype.c */ #ifdef MRTDLL _MRTIMP int __cdecl _onexit_process(_CPVFV); _MRTIMP int __cdecl _onexit_app_domain(_CPVFV); #endif /* MRTDLL */ #ifndef _MANAGED_MAIN int __CRTDECL main(_In_ int _Argc, _In_reads_z_(_Argc) char ** _Argv, _In_z_ char ** _Env); int __CRTDECL wmain(_In_ int _Argc, _In_reads_z_(_Argc) char16_t ** _Argv, _In_z_ char16_t ** _Env); #endif /* _MANAGED_MAIN */ /* helper functions for wide/multibyte environment conversion */ 
int __cdecl __mbtow_environ (void); int __cdecl __wtomb_environ (void); /* These two functions take a char ** for the environment option At some point during their execution, they take ownership of the memory block passed in using option. At this point, they NULL out the incoming char * / char16_t * to ensure there is no double-free */ int __cdecl __crtsetenv(_Outptr_opt_ char ** _POption, _In_ const int _Primary); int __cdecl __crtwsetenv(_Outptr_opt_ char16_t ** _POption, _In_ const int _Primary); #ifndef _M_CEE_PURE _CRTIMP extern void (__cdecl * _aexit_rtn)(int); #endif /* _M_CEE_PURE */ #if defined (_DLL) || defined (CRTDLL) #ifndef _STARTUP_INFO_DEFINED typedef struct { int newmode; } _startupinfo; #define _STARTUP_INFO_DEFINED #endif /* _STARTUP_INFO_DEFINED */ _CRTIMP int __cdecl __getmainargs(_Out_ int * _Argc, _Outptr_result_buffer_(*_Argc) char *** _Argv, _Outptr_opt_ char *** _Env, _In_ int _DoWildCard, _In_ _startupinfo * _StartInfo); _CRTIMP int __cdecl __wgetmainargs(_Out_ int * _Argc, _Outptr_result_buffer_(*_Argc)char16_t *** _Argv, _Outptr_opt_ char16_t *** _Env, _In_ int _DoWildCard, _In_ _startupinfo * _StartInfo); #endif /* defined (_DLL) || defined (CRTDLL) */ /* * Prototype, variables and constants which determine how error messages are * written out. 
*/ #define _UNKNOWN_APP 0 #define _CONSOLE_APP 1 #define _GUI_APP 2 extern int __app_type; #if !defined (_M_CEE_PURE) extern Volatile<void*> __native_startup_lock; #define __NO_REASON UINT_MAX extern Volatile<unsigned int> __native_dllmain_reason; extern Volatile<unsigned int> __native_vcclrit_reason; #if defined (__cplusplus) #pragma warning(push) #pragma warning(disable: 4483) #if _MSC_FULL_VER >= 140050415 #define _NATIVE_STARTUP_NAMESPACE __identifier("<CrtImplementationDetails>") #else /* _MSC_FULL_VER >= 140050415 */ #define _NATIVE_STARTUP_NAMESPACE __CrtImplementationDetails #endif /* _MSC_FULL_VER >= 140050415 */ namespace _NATIVE_STARTUP_NAMESPACE { class NativeDll { private: static const unsigned int ProcessDetach = 0; static const unsigned int ProcessAttach = 1; static const unsigned int ThreadAttach = 2; static const unsigned int ThreadDetach = 3; static const unsigned int ProcessVerifier = 4; public: inline static bool IsInDllMain() { return (__native_dllmain_reason != __NO_REASON); } inline static bool IsInProcessAttach() { return (__native_dllmain_reason == ProcessAttach); } inline static bool IsInProcessDetach() { return (__native_dllmain_reason == ProcessDetach); } inline static bool IsInVcclrit() { return (__native_vcclrit_reason != __NO_REASON); } inline static bool IsSafeForManagedCode() { if (!IsInDllMain()) { return true; } if (IsInVcclrit()) { return true; } return !IsInProcessAttach() && !IsInProcessDetach(); } }; } #pragma warning(pop) #endif /* defined (__cplusplus) */ #endif /* !defined (_M_CEE_PURE) */ extern int __error_mode; _CRTIMP void __cdecl __set_app_type(int); #if defined (CRTDLL) && !defined (_SYSCRT) /* * All these function pointer are used for creating global state of CRT * functions. 
Either all of them will be set or all of them will be NULL */ typedef void (__cdecl *_set_app_type_function)(int); typedef int (__cdecl *_get_app_type_function)(); extern _set_app_type_function __set_app_type_server; extern _get_app_type_function __get_app_type_server; #endif /* defined (CRTDLL) && !defined (_SYSCRT) */ /* * C source build only!!!! * * map Win32 errors into Xenix errno values -- for modules written in C */ _CRTIMP void __cdecl _dosmaperr(unsigned long); extern int __cdecl _get_errno_from_oserr(unsigned long); /* * internal routines used by the exec/spawn functions */ extern intptr_t __cdecl _dospawn(_In_ int _Mode, _In_opt_z_ const char * _Name, __inout_z char * _Cmd, _In_opt_z_ char * _Env); extern intptr_t __cdecl _wdospawn(_In_ int _Mode, _In_opt_z_ const char16_t * _Name, __inout_z char16_t * _Cmd, _In_opt_z_ char16_t * _Env); extern int __cdecl _cenvarg(_In_z_ const char * const * _Argv, _In_opt_z_ const char * const * _Env, _Outptr_opt_ char ** _ArgBlk, _Outptr_opt_ char ** _EnvBlk, _In_z_ const char *_Name); extern int __cdecl _wcenvarg(_In_z_ const char16_t * const * _Argv, _In_opt_z_ const char16_t * const * _Env, _Outptr_opt_ char16_t ** _ArgBlk, _Outptr_opt_ char16_t ** _EnvBlk, _In_z_ const char16_t * _Name); #ifndef _M_IX86 extern char ** _capture_argv(_In_ va_list *, _In_z_ const char * _FirstArg, _Out_writes_z_(_MaxCount) char ** _Static_argv, _In_ size_t _MaxCount); extern char16_t ** _wcapture_argv(_In_ va_list *, _In_z_ const char16_t * _FirstArg, _Out_writes_z_(_MaxCount) char16_t ** _Static_argv, _In_ size_t _MaxCount); #endif /* _M_IX86 */ /* * internal routine used by the abort */ extern _PHNDLR __cdecl __get_sigabrt(void); /* * Type from ntdef.h */ typedef LONG NTSTATUS; /* * Exception code used in _invalid_parameter */ #ifndef STATUS_INVALID_PARAMETER #define STATUS_INVALID_PARAMETER ((NTSTATUS)0xC000000DL) #endif /* STATUS_INVALID_PARAMETER */ /* * Exception code used for abort and _CALL_REPORTFAULT */ #ifndef 
STATUS_FATAL_APP_EXIT #define STATUS_FATAL_APP_EXIT ((NTSTATUS)0x40000015L) #endif /* STATUS_FATAL_APP_EXIT */ /* * Validate functions */ #include <crtdbg.h> /* _ASSERTE */ #include <errno.h> #define __STR2WSTR(str) L##str #define _STR2WSTR(str) __STR2WSTR(str) #define __FILEW__ _STR2WSTR(__FILE__) #define __FUNCTIONW__ _STR2WSTR(__FUNCTION__) /* We completely fill the buffer only in debug (see _SECURECRT__FILL_STRING * and _SECURECRT__FILL_BYTE macros). */ #if !defined (_SECURECRT_FILL_BUFFER) #ifdef _DEBUG #define _SECURECRT_FILL_BUFFER 1 #else /* _DEBUG */ #define _SECURECRT_FILL_BUFFER 0 #endif /* _DEBUG */ #endif /* !defined (_SECURECRT_FILL_BUFFER) */ #ifndef _SAFECRT_IMPL /* _invalid_parameter is already defined in safecrt.h and safecrt.lib */ #if !defined (_NATIVE_char16_t_DEFINED) && defined (_M_CEE_PURE) extern "C++" #endif /* !defined (_NATIVE_char16_t_DEFINED) && defined (_M_CEE_PURE) */ _CRTIMP #endif /* _SAFECRT_IMPL */ void __cdecl _invalid_parameter(_In_opt_z_ const char16_t *, _In_opt_z_ const char16_t *, _In_opt_z_ const char16_t *, unsigned int, uintptr_t); #if !defined (_NATIVE_char16_t_DEFINED) && defined (_M_CEE_PURE) extern "C++" #endif /* !defined (_NATIVE_char16_t_DEFINED) && defined (_M_CEE_PURE) */ _CRTIMP void __cdecl _invoke_watson(_In_opt_z_ const char16_t *, _In_opt_z_ const char16_t *, _In_opt_z_ const char16_t *, unsigned int, uintptr_t); #ifndef _DEBUG #if !defined (_NATIVE_char16_t_DEFINED) && defined (_M_CEE_PURE) extern "C++" #endif /* !defined (_NATIVE_char16_t_DEFINED) && defined (_M_CEE_PURE) */ _CRTIMP void __cdecl _invalid_parameter_noinfo(void); #endif /* _DEBUG */ /* Invoke Watson if _ExpressionError is not 0; otherwise simply return _EspressionError */ __forceinline void _invoke_watson_if_error( errno_t _ExpressionError, const char16_t *_Expression, const char16_t *_Function, const char16_t *_File, unsigned int _Line, uintptr_t _Reserved ) { if (_ExpressionError == 0) { return; } _invoke_watson(_Expression, _Function, 
_File, _Line, _Reserved); } /* Invoke Watson if _ExpressionError is not 0 and equal to _ErrorValue1 or _ErrorValue2; otherwise simply return _EspressionError */ __forceinline errno_t _invoke_watson_if_oneof( errno_t _ExpressionError, errno_t _ErrorValue1, errno_t _ErrorValue2, const char16_t *_Expression, const char16_t *_Function, const char16_t *_File, unsigned int _Line, uintptr_t _Reserved ) { if (_ExpressionError == 0 || (_ExpressionError != _ErrorValue1 && _ExpressionError != _ErrorValue2)) { return _ExpressionError; } _invoke_watson(_Expression, _Function, _File, _Line, _Reserved); return _ExpressionError; } /* * Assert in debug builds. * set errno and return * */ #ifdef _DEBUG #define _CALL_INVALID_PARAMETER_FUNC(funcname, expr) funcname(expr, __FUNCTIONW__, __FILEW__, __LINE__, 0) #define _INVOKE_WATSON_IF_ERROR(expr) _invoke_watson_if_error((expr), __STR2WSTR(#expr), __FUNCTIONW__, __FILEW__, __LINE__, 0) #define _INVOKE_WATSON_IF_ONEOF(expr, errvalue1, errvalue2) _invoke_watson_if_oneof(expr, (errvalue1), (errvalue2), __STR2WSTR(#expr), __FUNCTIONW__, __FILEW__, __LINE__, 0) #else /* _DEBUG */ #define _CALL_INVALID_PARAMETER_FUNC(funcname, expr) funcname(NULL, NULL, NULL, 0, 0) #define _INVOKE_WATSON_IF_ERROR(expr) _invoke_watson_if_error(expr, NULL, NULL, NULL, 0, 0) #define _INVOKE_WATSON_IF_ONEOF(expr, errvalue1, errvalue2) _invoke_watson_if_oneof((expr), (errvalue1), (errvalue2), NULL, NULL, NULL, 0, 0) #endif /* _DEBUG */ #define _INVALID_PARAMETER(expr) _CALL_INVALID_PARAMETER_FUNC(_invalid_parameter, expr) #define _VALIDATE_RETURN_VOID( expr, errorcode ) \ { \ int _Expr_val=!!(expr); \ _ASSERT_EXPR( ( _Expr_val ), _CRT_WIDE(#expr) ); \ if ( !( _Expr_val ) ) \ { \ errno = errorcode; \ _INVALID_PARAMETER(_CRT_WIDE(#expr)); \ return; \ } \ } /* * Assert in debug builds. 
* set errno and return value */ #ifndef _VALIDATE_RETURN #define _VALIDATE_RETURN( expr, errorcode, retexpr ) \ { \ int _Expr_val=!!(expr); \ _ASSERT_EXPR( ( _Expr_val ), _CRT_WIDE(#expr) ); \ if ( !( _Expr_val ) ) \ { \ errno = errorcode; \ _INVALID_PARAMETER(_CRT_WIDE(#expr) ); \ return ( retexpr ); \ } \ } #endif /* _VALIDATE_RETURN */ #ifndef _VALIDATE_RETURN_NOEXC #define _VALIDATE_RETURN_NOEXC( expr, errorcode, retexpr ) \ { \ if ( !(expr) ) \ { \ errno = errorcode; \ return ( retexpr ); \ } \ } #endif /* _VALIDATE_RETURN_NOEXC */ /* * Assert in debug builds. * set errno and set retval for later usage */ #define _VALIDATE_SETRET( expr, errorcode, retval, retexpr ) \ { \ int _Expr_val=!!(expr); \ _ASSERT_EXPR( ( _Expr_val ), _CRT_WIDE(#expr) ); \ if ( !( _Expr_val ) ) \ { \ errno = errorcode; \ _INVALID_PARAMETER(_CRT_WIDE(#expr)); \ retval=( retexpr ); \ } \ } #define _CHECK_FH_RETURN( handle, errorcode, retexpr ) \ { \ if(handle == _NO_CONSOLE_FILENO) \ { \ errno = errorcode; \ return ( retexpr ); \ } \ } /* We use _VALIDATE_STREAM_ANSI_RETURN to ensure that ANSI file operations( fprintf etc) aren't called on files opened as UNICODE. We do this check only if it's an actual FILE pointer & not a string */ #define _VALIDATE_STREAM_ANSI_RETURN( stream, errorcode, retexpr ) \ { \ FILE *_Stream=stream; \ _VALIDATE_RETURN(( (_Stream->_flag & _IOSTRG) || \ ( (_textmode_safe(_fileno(_Stream)) == __IOINFO_TM_ANSI) && \ !_tm_unicode_safe(_fileno(_Stream)))), \ errorcode, retexpr) \ } /* We use _VALIDATE_STREAM_ANSI_SETRET to ensure that ANSI file operations( fprintf etc) aren't called on files opened as UNICODE. We do this check only if it's an actual FILE pointer & not a string. 
It doesn't actually return immediately */ #define _VALIDATE_STREAM_ANSI_SETRET( stream, errorcode, retval, retexpr) \ { \ FILE *_Stream=stream; \ _VALIDATE_SETRET(( (_Stream->_flag & _IOSTRG) || \ ( (_textmode_safe(_fileno(_Stream)) == __IOINFO_TM_ANSI) && \ !_tm_unicode_safe(_fileno(_Stream)))), \ errorcode, retval, retexpr) \ } /* * Assert in debug builds. * Return value (do not set errno) */ #define _VALIDATE_RETURN_NOERRNO( expr, retexpr ) \ { \ int _Expr_val=!!(expr); \ _ASSERT_EXPR( ( _Expr_val ), _CRT_WIDE(#expr) ); \ if ( !( _Expr_val ) ) \ { \ _INVALID_PARAMETER(_CRT_WIDE(#expr)); \ return ( retexpr ); \ } \ } /* * Assert in debug builds. * set errno and return errorcode */ #define _VALIDATE_RETURN_ERRCODE( expr, errorcode ) \ { \ int _Expr_val=!!(expr); \ _ASSERT_EXPR( ( _Expr_val ), _CRT_WIDE(#expr) ); \ if ( !( _Expr_val ) ) \ { \ errno = errorcode; \ _INVALID_PARAMETER(_CRT_WIDE(#expr)); \ return ( errorcode ); \ } \ } #define _VALIDATE_RETURN_ERRCODE_NOEXC( expr, errorcode ) \ { \ if (!(expr)) \ { \ errno = errorcode; \ return ( errorcode ); \ } \ } #define _VALIDATE_CLEAR_OSSERR_RETURN( expr, errorcode, retexpr ) \ { \ int _Expr_val=!!(expr); \ _ASSERT_EXPR( ( _Expr_val ), _CRT_WIDE(#expr) ); \ if ( !( _Expr_val ) ) \ { \ _doserrno = 0L; \ errno = errorcode; \ _INVALID_PARAMETER(_CRT_WIDE(#expr) ); \ return ( retexpr ); \ } \ } #define _CHECK_FH_CLEAR_OSSERR_RETURN( handle, errorcode, retexpr ) \ { \ if(handle == _NO_CONSOLE_FILENO) \ { \ _doserrno = 0L; \ errno = errorcode; \ return ( retexpr ); \ } \ } #define _VALIDATE_CLEAR_OSSERR_RETURN_ERRCODE( expr, errorcode ) \ { \ int _Expr_val=!!(expr); \ _ASSERT_EXPR( ( _Expr_val ), _CRT_WIDE(#expr) ); \ if ( !( _Expr_val ) ) \ { \ _doserrno = 0L; \ errno = errorcode; \ _INVALID_PARAMETER(_CRT_WIDE(#expr)); \ return ( errorcode ); \ } \ } #define _CHECK_FH_CLEAR_OSSERR_RETURN_ERRCODE( handle, retexpr ) \ { \ if(handle == _NO_CONSOLE_FILENO) \ { \ _doserrno = 0L; \ return ( retexpr ); \ } \ } #ifdef _DEBUG 
extern size_t __crtDebugFillThreshold; #endif /* _DEBUG */ #if !defined (_SECURECRT_FILL_BUFFER_THRESHOLD) #ifdef _DEBUG #define _SECURECRT_FILL_BUFFER_THRESHOLD __crtDebugFillThreshold #else /* _DEBUG */ #define _SECURECRT_FILL_BUFFER_THRESHOLD ((size_t)0) #endif /* _DEBUG */ #endif /* !defined (_SECURECRT_FILL_BUFFER_THRESHOLD) */ #if _SECURECRT_FILL_BUFFER #define _SECURECRT__FILL_STRING(_String, _Size, _Offset) \ if ((_Size) != ((size_t)-1) && (_Size) != INT_MAX && \ ((size_t)(_Offset)) < (_Size)) \ { \ memset((_String) + (_Offset), \ _SECURECRT_FILL_BUFFER_PATTERN, \ (_SECURECRT_FILL_BUFFER_THRESHOLD < ((size_t)((_Size) - (_Offset))) ? \ _SECURECRT_FILL_BUFFER_THRESHOLD : \ ((_Size) - (_Offset))) * sizeof(*(_String))); \ } #else /* _SECURECRT_FILL_BUFFER */ #define _SECURECRT__FILL_STRING(_String, _Size, _Offset) #endif /* _SECURECRT_FILL_BUFFER */ #if _SECURECRT_FILL_BUFFER #define _SECURECRT__FILL_BYTE(_Position) \ if (_SECURECRT_FILL_BUFFER_THRESHOLD > 0) \ { \ (_Position) = _SECURECRT_FILL_BUFFER_PATTERN; \ } #else /* _SECURECRT_FILL_BUFFER */ #define _SECURECRT__FILL_BYTE(_Position) #endif /* _SECURECRT_FILL_BUFFER */ #ifdef __cplusplus #define _REDIRECT_TO_L_VERSION_FUNC_PROLOGUE extern "C" #else /* __cplusplus */ #define _REDIRECT_TO_L_VERSION_FUNC_PROLOGUE #endif /* __cplusplus */ /* helper macros to redirect an mbs function to the corresponding _l version */ #define _REDIRECT_TO_L_VERSION_1(_ReturnType, _FunctionName, _Type1) \ _REDIRECT_TO_L_VERSION_FUNC_PROLOGUE \ _ReturnType __cdecl _FunctionName(_Type1 _Arg1) \ { \ return _FunctionName##_l(_Arg1, NULL); \ } #define _REDIRECT_TO_L_VERSION_2(_ReturnType, _FunctionName, _Type1, _Type2) \ _REDIRECT_TO_L_VERSION_FUNC_PROLOGUE \ _ReturnType __cdecl _FunctionName(_Type1 _Arg1, _Type2 _Arg2) \ { \ return _FunctionName##_l(_Arg1, _Arg2, NULL); \ } #define _REDIRECT_TO_L_VERSION_3(_ReturnType, _FunctionName, _Type1, _Type2, _Type3) \ _REDIRECT_TO_L_VERSION_FUNC_PROLOGUE \ _ReturnType __cdecl 
_FunctionName(_Type1 _Arg1, _Type2 _Arg2, _Type3 _Arg3) \ { \ return _FunctionName##_l(_Arg1, _Arg2, _Arg3, NULL); \ } #define _REDIRECT_TO_L_VERSION_4(_ReturnType, _FunctionName, _Type1, _Type2, _Type3, _Type4) \ _REDIRECT_TO_L_VERSION_FUNC_PROLOGUE \ _ReturnType __cdecl _FunctionName(_Type1 _Arg1, _Type2 _Arg2, _Type3 _Arg3, _Type4 _Arg4) \ { \ return _FunctionName##_l(_Arg1, _Arg2, _Arg3, _Arg4, NULL); \ } #define _REDIRECT_TO_L_VERSION_5(_ReturnType, _FunctionName, _Type1, _Type2, _Type3, _Type4, _Type5) \ _REDIRECT_TO_L_VERSION_FUNC_PROLOGUE \ _ReturnType __cdecl _FunctionName(_Type1 _Arg1, _Type2 _Arg2, _Type3 _Arg3, _Type4 _Arg4, _Type5 _Arg5) \ { \ return _FunctionName##_l(_Arg1, _Arg2, _Arg3, _Arg4, _Arg5, NULL); \ } #define _REDIRECT_TO_L_VERSION_6(_ReturnType, _FunctionName, _Type1, _Type2, _Type3, _Type4, _Type5, _Type6) \ _REDIRECT_TO_L_VERSION_FUNC_PROLOGUE \ _ReturnType __cdecl _FunctionName(_Type1 _Arg1, _Type2 _Arg2, _Type3 _Arg3, _Type4 _Arg4, _Type5 _Arg5, _Type6 _Arg6) \ { \ return _FunctionName##_l(_Arg1, _Arg2, _Arg3, _Arg4, _Arg5, _Arg6, NULL); \ } /* internal helper functions for encoding and decoding pointers */ void __cdecl _init_pointers(); _CRTIMP void * __cdecl _encode_pointer(void *); _CRTIMP void * __cdecl _encoded_null(); _CRTIMP void * __cdecl _decode_pointer(void *); /* internal helper function for communicating with the debugger */ BOOL DebuggerKnownHandle(); #define _ERRCHECK(e) \ _INVOKE_WATSON_IF_ERROR(e) #define _ERRCHECK_EINVAL(e) \ _INVOKE_WATSON_IF_ONEOF(e, EINVAL, EINVAL) #define _ERRCHECK_EINVAL_ERANGE(e) \ _INVOKE_WATSON_IF_ONEOF(e, EINVAL, ERANGE) #define _ERRCHECK_SPRINTF(_PrintfCall) \ { \ errno_t _SaveErrno = errno; \ errno = 0; \ if ( ( _PrintfCall ) < 0) \ { \ _ERRCHECK_EINVAL_ERANGE(errno); \ } \ errno = _SaveErrno; \ } /* internal helper function to access environment variable in read-only mode */ const char16_t * __cdecl _wgetenv_helper_nolock(const char16_t *); const char * __cdecl _getenv_helper_nolock(const 
char *); /* internal helper routines used to query a PE image header. */ BOOL __cdecl _ValidateImageBase(PBYTE pImageBase); PIMAGE_SECTION_HEADER __cdecl _FindPESection(PBYTE pImageBase, DWORD_PTR rva); BOOL __cdecl _IsNonwritableInCurrentImage(PBYTE pTarget); #ifdef __cplusplus } #endif /* __cplusplus */ #ifdef _MSC_VER #pragma pack(pop) #endif /* _MSC_VER */ #endif /* _INC_INTERNAL */
1
dotnet/runtime
66,234
Remove compiler warning suppression
Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
AaronRobinsonMSFT
2022-03-05T03:45:49Z
2022-03-09T00:57:12Z
220e67755ae6323a01011b83070dff6f84b02519
853e494abd1c1e12d28a76829b4680af7f694afa
Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
./src/coreclr/tools/superpmi/superpmi-shared/standardpch.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #ifndef STANDARDPCH_H #define STANDARDPCH_H // The point of a PCH file is to never reparse files that never change. // Only include files here that will almost NEVER change. Headers for the project // itself are probably inappropriate, because if you change them, the entire // project will require a recompile. Generally just put SDK style stuff here... #ifndef WIN32_LEAN_AND_MEAN #define WIN32_LEAN_AND_MEAN #endif // WIN32_LEAN_AND_MEAN #include <windows.h> #ifdef INTERNAL_BUILD // There are a few features that reference Microsoft internal resources. We can't build these // in the open source version. #define USE_MSVCDIS // Disable CoreDisTools until coredistools.dll is statically-linked to the CRT, or until it is delayload linked. //#define USE_COREDISTOOLS #endif // INTERNAL_BUILD #ifdef _MSC_VER #pragma warning(disable : 4996) // The compiler encountered a deprecated declaration. // On Windows, we build against PAL macros that convert to Windows SEH. But we don't want all the // Contract stuff that normally gets pulled it. Defining JIT_BUILD prevents this, just as it does // when building the JIT using parts of utilcode. #define JIT_BUILD // Defining this prevents: // error C2338 : / RTCc rejects conformant code, so it isn't supported by the C++ Standard Library. // Either remove this compiler option, or define _ALLOW_RTCc_IN_STL to acknowledge that you have received this // warning. 
#ifndef _ALLOW_RTCc_IN_STL #define _ALLOW_RTCc_IN_STL #endif #define MSC_ONLY(x) x #else // !_MSC_VER #define MSC_ONLY(x) #endif // !_MSC_VER #ifndef _CRT_SECURE_NO_WARNINGS #define _CRT_SECURE_NO_WARNINGS #endif // _CRT_SECURE_NO_WARNINGS #define _CRT_RAND_S #include <stdio.h> #include <string.h> #include <stdlib.h> #include <stddef.h> #include <malloc.h> #include <assert.h> #include <wchar.h> #include <tchar.h> #include <specstrings.h> #include <math.h> #include <limits.h> #include <ctype.h> #include <stdarg.h> // Getting STL to work with PAL is difficult, so reimplement STL functionality to not require it. #ifdef TARGET_UNIX #include "clr_std/string" #include "clr_std/algorithm" #include "clr_std/vector" #else // !TARGET_UNIX #ifndef USE_STL #define USE_STL #endif // USE_STL #include <string> #include <algorithm> #include <vector> #endif // !TARGET_UNIX #ifdef USE_MSVCDIS #define DISLIB #include "..\external\msvcdis\inc\msvcdis.h" #include "..\external\msvcdis\inc\disx86.h" #include "..\external\msvcdis\inc\disarm64.h" #endif // USE_MSVCDIS #ifndef DIRECTORY_SEPARATOR_CHAR_A #define DIRECTORY_SEPARATOR_CHAR_A '\\' #endif #ifndef DIRECTORY_SEPARATOR_STR_A #define DIRECTORY_SEPARATOR_STR_A "\\" #endif #ifndef W #ifdef TARGET_UNIX #define W(str) u##str #else // TARGET_UNIX #define W(str) L##str #endif // TARGET_UNIX #endif // !W #ifndef DIRECTORY_SEPARATOR_STR_W #define DIRECTORY_SEPARATOR_STR_W W("\\") #endif #ifdef TARGET_UNIX #define PLATFORM_SHARED_LIB_SUFFIX_A PAL_SHLIB_SUFFIX #else // !TARGET_UNIX #define PLATFORM_SHARED_LIB_SUFFIX_A ".dll" #endif // !TARGET_UNIX #define DEFAULT_REAL_JIT_NAME_A MAKEDLLNAME_A("clrjit2") #define DEFAULT_REAL_JIT_NAME_W MAKEDLLNAME_W("clrjit2") #if !defined(_MSC_VER) && !defined(__llvm__) static inline void __debugbreak() { DebugBreak(); } #endif #endif // STANDARDPCH_H
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #ifndef STANDARDPCH_H #define STANDARDPCH_H // The point of a PCH file is to never reparse files that never change. // Only include files here that will almost NEVER change. Headers for the project // itself are probably inappropriate, because if you change them, the entire // project will require a recompile. Generally just put SDK style stuff here... #ifndef WIN32_LEAN_AND_MEAN #define WIN32_LEAN_AND_MEAN #endif // WIN32_LEAN_AND_MEAN #include <windows.h> #ifdef INTERNAL_BUILD // There are a few features that reference Microsoft internal resources. We can't build these // in the open source version. #define USE_MSVCDIS // Disable CoreDisTools until coredistools.dll is statically-linked to the CRT, or until it is delayload linked. //#define USE_COREDISTOOLS #endif // INTERNAL_BUILD #ifdef _MSC_VER // On Windows, we build against PAL macros that convert to Windows SEH. But we don't want all the // Contract stuff that normally gets pulled it. Defining JIT_BUILD prevents this, just as it does // when building the JIT using parts of utilcode. #define JIT_BUILD // Defining this prevents: // error C2338 : / RTCc rejects conformant code, so it isn't supported by the C++ Standard Library. // Either remove this compiler option, or define _ALLOW_RTCc_IN_STL to acknowledge that you have received this // warning. 
#ifndef _ALLOW_RTCc_IN_STL #define _ALLOW_RTCc_IN_STL #endif #define MSC_ONLY(x) x #else // !_MSC_VER #define MSC_ONLY(x) #endif // !_MSC_VER #ifndef _CRT_SECURE_NO_WARNINGS #define _CRT_SECURE_NO_WARNINGS #endif // _CRT_SECURE_NO_WARNINGS #define _CRT_RAND_S #include <stdio.h> #include <string.h> #include <stdlib.h> #include <stddef.h> #include <malloc.h> #include <assert.h> #include <wchar.h> #include <tchar.h> #include <specstrings.h> #include <math.h> #include <limits.h> #include <ctype.h> #include <stdarg.h> // Getting STL to work with PAL is difficult, so reimplement STL functionality to not require it. #ifdef TARGET_UNIX #include "clr_std/string" #include "clr_std/algorithm" #include "clr_std/vector" #else // !TARGET_UNIX #ifndef USE_STL #define USE_STL #endif // USE_STL #include <string> #include <algorithm> #include <vector> #endif // !TARGET_UNIX #ifdef USE_MSVCDIS #define DISLIB #include "..\external\msvcdis\inc\msvcdis.h" #include "..\external\msvcdis\inc\disx86.h" #include "..\external\msvcdis\inc\disarm64.h" #endif // USE_MSVCDIS #ifndef DIRECTORY_SEPARATOR_CHAR_A #define DIRECTORY_SEPARATOR_CHAR_A '\\' #endif #ifndef DIRECTORY_SEPARATOR_STR_A #define DIRECTORY_SEPARATOR_STR_A "\\" #endif #ifndef W #ifdef TARGET_UNIX #define W(str) u##str #else // TARGET_UNIX #define W(str) L##str #endif // TARGET_UNIX #endif // !W #ifndef DIRECTORY_SEPARATOR_STR_W #define DIRECTORY_SEPARATOR_STR_W W("\\") #endif #ifdef TARGET_UNIX #define PLATFORM_SHARED_LIB_SUFFIX_A PAL_SHLIB_SUFFIX #else // !TARGET_UNIX #define PLATFORM_SHARED_LIB_SUFFIX_A ".dll" #endif // !TARGET_UNIX #define DEFAULT_REAL_JIT_NAME_A MAKEDLLNAME_A("clrjit2") #define DEFAULT_REAL_JIT_NAME_W MAKEDLLNAME_W("clrjit2") #if !defined(_MSC_VER) && !defined(__llvm__) static inline void __debugbreak() { DebugBreak(); } #endif #endif // STANDARDPCH_H
1
dotnet/runtime
66,234
Remove compiler warning suppression
Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
AaronRobinsonMSFT
2022-03-05T03:45:49Z
2022-03-09T00:57:12Z
220e67755ae6323a01011b83070dff6f84b02519
853e494abd1c1e12d28a76829b4680af7f694afa
Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
./src/coreclr/vm/codeman.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // codeman.cpp - a managment class for handling multiple code managers // // #include "common.h" #include "jitinterface.h" #include "corjit.h" #include "jithost.h" #include "eetwain.h" #include "eeconfig.h" #include "excep.h" #include "appdomain.hpp" #include "codeman.h" #include "nibblemapmacros.h" #include "generics.h" #include "dynamicmethod.h" #include "eemessagebox.h" #include "eventtrace.h" #include "threadsuspend.h" #include "exceptionhandling.h" #include "rtlfunctions.h" #include "shimload.h" #include "debuginfostore.h" #include "strsafe.h" #include "configuration.h" #ifdef HOST_64BIT #define CHECK_DUPLICATED_STRUCT_LAYOUTS #include "../debug/daccess/fntableaccess.h" #endif // HOST_64BIT #ifdef FEATURE_PERFMAP #include "perfmap.h" #endif // Default number of jump stubs in a jump stub block #define DEFAULT_JUMPSTUBS_PER_BLOCK 32 SPTR_IMPL(EECodeManager, ExecutionManager, m_pDefaultCodeMan); SPTR_IMPL(EEJitManager, ExecutionManager, m_pEEJitManager); #ifdef FEATURE_READYTORUN SPTR_IMPL(ReadyToRunJitManager, ExecutionManager, m_pReadyToRunJitManager); #endif #ifndef DACCESS_COMPILE Volatile<RangeSection *> ExecutionManager::m_CodeRangeList = NULL; Volatile<LONG> ExecutionManager::m_dwReaderCount = 0; Volatile<LONG> ExecutionManager::m_dwWriterLock = 0; #else SPTR_IMPL(RangeSection, ExecutionManager, m_CodeRangeList); SVAL_IMPL(LONG, ExecutionManager, m_dwReaderCount); SVAL_IMPL(LONG, ExecutionManager, m_dwWriterLock); #endif #ifndef DACCESS_COMPILE CrstStatic ExecutionManager::m_JumpStubCrst; CrstStatic ExecutionManager::m_RangeCrst; unsigned ExecutionManager::m_normal_JumpStubLookup; unsigned ExecutionManager::m_normal_JumpStubUnique; unsigned ExecutionManager::m_normal_JumpStubBlockAllocCount; unsigned ExecutionManager::m_normal_JumpStubBlockFullCount; unsigned ExecutionManager::m_LCG_JumpStubLookup; unsigned 
ExecutionManager::m_LCG_JumpStubUnique; unsigned ExecutionManager::m_LCG_JumpStubBlockAllocCount; unsigned ExecutionManager::m_LCG_JumpStubBlockFullCount; #endif // DACCESS_COMPILE #if defined(TARGET_AMD64) && !defined(DACCESS_COMPILE) // We don't do this on ARM just amd64 // Support for new style unwind information (to allow OS to stack crawl JIT compiled code). typedef NTSTATUS (WINAPI* RtlAddGrowableFunctionTableFnPtr) ( PVOID *DynamicTable, PRUNTIME_FUNCTION FunctionTable, ULONG EntryCount, ULONG MaximumEntryCount, ULONG_PTR rangeStart, ULONG_PTR rangeEnd); typedef VOID (WINAPI* RtlGrowFunctionTableFnPtr) (PVOID DynamicTable, ULONG NewEntryCount); typedef VOID (WINAPI* RtlDeleteGrowableFunctionTableFnPtr) (PVOID DynamicTable); // OS entry points (only exist on Win8 and above) static RtlAddGrowableFunctionTableFnPtr pRtlAddGrowableFunctionTable; static RtlGrowFunctionTableFnPtr pRtlGrowFunctionTable; static RtlDeleteGrowableFunctionTableFnPtr pRtlDeleteGrowableFunctionTable; static Volatile<bool> RtlUnwindFtnsInited; // statics for UnwindInfoTable Crst* UnwindInfoTable::s_pUnwindInfoTableLock = NULL; Volatile<bool> UnwindInfoTable::s_publishingActive = false; #if _DEBUG // Fake functions on Win7 checked build to excercize the code paths, they are no-ops NTSTATUS WINAPI FakeRtlAddGrowableFunctionTable ( PVOID *DynamicTable, PT_RUNTIME_FUNCTION FunctionTable, ULONG EntryCount, ULONG MaximumEntryCount, ULONG_PTR rangeStart, ULONG_PTR rangeEnd) { *DynamicTable = (PVOID) 1; return 0; } VOID WINAPI FakeRtlGrowFunctionTable (PVOID DynamicTable, ULONG NewEntryCount) { } VOID WINAPI FakeRtlDeleteGrowableFunctionTable (PVOID DynamicTable) {} #endif /****************************************************************************/ // initialize the entry points for new win8 unwind info publishing functions. 
// return true if the initialize is successful (the functions exist) bool InitUnwindFtns() { CONTRACTL { NOTHROW; } CONTRACTL_END; #ifndef TARGET_UNIX if (!RtlUnwindFtnsInited) { HINSTANCE hNtdll = WszGetModuleHandle(W("ntdll.dll")); if (hNtdll != NULL) { void* growFunctionTable = GetProcAddress(hNtdll, "RtlGrowFunctionTable"); void* deleteGrowableFunctionTable = GetProcAddress(hNtdll, "RtlDeleteGrowableFunctionTable"); void* addGrowableFunctionTable = GetProcAddress(hNtdll, "RtlAddGrowableFunctionTable"); // All or nothing AddGroableFunctionTable is last (marker) if (growFunctionTable != NULL && deleteGrowableFunctionTable != NULL && addGrowableFunctionTable != NULL) { pRtlGrowFunctionTable = (RtlGrowFunctionTableFnPtr) growFunctionTable; pRtlDeleteGrowableFunctionTable = (RtlDeleteGrowableFunctionTableFnPtr) deleteGrowableFunctionTable; pRtlAddGrowableFunctionTable = (RtlAddGrowableFunctionTableFnPtr) addGrowableFunctionTable; } // Don't call FreeLibrary(hNtdll) because GetModuleHandle did *NOT* increment the reference count! 
} else { #if _DEBUG pRtlGrowFunctionTable = FakeRtlGrowFunctionTable; pRtlDeleteGrowableFunctionTable = FakeRtlDeleteGrowableFunctionTable; pRtlAddGrowableFunctionTable = FakeRtlAddGrowableFunctionTable; #endif } RtlUnwindFtnsInited = true; } return (pRtlAddGrowableFunctionTable != NULL); #else // !TARGET_UNIX return false; #endif // !TARGET_UNIX } /****************************************************************************/ UnwindInfoTable::UnwindInfoTable(ULONG_PTR rangeStart, ULONG_PTR rangeEnd, ULONG size) { STANDARD_VM_CONTRACT; _ASSERTE(s_pUnwindInfoTableLock->OwnedByCurrentThread()); _ASSERTE((rangeEnd - rangeStart) <= 0x7FFFFFFF); cTableCurCount = 0; cTableMaxCount = size; cDeletedEntries = 0; iRangeStart = rangeStart; iRangeEnd = rangeEnd; hHandle = NULL; pTable = new T_RUNTIME_FUNCTION[cTableMaxCount]; } /****************************************************************************/ UnwindInfoTable::~UnwindInfoTable() { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; _ASSERTE(s_publishingActive); // We do this lock free to because too many places still want no-trigger. 
It should be OK // It would be cleaner if we could take the lock (we did not have to be GC_NOTRIGGER) UnRegister(); delete[] pTable; } /*****************************************************************************/ void UnwindInfoTable::Register() { _ASSERTE(s_pUnwindInfoTableLock->OwnedByCurrentThread()); EX_TRY { hHandle = NULL; NTSTATUS ret = pRtlAddGrowableFunctionTable(&hHandle, pTable, cTableCurCount, cTableMaxCount, iRangeStart, iRangeEnd); if (ret != STATUS_SUCCESS) { _ASSERTE(!"Failed to publish UnwindInfo (ignorable)"); hHandle = NULL; STRESS_LOG3(LF_JIT, LL_ERROR, "UnwindInfoTable::Register ERROR %x creating table [%p, %p]\n", ret, iRangeStart, iRangeEnd); } else { STRESS_LOG3(LF_JIT, LL_INFO100, "UnwindInfoTable::Register Handle: %p [%p, %p]\n", hHandle, iRangeStart, iRangeEnd); } } EX_CATCH { hHandle = NULL; STRESS_LOG2(LF_JIT, LL_ERROR, "UnwindInfoTable::Register Exception while creating table [%p, %p]\n", iRangeStart, iRangeEnd); _ASSERTE(!"Failed to publish UnwindInfo (ignorable)"); } EX_END_CATCH(SwallowAllExceptions) } /*****************************************************************************/ void UnwindInfoTable::UnRegister() { PVOID handle = hHandle; hHandle = 0; if (handle != 0) { STRESS_LOG3(LF_JIT, LL_INFO100, "UnwindInfoTable::UnRegister Handle: %p [%p, %p]\n", handle, iRangeStart, iRangeEnd); pRtlDeleteGrowableFunctionTable(handle); } } /*****************************************************************************/ // Add 'data' to the linked list whose head is pointed at by 'unwindInfoPtr' // /* static */ void UnwindInfoTable::AddToUnwindInfoTable(UnwindInfoTable** unwindInfoPtr, PT_RUNTIME_FUNCTION data, TADDR rangeStart, TADDR rangeEnd) { CONTRACTL { THROWS; GC_TRIGGERS; } CONTRACTL_END; _ASSERTE(data->BeginAddress <= RUNTIME_FUNCTION__EndAddress(data, rangeStart)); _ASSERTE(RUNTIME_FUNCTION__EndAddress(data, rangeStart) <= (rangeEnd-rangeStart)); _ASSERTE(unwindInfoPtr != NULL); if (!s_publishingActive) return; CrstHolder 
ch(s_pUnwindInfoTableLock); UnwindInfoTable* unwindInfo = *unwindInfoPtr; // was the original list null, If so lazy initialize. if (unwindInfo == NULL) { // We can choose the average method size estimate dynamically based on past experience // 128 is the estimated size of an average method, so we can accurately predict // how many RUNTIME_FUNCTION entries are in each chunk we allocate. ULONG size = (ULONG) ((rangeEnd - rangeStart) / 128) + 1; // To insure the test the growing logic in debug code make the size much smaller. INDEBUG(size = size / 4 + 1); unwindInfo = (PTR_UnwindInfoTable)new UnwindInfoTable(rangeStart, rangeEnd, size); unwindInfo->Register(); *unwindInfoPtr = unwindInfo; } _ASSERTE(unwindInfo != NULL); // If new had failed, we would have thrown OOM _ASSERTE(unwindInfo->cTableCurCount <= unwindInfo->cTableMaxCount); _ASSERTE(unwindInfo->iRangeStart == rangeStart); _ASSERTE(unwindInfo->iRangeEnd == rangeEnd); // Means we had a failure publishing to the OS, in this case we give up if (unwindInfo->hHandle == NULL) return; // Check for the fast path: we are adding the the end of an UnwindInfoTable with space if (unwindInfo->cTableCurCount < unwindInfo->cTableMaxCount) { if (unwindInfo->cTableCurCount == 0 || unwindInfo->pTable[unwindInfo->cTableCurCount-1].BeginAddress < data->BeginAddress) { // Yeah, we can simply add to the end of table and we are done! unwindInfo->pTable[unwindInfo->cTableCurCount] = *data; unwindInfo->cTableCurCount++; // Add to the function table pRtlGrowFunctionTable(unwindInfo->hHandle, unwindInfo->cTableCurCount); STRESS_LOG5(LF_JIT, LL_INFO1000, "AddToUnwindTable Handle: %p [%p, %p] ADDING 0x%p TO END, now 0x%x entries\n", unwindInfo->hHandle, unwindInfo->iRangeStart, unwindInfo->iRangeEnd, data->BeginAddress, unwindInfo->cTableCurCount); return; } } // OK we need to rellocate the table and reregister. 
First figure out our 'desiredSpace' // We could imagine being much more efficient for 'bulk' updates, but we don't try // because we assume that this is rare and we want to keep the code simple ULONG usedSpace = unwindInfo->cTableCurCount - unwindInfo->cDeletedEntries; ULONG desiredSpace = usedSpace * 5 / 4 + 1; // Increase by 20% // Be more aggresive if we used all of our space; if (usedSpace == unwindInfo->cTableMaxCount) desiredSpace = usedSpace * 3 / 2 + 1; // Increase by 50% STRESS_LOG7(LF_JIT, LL_INFO100, "AddToUnwindTable Handle: %p [%p, %p] SLOW Realloc Cnt 0x%x Max 0x%x NewMax 0x%x, Adding %x\n", unwindInfo->hHandle, unwindInfo->iRangeStart, unwindInfo->iRangeEnd, unwindInfo->cTableCurCount, unwindInfo->cTableMaxCount, desiredSpace, data->BeginAddress); UnwindInfoTable* newTab = new UnwindInfoTable(unwindInfo->iRangeStart, unwindInfo->iRangeEnd, desiredSpace); // Copy in the entries, removing deleted entries and adding the new entry wherever it belongs int toIdx = 0; bool inserted = false; // Have we inserted 'data' into the table for(ULONG fromIdx = 0; fromIdx < unwindInfo->cTableCurCount; fromIdx++) { if (!inserted && data->BeginAddress < unwindInfo->pTable[fromIdx].BeginAddress) { STRESS_LOG1(LF_JIT, LL_INFO100, "AddToUnwindTable Inserted at MID position 0x%x\n", toIdx); newTab->pTable[toIdx++] = *data; inserted = true; } if (unwindInfo->pTable[fromIdx].UnwindData != 0) // A 'non-deleted' entry newTab->pTable[toIdx++] = unwindInfo->pTable[fromIdx]; } if (!inserted) { STRESS_LOG1(LF_JIT, LL_INFO100, "AddToUnwindTable Inserted at END position 0x%x\n", toIdx); newTab->pTable[toIdx++] = *data; } newTab->cTableCurCount = toIdx; STRESS_LOG2(LF_JIT, LL_INFO100, "AddToUnwindTable New size 0x%x max 0x%x\n", newTab->cTableCurCount, newTab->cTableMaxCount); _ASSERTE(newTab->cTableCurCount <= newTab->cTableMaxCount); // Unregister the old table *unwindInfoPtr = 0; unwindInfo->UnRegister(); // Note that there is a short time when we are not publishing... 
// Register the new table newTab->Register(); *unwindInfoPtr = newTab; delete unwindInfo; } /*****************************************************************************/ /* static */ void UnwindInfoTable::RemoveFromUnwindInfoTable(UnwindInfoTable** unwindInfoPtr, TADDR baseAddress, TADDR entryPoint) { CONTRACTL { NOTHROW; GC_TRIGGERS; } CONTRACTL_END; _ASSERTE(unwindInfoPtr != NULL); if (!s_publishingActive) return; CrstHolder ch(s_pUnwindInfoTableLock); UnwindInfoTable* unwindInfo = *unwindInfoPtr; if (unwindInfo != NULL) { DWORD relativeEntryPoint = (DWORD)(entryPoint - baseAddress); STRESS_LOG3(LF_JIT, LL_INFO100, "RemoveFromUnwindInfoTable Removing %p BaseAddress %p rel %x\n", entryPoint, baseAddress, relativeEntryPoint); for(ULONG i = 0; i < unwindInfo->cTableCurCount; i++) { if (unwindInfo->pTable[i].BeginAddress <= relativeEntryPoint && relativeEntryPoint < RUNTIME_FUNCTION__EndAddress(&unwindInfo->pTable[i], unwindInfo->iRangeStart)) { if (unwindInfo->pTable[i].UnwindData != 0) unwindInfo->cDeletedEntries++; unwindInfo->pTable[i].UnwindData = 0; // Mark the entry for deletion STRESS_LOG1(LF_JIT, LL_INFO100, "RemoveFromUnwindInfoTable Removed entry 0x%x\n", i); return; } } } STRESS_LOG2(LF_JIT, LL_WARNING, "RemoveFromUnwindInfoTable COULD NOT FIND %p BaseAddress %p\n", entryPoint, baseAddress); } /****************************************************************************/ // Publish the stack unwind data 'data' which is relative 'baseAddress' // to the operating system in a way ETW stack tracing can use. 
/* static */ void UnwindInfoTable::PublishUnwindInfoForMethod(TADDR baseAddress, PT_RUNTIME_FUNCTION unwindInfo, int unwindInfoCount) { STANDARD_VM_CONTRACT; if (!s_publishingActive) return; TADDR entry = baseAddress + unwindInfo->BeginAddress; RangeSection * pRS = ExecutionManager::FindCodeRange(entry, ExecutionManager::GetScanFlags()); _ASSERTE(pRS != NULL); if (pRS != NULL) { for(int i = 0; i < unwindInfoCount; i++) AddToUnwindInfoTable(&pRS->pUnwindInfoTable, &unwindInfo[i], pRS->LowAddress, pRS->HighAddress); } } /*****************************************************************************/ /* static */ void UnwindInfoTable::UnpublishUnwindInfoForMethod(TADDR entryPoint) { CONTRACTL { NOTHROW; GC_TRIGGERS; } CONTRACTL_END; if (!s_publishingActive) return; RangeSection * pRS = ExecutionManager::FindCodeRange(entryPoint, ExecutionManager::GetScanFlags()); _ASSERTE(pRS != NULL); if (pRS != NULL) { _ASSERTE(pRS->pjit->GetCodeType() == (miManaged | miIL)); if (pRS->pjit->GetCodeType() == (miManaged | miIL)) { // This cast is justified because only EEJitManager's have the code type above. EEJitManager* pJitMgr = (EEJitManager*)(pRS->pjit); CodeHeader * pHeader = pJitMgr->GetCodeHeaderFromStartAddress(entryPoint); for(ULONG i = 0; i < pHeader->GetNumberOfUnwindInfos(); i++) RemoveFromUnwindInfoTable(&pRS->pUnwindInfoTable, pRS->LowAddress, pRS->LowAddress + pHeader->GetUnwindInfo(i)->BeginAddress); } } } #ifdef STUBLINKER_GENERATES_UNWIND_INFO extern StubUnwindInfoHeapSegment *g_StubHeapSegments; #endif // STUBLINKER_GENERATES_UNWIND_INFO extern CrstStatic g_StubUnwindInfoHeapSegmentsCrst; /*****************************************************************************/ // Publish all existing JIT compiled methods by iterating through the code heap // Note that because we need to keep the entries in order we have to hold // s_pUnwindInfoTableLock so that all entries get inserted in the correct order. 
// (we rely on heapIterator walking the methods in a heap section in order). /* static */ void UnwindInfoTable::PublishUnwindInfoForExistingMethods() { STANDARD_VM_CONTRACT; { // CodeHeapIterator holds the m_CodeHeapCritSec, which insures code heaps don't get deallocated while being walked EEJitManager::CodeHeapIterator heapIterator(NULL); // Currently m_CodeHeapCritSec is given the CRST_UNSAFE_ANYMODE flag which allows it to be taken in a GC_NOTRIGGER // region but also disallows GC_TRIGGERS. We need GC_TRIGGERS because we take another lock. Ideally we would // fix m_CodeHeapCritSec to not have the CRST_UNSAFE_ANYMODE flag, but I currently reached my threshold for fixing // contracts. CONTRACT_VIOLATION(GCViolation); while(heapIterator.Next()) { MethodDesc *pMD = heapIterator.GetMethod(); if(pMD) { PCODE methodEntry =(PCODE) heapIterator.GetMethodCode(); RangeSection * pRS = ExecutionManager::FindCodeRange(methodEntry, ExecutionManager::GetScanFlags()); _ASSERTE(pRS != NULL); _ASSERTE(pRS->pjit->GetCodeType() == (miManaged | miIL)); if (pRS != NULL && pRS->pjit->GetCodeType() == (miManaged | miIL)) { // This cast is justified because only EEJitManager's have the code type above. 
EEJitManager* pJitMgr = (EEJitManager*)(pRS->pjit); CodeHeader * pHeader = pJitMgr->GetCodeHeaderFromStartAddress(methodEntry); int unwindInfoCount = pHeader->GetNumberOfUnwindInfos(); for(int i = 0; i < unwindInfoCount; i++) AddToUnwindInfoTable(&pRS->pUnwindInfoTable, pHeader->GetUnwindInfo(i), pRS->LowAddress, pRS->HighAddress); } } } } #ifdef STUBLINKER_GENERATES_UNWIND_INFO // Enumerate all existing stubs CrstHolder crst(&g_StubUnwindInfoHeapSegmentsCrst); for (StubUnwindInfoHeapSegment* pStubHeapSegment = g_StubHeapSegments; pStubHeapSegment; pStubHeapSegment = pStubHeapSegment->pNext) { // The stubs are in reverse order, so we reverse them so they are in memory order CQuickArrayList<StubUnwindInfoHeader*> list; for (StubUnwindInfoHeader *pHeader = pStubHeapSegment->pUnwindHeaderList; pHeader; pHeader = pHeader->pNext) list.Push(pHeader); for(int i = (int) list.Size()-1; i >= 0; --i) { StubUnwindInfoHeader *pHeader = list[i]; AddToUnwindInfoTable(&pStubHeapSegment->pUnwindInfoTable, &pHeader->FunctionEntry, (TADDR) pStubHeapSegment->pbBaseAddress, (TADDR) pStubHeapSegment->pbBaseAddress + pStubHeapSegment->cbSegment); } } #endif // STUBLINKER_GENERATES_UNWIND_INFO } /*****************************************************************************/ // turn on the publishing of unwind info. Called when the ETW rundown provider // is turned on. /* static */ void UnwindInfoTable::PublishUnwindInfo(bool publishExisting) { CONTRACTL { NOTHROW; GC_TRIGGERS; } CONTRACTL_END; if (s_publishingActive) return; // If we don't have the APIs we need, give up if (!InitUnwindFtns()) return; EX_TRY { // Create the lock Crst* newCrst = new Crst(CrstUnwindInfoTableLock); if (InterlockedCompareExchangeT(&s_pUnwindInfoTableLock, newCrst, NULL) == NULL) { s_publishingActive = true; if (publishExisting) PublishUnwindInfoForExistingMethods(); } else delete newCrst; // we were in a race and failed, throw away the Crst we made. 
} EX_CATCH { STRESS_LOG1(LF_JIT, LL_ERROR, "Exception happened when doing unwind Info rundown. EIP of last AV = %p\n", g_LastAccessViolationEIP); _ASSERTE(!"Exception thrown while publishing 'catchup' ETW unwind information"); s_publishingActive = false; // Try to minimize damage. } EX_END_CATCH(SwallowAllExceptions); } #endif // defined(TARGET_AMD64) && !defined(DACCESS_COMPILE) /*----------------------------------------------------------------------------- This is a listing of which methods uses which synchronization mechanism in the EEJitManager. //----------------------------------------------------------------------------- Setters of EEJitManager::m_CodeHeapCritSec ----------------------------------------------- allocCode allocGCInfo allocEHInfo allocJumpStubBlock ResolveEHClause RemoveJitData Unload ReleaseReferenceToHeap JitCodeToMethodInfo Need EEJitManager::m_CodeHeapCritSec to be set ----------------------------------------------- NewCodeHeap allocCodeRaw GetCodeHeapList RemoveCodeHeapFromDomainList DeleteCodeHeap AddRangeToJitHeapCache DeleteJitHeapCache */ #if !defined(DACCESS_COMPILE) EEJitManager::CodeHeapIterator::CodeHeapIterator(LoaderAllocator *pLoaderAllocatorFilter) : m_lockHolder(&(ExecutionManager::GetEEJitManager()->m_CodeHeapCritSec)), m_Iterator(NULL, 0, NULL, 0) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; m_pHeapList = NULL; m_pLoaderAllocator = pLoaderAllocatorFilter; m_pHeapList = ExecutionManager::GetEEJitManager()->GetCodeHeapList(); if(m_pHeapList) new (&m_Iterator) MethodSectionIterator((const void *)m_pHeapList->mapBase, (COUNT_T)m_pHeapList->maxCodeHeapSize, m_pHeapList->pHdrMap, (COUNT_T)HEAP2MAPSIZE(ROUND_UP_TO_PAGE(m_pHeapList->maxCodeHeapSize))); }; EEJitManager::CodeHeapIterator::~CodeHeapIterator() { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; } BOOL EEJitManager::CodeHeapIterator::Next() { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; if(!m_pHeapList) return FALSE; 
while(1) { if(!m_Iterator.Next()) { m_pHeapList = m_pHeapList->GetNext(); if(!m_pHeapList) return FALSE; new (&m_Iterator) MethodSectionIterator((const void *)m_pHeapList->mapBase, (COUNT_T)m_pHeapList->maxCodeHeapSize, m_pHeapList->pHdrMap, (COUNT_T)HEAP2MAPSIZE(ROUND_UP_TO_PAGE(m_pHeapList->maxCodeHeapSize))); } else { BYTE * code = m_Iterator.GetMethodCode(); CodeHeader * pHdr = (CodeHeader *)(code - sizeof(CodeHeader)); m_pCurrent = !pHdr->IsStubCodeBlock() ? pHdr->GetMethodDesc() : NULL; // LoaderAllocator filter if (m_pLoaderAllocator && m_pCurrent) { LoaderAllocator *pCurrentLoaderAllocator = m_pCurrent->GetLoaderAllocator(); if(pCurrentLoaderAllocator != m_pLoaderAllocator) continue; } return TRUE; } } } #endif // !DACCESS_COMPILE #ifndef DACCESS_COMPILE //--------------------------------------------------------------------------------------- // // ReaderLockHolder::ReaderLockHolder takes the reader lock, checks for the writer lock // and either aborts if the writer lock is held, or yields until the writer lock is released, // keeping the reader lock. This is normally called in the constructor for the // ReaderLockHolder. // // The writer cannot be taken if there are any readers. The WriterLockHolder functions take the // writer lock and check for any readers. If there are any, the WriterLockHolder functions // release the writer and yield to wait for the readers to be done. ExecutionManager::ReaderLockHolder::ReaderLockHolder(HostCallPreference hostCallPreference /*=AllowHostCalls*/) { CONTRACTL { NOTHROW; if (hostCallPreference == AllowHostCalls) { HOST_CALLS; } else { HOST_NOCALLS; } GC_NOTRIGGER; CAN_TAKE_LOCK; } CONTRACTL_END; IncCantAllocCount(); FastInterlockIncrement(&m_dwReaderCount); EE_LOCK_TAKEN(GetPtrForLockContract()); if (VolatileLoad(&m_dwWriterLock) != 0) { if (hostCallPreference != AllowHostCalls) { // Rats, writer lock is held. Gotta bail. 
            // Since the reader count was already
            // incremented, we're technically still blocking writers at the moment.  But
            // the holder who called us is about to call DecrementReader in its
            // destructor and unblock writers.
            return;
        }

        // Host calls are allowed here, so simply spin until the writer drains.
        YIELD_WHILE ((VolatileLoad(&m_dwWriterLock) != 0));
    }
}

//---------------------------------------------------------------------------------------
//
// See code:ExecutionManager::ReaderLockHolder::ReaderLockHolder. This just decrements the reader count.

ExecutionManager::ReaderLockHolder::~ReaderLockHolder()
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
    }
    CONTRACTL_END;

    FastInterlockDecrement(&m_dwReaderCount);
    DecCantAllocCount();

    EE_LOCK_RELEASED(GetPtrForLockContract());
}

//---------------------------------------------------------------------------------------
//
// Returns whether the reader lock is acquired

BOOL ExecutionManager::ReaderLockHolder::Acquired()
{
    LIMITED_METHOD_CONTRACT;
    // The reader count was already bumped in the constructor; acquisition only
    // failed if a writer still holds the lock.
    return VolatileLoad(&m_dwWriterLock) == 0;
}

ExecutionManager::WriterLockHolder::WriterLockHolder()
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        CAN_TAKE_LOCK;
    }
    CONTRACTL_END;

    _ASSERTE(m_dwWriterLock == 0);

    // Signal to a debugger that this thread cannot stop now
    IncCantStopCount();

    IncCantAllocCount();

    DWORD dwSwitchCount = 0;
    while (TRUE)
    {
        // While this thread holds the writer lock, we must not try to suspend it
        // or allow a profiler to walk its stack
        Thread::IncForbidSuspendThread();

        // Optimistically take the writer lock, then back out if any readers exist.
        FastInterlockIncrement(&m_dwWriterLock);
        if (m_dwReaderCount == 0)
            break;
        FastInterlockDecrement(&m_dwWriterLock);

        // Before we loop and retry, it's safe to suspend or hijack and inspect
        // this thread
        Thread::DecForbidSuspendThread();

        __SwitchToThread(0, ++dwSwitchCount);
    }
    EE_LOCK_TAKEN(GetPtrForLockContract());
}

ExecutionManager::WriterLockHolder::~WriterLockHolder()
{
    LIMITED_METHOD_CONTRACT;

    FastInterlockDecrement(&m_dwWriterLock);

    // Writer lock released, so it's safe again for this thread to be
    // suspended or have its stack walked by a profiler
    Thread::DecForbidSuspendThread();

    DecCantAllocCount();

    // Signal to a debugger that it's again safe to stop this thread
    DecCantStopCount();

    EE_LOCK_RELEASED(GetPtrForLockContract());
}

#else

// For DAC builds, we only care whether the writer lock is held.
// If it is, we will assume the locked data is in an inconsistent
// state and throw. We never actually take the lock.
// Note: Throws
ExecutionManager::ReaderLockHolder::ReaderLockHolder(HostCallPreference hostCallPreference /*=AllowHostCalls*/)
{
    SUPPORTS_DAC;

    if (m_dwWriterLock != 0)
    {
        ThrowHR(CORDBG_E_PROCESS_NOT_SYNCHRONIZED);
    }
}

ExecutionManager::ReaderLockHolder::~ReaderLockHolder()
{
}

#endif // DACCESS_COMPILE

/*-----------------------------------------------------------------------------
 This is a listing of which methods uses which synchronization mechanism
 in the ExecutionManager
//-----------------------------------------------------------------------------

==============================================================================
ExecutionManger::ReaderLockHolder and ExecutionManger::WriterLockHolder
Protects the callers of ExecutionManager::GetRangeSection from heap deletions
while walking RangeSections.
You need to take a reader lock before reading the values:
m_CodeRangeList and hold it while walking the lists

Uses ReaderLockHolder (allows multiple readers with no writers)
-----------------------------------------
ExecutionManager::FindCodeRange
ExecutionManager::FindZapModule
ExecutionManager::EnumMemoryRegions

Uses WriterLockHolder (allows single writer and no readers)
-----------------------------------------
ExecutionManager::AddRangeHelper
ExecutionManager::DeleteRangeHelper

*/

//-----------------------------------------------------------------------------

#if defined(TARGET_ARM) || defined(TARGET_ARM64)
#define EXCEPTION_DATA_SUPPORTS_FUNCTION_FRAGMENTS
#endif

#if defined(EXCEPTION_DATA_SUPPORTS_FUNCTION_FRAGMENTS)
// The function fragments can be used in Hot/Cold splitting, expressing Large Functions or in 'ShrinkWrapping', which is
// delaying saving and restoring some callee-saved registers later inside the body of the method.
// (It's assumed that JIT will not emit any ShrinkWrapping-style methods)
// For these cases multiple RUNTIME_FUNCTION entries (a.k.a function fragments) are used to define
// all the regions of the function or funclet. And one of these function fragments cover the beginning of the function/funclet,
// including the prolog section and is referred as the 'Host Record'.
// This function returns TRUE if the inspected RUNTIME_FUNCTION entry is NOT a host record
BOOL IsFunctionFragment(TADDR baseAddress, PTR_RUNTIME_FUNCTION pFunctionEntry)
{
    LIMITED_METHOD_DAC_CONTRACT;

    _ASSERTE((pFunctionEntry->UnwindData & 3) == 0);   // The unwind data must be an RVA; we don't support packed unwind format
    DWORD unwindHeader = *(PTR_DWORD)(baseAddress + pFunctionEntry->UnwindData);
    _ASSERTE((0 == ((unwindHeader >> 18) & 3)) || !"unknown unwind data format, version != 0");
#if defined(TARGET_ARM)

    // On ARM, It's assumed that the prolog is always at the beginning of the function and cannot be split.
    // Given that, there are 4 possible ways to fragment a function:
    // 1. Prolog only:
    // 2. Prolog and some epilogs:
    // 3. Epilogs only:
    // 4. No Prolog or epilog
    //
    // Function fragments describing 1 & 2 are host records, 3 & 4 are not.
    // for 3 & 4, the .xdata record's F bit is set to 1, marking clearly what is NOT a host record

    _ASSERTE((pFunctionEntry->BeginAddress & THUMB_CODE) == THUMB_CODE); // Sanity check: it's a thumb address
    DWORD Fbit = (unwindHeader >> 22) & 0x1;    // F "fragment" bit
    return (Fbit == 1);
#elif defined(TARGET_ARM64)

    // ARM64 is a little bit more flexible, in the sense that it supports partial prologs. However only one of the
    // prolog regions are allowed to alter SP and that's the Host Record. Partial prologs are used in ShrinkWrapping
    // scenarios which is not supported, hence we don't need to worry about them. discarding partial prologs
    // simplifies identifying a host record a lot.
    //
    // 1. Prolog only: The host record. Epilog Count and E bit are all 0.
    // 2. Prolog and some epilogs: The host record with accompanying epilog-only records
    // 3. Epilogs only: First unwind code is Phantom prolog (Starting with an end_c, indicating an empty prolog)
    // 4. No prologs or epilogs: First unwind code is Phantom prolog (Starting with an end_c, indicating an empty prolog)
    //

    int EpilogCount = (int)(unwindHeader >> 22) & 0x1F;
    int CodeWords = unwindHeader >> 27;
    PTR_DWORD pUnwindCodes = (PTR_DWORD)(baseAddress + pFunctionEntry->UnwindData);
    // Skip header.
    pUnwindCodes++;

    // Skip extended header: zero counts mean the real counts live in the next word.
    if ((CodeWords == 0) && (EpilogCount == 0))
    {
        EpilogCount = (*pUnwindCodes) & 0xFFFF;
        pUnwindCodes++;
    }

    // Skip epilog scopes.
    BOOL Ebit = (unwindHeader >> 21) & 0x1;
    if (!Ebit && (EpilogCount != 0))
    {
        // EpilogCount is the number of exception scopes defined right after the unwindHeader
        pUnwindCodes += EpilogCount;
    }

    // 0xE5 is the end_c unwind code: a phantom (empty) prolog marks a non-host fragment.
    return ((*pUnwindCodes & 0xFF) == 0xE5);
#else
    PORTABILITY_ASSERT("IsFunctionFragnent - NYI on this platform");
#endif
}

// When we have fragmented unwind we usually want to refer to the
// unwind record that includes the prolog. We can find it by searching
// back in the sequence of unwind records.
PTR_RUNTIME_FUNCTION FindRootEntry(PTR_RUNTIME_FUNCTION pFunctionEntry, TADDR baseAddress)
{
    LIMITED_METHOD_DAC_CONTRACT;

    PTR_RUNTIME_FUNCTION pRootEntry = pFunctionEntry;

    if (pRootEntry != NULL)
    {
        // Walk backwards in the RUNTIME_FUNCTION array until we find a non-fragment.
        // We're guaranteed to find one, because we require that a fragment live in a function or funclet
        // that has a prolog, which will have non-fragment .xdata.
        for (;;)
        {
            if (!IsFunctionFragment(baseAddress, pRootEntry))
            {
                // This is not a fragment; we're done
                break;
            }

            --pRootEntry;
        }
    }

    return pRootEntry;
}

#endif // EXCEPTION_DATA_SUPPORTS_FUNCTION_FRAGMENTS

#ifndef DACCESS_COMPILE

//**********************************************************************************
//  IJitManager
//**********************************************************************************
IJitManager::IJitManager()
{
    LIMITED_METHOD_CONTRACT;

    m_runtimeSupport = ExecutionManager::GetDefaultCodeManager();
}

#endif // #ifndef DACCESS_COMPILE

// When we unload an appdomain, we need to make sure that any threads that are crawling through
// our heap or rangelist are out. For cooperative-mode threads, we know that they will have
// been stopped when we suspend the EE so they won't be touching an element that is about to be deleted.
// However for pre-emptive mode threads, they could be stalled right on top of the element we want
// to delete, so we need to apply the reader lock to them and wait for them to drain.
ExecutionManager::ScanFlag ExecutionManager::GetScanFlags()
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        HOST_NOCALLS;
        SUPPORTS_DAC;
    } CONTRACTL_END;

#if !defined(DACCESS_COMPILE)

    // No Thread object means no possible race with a writer on this thread.
    Thread *pThread = GetThreadNULLOk();

    if (!pThread)
        return ScanNoReaderLock;

    // If this thread is hijacked by a profiler and crawling its own stack,
    // we do need to take the lock
    if (pThread->GetProfilerFilterContext() != NULL)
        return ScanReaderLock;

    // Cooperative-mode threads (and the suspension thread itself) need no lock;
    // see the appdomain-unload comment above this function.
    if (pThread->PreemptiveGCDisabled() || (pThread == ThreadSuspend::GetSuspensionThread()))
        return ScanNoReaderLock;

    return ScanReaderLock;
#else
    return ScanNoReaderLock;
#endif
}

#ifdef DACCESS_COMPILE

void IJitManager::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
{
    DAC_ENUM_VTHIS();
    if (m_runtimeSupport.IsValid())
    {
        m_runtimeSupport->EnumMemoryRegions(flags);
    }
}

#endif // #ifdef DACCESS_COMPILE

#if defined(FEATURE_EH_FUNCLETS)

// Returns a pointer to a method's unwind-info blob and its size in *pSize.
// The layout is target-specific; see the per-target branches below.
PTR_VOID GetUnwindDataBlob(TADDR moduleBase, PTR_RUNTIME_FUNCTION pRuntimeFunction, /* out */ SIZE_T * pSize)
{
    LIMITED_METHOD_CONTRACT;

#if defined(TARGET_AMD64)
    PTR_UNWIND_INFO pUnwindInfo(dac_cast<PTR_UNWIND_INFO>(moduleBase + RUNTIME_FUNCTION__GetUnwindInfoAddress(pRuntimeFunction)));

    *pSize = ALIGN_UP(offsetof(UNWIND_INFO, UnwindCode) +
        sizeof(UNWIND_CODE) * pUnwindInfo->CountOfUnwindCodes +
        sizeof(ULONG) /* personality routine is always present */,
            sizeof(DWORD));

    return pUnwindInfo;

#elif defined(TARGET_X86)
    PTR_UNWIND_INFO pUnwindInfo(dac_cast<PTR_UNWIND_INFO>(moduleBase + RUNTIME_FUNCTION__GetUnwindInfoAddress(pRuntimeFunction)));

    *pSize = sizeof(UNWIND_INFO);

    return pUnwindInfo;

#elif defined(TARGET_ARM) || defined(TARGET_ARM64)

    // if this function uses packed unwind data then at least one of the two least significant bits
    // will be non-zero.  if this is the case then there will be no xdata record to enumerate.
    _ASSERTE((pRuntimeFunction->UnwindData & 0x3) == 0);

    // compute the size of the unwind info
    PTR_DWORD xdata = dac_cast<PTR_DWORD>(pRuntimeFunction->UnwindData + moduleBase);

    int size = 4;

#if defined(TARGET_ARM)
    // See https://docs.microsoft.com/en-us/cpp/build/arm-exception-handling
    int unwindWords = xdata[0] >> 28;
    int epilogScopes = (xdata[0] >> 23) & 0x1f;
#else
    // See https://docs.microsoft.com/en-us/cpp/build/arm64-exception-handling
    int unwindWords = xdata[0] >> 27;
    int epilogScopes = (xdata[0] >> 22) & 0x1f;
#endif

    // Zero counts mean an extended header word holds the real counts.
    if (unwindWords == 0 && epilogScopes == 0)
    {
        size += 4;
        unwindWords = (xdata[1] >> 16) & 0xff;
        epilogScopes = xdata[1] & 0xffff;
    }

    // If the E bit (21) is clear, each epilog scope occupies its own word.
    if (!(xdata[0] & (1 << 21)))
        size += 4 * epilogScopes;

    size += 4 * unwindWords;

    _ASSERTE(xdata[0] & (1 << 20)); // personality routine should be always present
    size += 4;

    *pSize = size;
    return xdata;

#else
    PORTABILITY_ASSERT("GetUnwindDataBlob");
    return NULL;
#endif
}

// GetFuncletStartAddress returns the starting address of the function or funclet indicated by the EECodeInfo address.
TADDR IJitManager::GetFuncletStartAddress(EECodeInfo * pCodeInfo)
{
    PTR_RUNTIME_FUNCTION pFunctionEntry = pCodeInfo->GetFunctionEntry();

#ifdef TARGET_AMD64
    _ASSERTE((pFunctionEntry->UnwindData & RUNTIME_FUNCTION_INDIRECT) == 0);
#endif

    TADDR baseAddress = pCodeInfo->GetModuleBase();

#if defined(EXCEPTION_DATA_SUPPORTS_FUNCTION_FRAGMENTS)
    // For fragmented unwind data, back up to the root (prolog-containing) record.
    pFunctionEntry = FindRootEntry(pFunctionEntry, baseAddress);
#endif // EXCEPTION_DATA_SUPPORTS_FUNCTION_FRAGMENTS

    TADDR funcletStartAddress = baseAddress + RUNTIME_FUNCTION__BeginAddress(pFunctionEntry);

    return funcletStartAddress;
}

BOOL IJitManager::IsFunclet(EECodeInfo * pCodeInfo)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
    }
    CONTRACTL_END;

    TADDR funcletStartAddress = GetFuncletStartAddress(pCodeInfo);
    TADDR methodStartAddress = pCodeInfo->GetStartAddress();

    // A funclet's start address differs from its parent method's start address.
    return (funcletStartAddress != methodStartAddress);
}

BOOL IJitManager::IsFilterFunclet(EECodeInfo * pCodeInfo)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
    }
    CONTRACTL_END;

    if (!pCodeInfo->IsFunclet())
        return FALSE;

    TADDR funcletStartAddress = GetFuncletStartAddress(pCodeInfo);

    // This assumes no hot/cold splitting for funclets

    _ASSERTE(FitsInU4(pCodeInfo->GetCodeAddress() - funcletStartAddress));
    DWORD relOffsetWithinFunclet = static_cast<DWORD>(pCodeInfo->GetCodeAddress() - funcletStartAddress);

    _ASSERTE(pCodeInfo->GetRelOffset() >= relOffsetWithinFunclet);
    DWORD funcletStartOffset = pCodeInfo->GetRelOffset() - relOffsetWithinFunclet;

    // Scan the method's EH clauses for a filter whose offset matches this funclet.
    EH_CLAUSE_ENUMERATOR pEnumState;
    unsigned EHCount = InitializeEHEnumeration(pCodeInfo->GetMethodToken(), &pEnumState);
    _ASSERTE(EHCount > 0);

    EE_ILEXCEPTION_CLAUSE EHClause;
    for (ULONG i = 0; i < EHCount; i++)
    {
        GetNextEHClause(&pEnumState, &EHClause);

        // Duplicate clauses are always listed at the end, so when we hit a duplicate clause,
        // we have already visited all of the normal clauses.
        if (IsDuplicateClause(&EHClause))
        {
            break;
        }

        if (IsFilterHandler(&EHClause))
        {
            if (EHClause.FilterOffset == funcletStartOffset)
            {
                return true;
            }
        }
    }

    return false;
}

#else // FEATURE_EH_FUNCLETS

// Non-funclet targets carry no unwind blob; report zero size.
PTR_VOID GetUnwindDataBlob(TADDR moduleBase, PTR_RUNTIME_FUNCTION pRuntimeFunction, /* out */ SIZE_T * pSize)
{
    *pSize = 0;
    return dac_cast<PTR_VOID>(pRuntimeFunction->UnwindData + moduleBase);
}

#endif // FEATURE_EH_FUNCLETS

#ifndef DACCESS_COMPILE

//**********************************************************************************
//  EEJitManager
//**********************************************************************************

EEJitManager::EEJitManager()
    :
    // CRST_DEBUGGER_THREAD - We take this lock on debugger thread during EnC add method, among other things
    // CRST_TAKEN_DURING_SHUTDOWN - We take this lock during shutdown if ETW is on (to do rundown)
    m_CodeHeapCritSec( CrstSingleUseLock,
                       CrstFlags(CRST_UNSAFE_ANYMODE|CRST_DEBUGGER_THREAD|CRST_TAKEN_DURING_SHUTDOWN)),
    m_CPUCompileFlags(),
    m_JitLoadCritSec( CrstSingleUseLock )
{
    CONTRACTL {
        THROWS;
        GC_NOTRIGGER;
    } CONTRACTL_END;

    m_pCodeHeap = NULL;
    m_jit = NULL;
    m_JITCompiler = NULL;
#ifdef TARGET_AMD64
    m_pEmergencyJumpStubReserveList = NULL;
#endif
#if defined(TARGET_X86) || defined(TARGET_AMD64)
    m_JITCompilerOther = NULL;
#endif

#ifdef ALLOW_SXS_JIT
    m_alternateJit = NULL;
    m_AltJITCompiler = NULL;
    m_AltJITRequired = false;
#endif

    m_cleanupList = NULL;
}

#if defined(TARGET_X86) || defined(TARGET_AMD64)

// Returns true when the OS has enabled AVX (YMM) state saving; on non-Windows
// this always reports true (the PAL handles capability detection elsewhere).
bool DoesOSSupportAVX()
{
    LIMITED_METHOD_CONTRACT;

#ifndef TARGET_UNIX
    // On Windows we have an api(GetEnabledXStateFeatures) to check if AVX is supported
    typedef DWORD64 (WINAPI *PGETENABLEDXSTATEFEATURES)();
    PGETENABLEDXSTATEFEATURES pfnGetEnabledXStateFeatures = NULL;

    HMODULE hMod = WszLoadLibraryEx(WINDOWS_KERNEL32_DLLNAME_W, NULL, LOAD_LIBRARY_SEARCH_SYSTEM32);
    if(hMod == NULL)
        return FALSE;

    pfnGetEnabledXStateFeatures = (PGETENABLEDXSTATEFEATURES)GetProcAddress(hMod, "GetEnabledXStateFeatures");
    if
    (pfnGetEnabledXStateFeatures == NULL)
    {
        return FALSE;
    }

    DWORD64 FeatureMask = pfnGetEnabledXStateFeatures();
    if ((FeatureMask & XSTATE_MASK_AVX) == 0)
    {
        return FALSE;
    }
#endif // !TARGET_UNIX

    return TRUE;
}

#endif // defined(TARGET_X86) || defined(TARGET_AMD64)

#ifdef TARGET_ARM64
extern "C" DWORD64 __stdcall GetDataCacheZeroIDReg();
#endif

// Detects the hardware's instruction-set capabilities (via CPUID on x86/x64,
// OS facilities on arm64), applies user config overrides, and stores the
// result in m_CPUCompileFlags for the JIT.
void EEJitManager::SetCpuInfo()
{
    LIMITED_METHOD_CONTRACT;

    //
    // NOTE: This function needs to be kept in sync with compSetProcesor() in jit\compiler.cpp
    //

    CORJIT_FLAGS CPUCompileFlags;

#if defined(TARGET_X86)
    CORINFO_CPU cpuInfo;
    GetSpecificCpuInfo(&cpuInfo);

    switch (CPU_X86_FAMILY(cpuInfo.dwCPUType))
    {
    case CPU_X86_PENTIUM_4:
        CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_TARGET_P4);
        break;

    default:
        break;
    }

    if (CPU_X86_USE_CMOV(cpuInfo.dwFeatures))
    {
        CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_CMOV);
        CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_FCOMI);
    }
#endif // TARGET_X86

#if defined(TARGET_X86) || defined(TARGET_AMD64)
    CPUCompileFlags.Set(InstructionSet_X86Base);

    // NOTE: The below checks are based on the information reported by
    //   Intel 64 and IA-32 Architectures Software Developer's Manual. Volume 2
    // and
    //   AMD64 Architecture Programmer's Manual. Volume 3
    // For more information, please refer to the CPUID instruction in the respective manuals

    // We will set the following flags:
    //   CORJIT_FLAG_USE_SSE2 is required
    //      SSE  - EDX bit 25
    //      SSE2 - EDX bit 26
    //   CORJIT_FLAG_USE_AES
    //      CORJIT_FLAG_USE_SSE2
    //      AES - ECX bit 25
    //   CORJIT_FLAG_USE_PCLMULQDQ
    //      CORJIT_FLAG_USE_SSE2
    //      PCLMULQDQ - ECX bit 1
    //   CORJIT_FLAG_USE_SSE3 if the following feature bits are set (input EAX of 1)
    //      CORJIT_FLAG_USE_SSE2
    //      SSE3 - ECX bit 0
    //   CORJIT_FLAG_USE_SSSE3 if the following feature bits are set (input EAX of 1)
    //      CORJIT_FLAG_USE_SSE3
    //      SSSE3 - ECX bit 9
    //   CORJIT_FLAG_USE_SSE41 if the following feature bits are set (input EAX of 1)
    //      CORJIT_FLAG_USE_SSSE3
    //      SSE4.1 - ECX bit 19
    //   CORJIT_FLAG_USE_SSE42 if the following feature bits are set (input EAX of 1)
    //      CORJIT_FLAG_USE_SSE41
    //      SSE4.2 - ECX bit 20
    //   CORJIT_FLAG_USE_POPCNT if the following feature bits are set (input EAX of 1)
    //      CORJIT_FLAG_USE_SSE42
    //      POPCNT - ECX bit 23
    //   CORJIT_FLAG_USE_AVX if the following feature bits are set (input EAX of 1), and xmmYmmStateSupport returns 1:
    //      CORJIT_FLAG_USE_SSE42
    //      OSXSAVE - ECX bit 27
    //      AVX - ECX bit 28
    //      XGETBV - XCR0[2:1]    11b
    //   CORJIT_FLAG_USE_FMA if the following feature bits are set (input EAX of 1), and xmmYmmStateSupport returns 1:
    //      CORJIT_FLAG_USE_AVX
    //      FMA - ECX bit 12
    //   CORJIT_FLAG_USE_AVX2 if the following feature bit is set (input EAX of 0x07 and input ECX of 0):
    //      CORJIT_FLAG_USE_AVX
    //      AVX2 - EBX bit 5
    //   CORJIT_FLAG_USE_AVXVNNI if the following feature bit is set (input EAX of 0x07 and input ECX of 1):
    //      CORJIT_FLAG_USE_AVX2
    //      AVXVNNI - EAX bit 4
    //   CORJIT_FLAG_USE_AVX_512 is not currently set, but defined so that it can be used in future without
    //      synchronously updating VM and JIT.
    //   CORJIT_FLAG_USE_BMI1 if the following feature bit is set (input EAX of 0x07 and input ECX of 0):
    //      BMI1 - EBX bit 3
    //   CORJIT_FLAG_USE_BMI2 if the following feature bit is set (input EAX of 0x07 and input ECX of 0):
    //      BMI2 - EBX bit 8
    //   CORJIT_FLAG_USE_LZCNT if the following feature bits are set (input EAX of 80000001H)
    //      LZCNT - ECX bit 5

    int cpuidInfo[4];

    const int EAX = CPUID_EAX;
    const int EBX = CPUID_EBX;
    const int ECX = CPUID_ECX;
    const int EDX = CPUID_EDX;

    __cpuid(cpuidInfo, 0x00000000);
    uint32_t maxCpuId = static_cast<uint32_t>(cpuidInfo[EAX]);

    if (maxCpuId >= 1)
    {
        __cpuid(cpuidInfo, 0x00000001);

        // The nesting below mirrors the ISA dependency chain documented above:
        // each inner set is only reported when its prerequisite is present.
        if (((cpuidInfo[EDX] & (1 << 25)) != 0) && ((cpuidInfo[EDX] & (1 << 26)) != 0))        // SSE & SSE2
        {
            CPUCompileFlags.Set(InstructionSet_SSE);
            CPUCompileFlags.Set(InstructionSet_SSE2);

            if ((cpuidInfo[ECX] & (1 << 25)) != 0)                                             // AESNI
            {
                CPUCompileFlags.Set(InstructionSet_AES);
            }

            if ((cpuidInfo[ECX] & (1 << 1)) != 0)                                              // PCLMULQDQ
            {
                CPUCompileFlags.Set(InstructionSet_PCLMULQDQ);
            }

            if ((cpuidInfo[ECX] & (1 << 0)) != 0)                                              // SSE3
            {
                CPUCompileFlags.Set(InstructionSet_SSE3);

                if ((cpuidInfo[ECX] & (1 << 9)) != 0)                                          // SSSE3
                {
                    CPUCompileFlags.Set(InstructionSet_SSSE3);

                    if ((cpuidInfo[ECX] & (1 << 19)) != 0)                                     // SSE4.1
                    {
                        CPUCompileFlags.Set(InstructionSet_SSE41);

                        if ((cpuidInfo[ECX] & (1 << 20)) != 0)                                 // SSE4.2
                        {
                            CPUCompileFlags.Set(InstructionSet_SSE42);

                            if ((cpuidInfo[ECX] & (1 << 23)) != 0)                             // POPCNT
                            {
                                CPUCompileFlags.Set(InstructionSet_POPCNT);
                            }

                            if (((cpuidInfo[ECX] & (1 << 27)) != 0) && ((cpuidInfo[ECX] & (1 << 28)) != 0)) // OSXSAVE & AVX
                            {
                                // AVX additionally requires the OS to preserve YMM state.
                                if(DoesOSSupportAVX() && (xmmYmmStateSupport() == 1))          // XGETBV == 11
                                {
                                    CPUCompileFlags.Set(InstructionSet_AVX);

                                    if ((cpuidInfo[ECX] & (1 << 12)) != 0)                     // FMA
                                    {
                                        CPUCompileFlags.Set(InstructionSet_FMA);
                                    }

                                    if (maxCpuId >= 0x07)
                                    {
                                        __cpuidex(cpuidInfo, 0x00000007, 0x00000000);

                                        if ((cpuidInfo[EBX] & (1 << 5)) != 0)                  // AVX2
                                        {
                                            CPUCompileFlags.Set(InstructionSet_AVX2);

                                            __cpuidex(cpuidInfo, 0x00000007, 0x00000001);
                                            if ((cpuidInfo[EAX] & (1 << 4)) != 0)              // AVX-VNNI
                                            {
                                                CPUCompileFlags.Set(InstructionSet_AVXVNNI);
                                            }
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }

            static ConfigDWORD fFeatureSIMD;

            if (fFeatureSIMD.val(CLRConfig::EXTERNAL_FeatureSIMD) != 0)
            {
                CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_FEATURE_SIMD);
            }

            if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_SIMD16ByteOnly) != 0)
            {
                CPUCompileFlags.Clear(InstructionSet_AVX2);
            }
        }

        if (maxCpuId >= 0x07)
        {
            __cpuidex(cpuidInfo, 0x00000007, 0x00000000);

            if ((cpuidInfo[EBX] & (1 << 3)) != 0)                                              // BMI1
            {
                CPUCompileFlags.Set(InstructionSet_BMI1);
            }

            if ((cpuidInfo[EBX] & (1 << 8)) != 0)                                              // BMI2
            {
                CPUCompileFlags.Set(InstructionSet_BMI2);
            }
        }
    }

    __cpuid(cpuidInfo, 0x80000000);
    uint32_t maxCpuIdEx = static_cast<uint32_t>(cpuidInfo[EAX]);

    if (maxCpuIdEx >= 0x80000001)
    {
        __cpuid(cpuidInfo, 0x80000001);

        if ((cpuidInfo[ECX] & (1 << 5)) != 0)                                                  // LZCNT
        {
            CPUCompileFlags.Set(InstructionSet_LZCNT);
        }
    }

    // SSE and SSE2 are hard requirements for the runtime on these targets.
    if (!CPUCompileFlags.IsSet(InstructionSet_SSE))
    {
        EEPOLICY_HANDLE_FATAL_ERROR_WITH_MESSAGE(COR_E_EXECUTIONENGINE, W("SSE is not supported on the processor."));
    }
    if (!CPUCompileFlags.IsSet(InstructionSet_SSE2))
    {
        EEPOLICY_HANDLE_FATAL_ERROR_WITH_MESSAGE(COR_E_EXECUTIONENGINE, W("SSE2 is not supported on the processor."));
    }
#endif // defined(TARGET_X86) || defined(TARGET_AMD64)

#if defined(TARGET_ARM64)
    static ConfigDWORD fFeatureSIMD;

    if (fFeatureSIMD.val(CLRConfig::EXTERNAL_FeatureSIMD) != 0)
    {
        CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_FEATURE_SIMD);
    }

#if defined(TARGET_UNIX)
    PAL_GetJitCpuCapabilityFlags(&CPUCompileFlags);

    // For HOST_ARM64, if OS has exposed mechanism to detect CPU capabilities, make sure it has AdvSimd capability.
    // For other cases i.e. if !HOST_ARM64 but TARGET_ARM64 or HOST_ARM64 but OS doesn't expose way to detect
    // CPU capabilities, we always enable AdvSimd flags by default.
    //
    if (!CPUCompileFlags.IsSet(InstructionSet_AdvSimd))
    {
        EEPOLICY_HANDLE_FATAL_ERROR_WITH_MESSAGE(COR_E_EXECUTIONENGINE, W("AdvSimd is not supported on the processor."));
    }
#elif defined(HOST_64BIT)
    // FP and SIMD support are enabled by default
    CPUCompileFlags.Set(InstructionSet_ArmBase);
    CPUCompileFlags.Set(InstructionSet_AdvSimd);

    // PF_ARM_V8_CRYPTO_INSTRUCTIONS_AVAILABLE (30)
    if (IsProcessorFeaturePresent(PF_ARM_V8_CRYPTO_INSTRUCTIONS_AVAILABLE))
    {
        CPUCompileFlags.Set(InstructionSet_Aes);
        CPUCompileFlags.Set(InstructionSet_Sha1);
        CPUCompileFlags.Set(InstructionSet_Sha256);
    }
    // PF_ARM_V8_CRC32_INSTRUCTIONS_AVAILABLE (31)
    if (IsProcessorFeaturePresent(PF_ARM_V8_CRC32_INSTRUCTIONS_AVAILABLE))
    {
        CPUCompileFlags.Set(InstructionSet_Crc32);
    }
#endif // HOST_64BIT

    if (GetDataCacheZeroIDReg() == 4)
    {
        // DCZID_EL0<4> (DZP) indicates whether use of DC ZVA instructions is permitted (0) or prohibited (1).
        // DCZID_EL0<3:0> (BS) specifies Log2 of the block size in words.
        //
        // We set the flag when the instruction is permitted and the block size is 64 bytes.
        CPUCompileFlags.Set(InstructionSet_Dczva);
    }
#endif // TARGET_ARM64

    // Now that we've queried the actual hardware support, we need to adjust what is actually supported based
    // on some externally available config switches that exist so users can test code for downlevel hardware.

#if defined(TARGET_AMD64) || defined(TARGET_X86)
    if (!CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_EnableHWIntrinsic))
    {
        CPUCompileFlags.Clear(InstructionSet_X86Base);
    }

    if (!CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_EnableAES))
    {
        CPUCompileFlags.Clear(InstructionSet_AES);
    }

    if (!CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_EnableAVX))
    {
        CPUCompileFlags.Clear(InstructionSet_AVX);
    }

    if (!CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_EnableAVX2))
    {
        CPUCompileFlags.Clear(InstructionSet_AVX2);
    }

    if (!CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_EnableAVXVNNI))
    {
        CPUCompileFlags.Clear(InstructionSet_AVXVNNI);
    }

    if (!CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_EnableBMI1))
    {
        CPUCompileFlags.Clear(InstructionSet_BMI1);
    }

    if (!CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_EnableBMI2))
    {
        CPUCompileFlags.Clear(InstructionSet_BMI2);
    }

    if (!CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_EnableFMA))
    {
        CPUCompileFlags.Clear(InstructionSet_FMA);
    }

    if (!CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_EnableLZCNT))
    {
        CPUCompileFlags.Clear(InstructionSet_LZCNT);
    }

    if (!CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_EnablePCLMULQDQ))
    {
        CPUCompileFlags.Clear(InstructionSet_PCLMULQDQ);
    }

    if (!CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_EnablePOPCNT))
    {
        CPUCompileFlags.Clear(InstructionSet_POPCNT);
    }

    if (!CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_EnableSSE))
    {
        CPUCompileFlags.Clear(InstructionSet_SSE);
    }

    if (!CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_EnableSSE2))
    {
        CPUCompileFlags.Clear(InstructionSet_SSE2);
    }

    // We need to additionally check that EXTERNAL_EnableSSE3_4 is set, as that
    // is a prexisting config flag that controls the SSE3+ ISAs
    if (!CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_EnableSSE3) || !CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_EnableSSE3_4))
    {
        CPUCompileFlags.Clear(InstructionSet_SSE3);
    }

    if (!CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_EnableSSE41))
    {
        CPUCompileFlags.Clear(InstructionSet_SSE41);
    }

    if (!CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_EnableSSE42))
    {
        CPUCompileFlags.Clear(InstructionSet_SSE42);
    }

    if (!CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_EnableSSSE3))
    {
        CPUCompileFlags.Clear(InstructionSet_SSSE3);
    }
#elif defined(TARGET_ARM64)
    if (!CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_EnableHWIntrinsic))
    {
        CPUCompileFlags.Clear(InstructionSet_ArmBase);
    }

    if (!CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_EnableArm64AdvSimd))
    {
        CPUCompileFlags.Clear(InstructionSet_AdvSimd);
    }

    if (!CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_EnableArm64Aes))
    {
        CPUCompileFlags.Clear(InstructionSet_Aes);
    }

    if (!CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_EnableArm64Atomics))
    {
        CPUCompileFlags.Clear(InstructionSet_Atomics);
    }

    if (!CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_EnableArm64Crc32))
    {
        CPUCompileFlags.Clear(InstructionSet_Crc32);
    }

    if (!CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_EnableArm64Dczva))
    {
        CPUCompileFlags.Clear(InstructionSet_Dczva);
    }

    if (!CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_EnableArm64Dp))
    {
        CPUCompileFlags.Clear(InstructionSet_Dp);
    }

    if (!CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_EnableArm64Rdm))
    {
        CPUCompileFlags.Clear(InstructionSet_Rdm);
    }

    if (!CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_EnableArm64Sha1))
    {
        CPUCompileFlags.Clear(InstructionSet_Sha1);
    }

    if (!CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_EnableArm64Sha256))
    {
        CPUCompileFlags.Clear(InstructionSet_Sha256);
    }
#endif

    // These calls are very important as it ensures the flags are consistent with any
    // removals specified above. This includes removing corresponding 64-bit ISAs
    // and any other implications such as SSE2 depending on SSE or AdvSimd on ArmBase

    CPUCompileFlags.Set64BitInstructionSetVariants();
    CPUCompileFlags.EnsureValidInstructionSetSupport();

    m_CPUCompileFlags = CPUCompileFlags;
}

// Define some data that we can use to get a better idea of what happened when we get a Watson dump that indicates the JIT failed to load.
// This will be used and updated by the JIT loading and initialization functions, and the data written will get written into a Watson dump. enum JIT_LOAD_JIT_ID { JIT_LOAD_MAIN = 500, // The "main" JIT. Normally, this is named "clrjit.dll". Start at a number that is somewhat uncommon (i.e., not zero or 1) to help distinguish from garbage, in process dumps. // 501 is JIT_LOAD_LEGACY on some platforms; please do not reuse this value. JIT_LOAD_ALTJIT = 502 // An "altjit". By default, named something like "clrjit_<targetos>_<target_arch>_<host_arch>.dll". Used both internally, as well as externally for JIT CTP builds. }; enum JIT_LOAD_STATUS { JIT_LOAD_STATUS_STARTING = 1001, // The JIT load process is starting. Start at a number that is somewhat uncommon (i.e., not zero or 1) to help distinguish from garbage, in process dumps. JIT_LOAD_STATUS_DONE_LOAD, // LoadLibrary of the JIT dll succeeded. JIT_LOAD_STATUS_DONE_GET_JITSTARTUP, // GetProcAddress for "jitStartup" succeeded. JIT_LOAD_STATUS_DONE_CALL_JITSTARTUP, // Calling jitStartup() succeeded. JIT_LOAD_STATUS_DONE_GET_GETJIT, // GetProcAddress for "getJit" succeeded. JIT_LOAD_STATUS_DONE_CALL_GETJIT, // Calling getJit() succeeded. JIT_LOAD_STATUS_DONE_CALL_GETVERSIONIDENTIFIER, // Calling ICorJitCompiler::getVersionIdentifier() succeeded. JIT_LOAD_STATUS_DONE_VERSION_CHECK, // The JIT-EE version identifier check succeeded. JIT_LOAD_STATUS_DONE, // The JIT load is complete, and successful. }; struct JIT_LOAD_DATA { JIT_LOAD_JIT_ID jld_id; // Which JIT are we currently loading? JIT_LOAD_STATUS jld_status; // The current load status of a JIT load attempt. HRESULT jld_hr; // If the JIT load fails, the last jld_status will be JIT_LOAD_STATUS_STARTING. // In that case, this will contain the HRESULT returned by LoadLibrary. // Otherwise, this will be S_OK (which is zero). }; // Here's the global data for JIT load and initialization state. 
JIT_LOAD_DATA g_JitLoadData;

// Validate that the name used to load the JIT is just a simple file name
// and does not contain something that could be used in a non-qualified path.
// For example, using the string "..\..\..\myjit.dll" we might attempt to
// load a JIT from the root of the drive.
//
// The minimal set of characters that we must check for and exclude are:
// '\\' - (backslash)
// '/'  - (forward slash)
// ':'  - (colon)
//
// Returns false if we find any of these characters in 'pwzJitName'
// Returns true if we reach the null terminator without encountering
// any of these characters.
//
static bool ValidateJitName(LPCWSTR pwzJitName)
{
    LPCWSTR pCurChar = pwzJitName;
    wchar_t curChar;
    do {
        curChar = *pCurChar;
        if ((curChar == '\\') || (curChar == '/') || (curChar == ':'))
        {
            // Return false if we find any of these character in 'pwzJitName'
            return false;
        }
        pCurChar++;
    } while (curChar != 0);

    // Return true; we have reached the null terminator
    //
    return true;
}

CORINFO_OS getClrVmOs();

// LoadAndInitializeJIT: load the JIT dll into the process, and initialize it (call the UtilCode initialization function,
// check the JIT-EE interface GUID, etc.)
//
// Parameters:
//
// pwzJitName        - The filename of the JIT .dll file to load. E.g., "altjit.dll".
// phJit             - On return, *phJit is the Windows module handle of the loaded JIT dll. It will be NULL if the load failed.
// ppICorJitCompiler - On return, *ppICorJitCompiler is the ICorJitCompiler* returned by the JIT's getJit() entrypoint.
//                     It is NULL if the JIT returns a NULL interface pointer, or if the JIT-EE interface GUID is mismatched.
//                     Note that if the given JIT is loaded, but the interface is mismatched, then *phJit will be legal and non-NULL
//                     even though *ppICorJitCompiler is NULL. This allows the caller to unload the JIT dll, if necessary
//                     (nobody does this today).
// pJitLoadData      - Pointer to a structure that we update as we load and initialize the JIT to indicate how far we've gotten.
// This is used to help understand problems we see with JIT loading that come in via Watson dumps. Since we don't throw
// an exception immediately upon failure, we can lose information about what the failure was if we don't store this
// information in a way that persists into a process dump.
// targetOs          - Target OS for JIT
//
static void LoadAndInitializeJIT(LPCWSTR pwzJitName, OUT HINSTANCE* phJit, OUT ICorJitCompiler** ppICorJitCompiler, IN OUT JIT_LOAD_DATA* pJitLoadData, CORINFO_OS targetOs)
{
    STANDARD_VM_CONTRACT;

    _ASSERTE(phJit != NULL);
    _ASSERTE(ppICorJitCompiler != NULL);
    _ASSERTE(pJitLoadData != NULL);

    pJitLoadData->jld_status = JIT_LOAD_STATUS_STARTING;
    pJitLoadData->jld_hr = S_OK;

    *phJit = NULL;
    *ppICorJitCompiler = NULL;

    if (pwzJitName == nullptr)
    {
        pJitLoadData->jld_hr = E_FAIL;
        LOG((LF_JIT, LL_FATALERROR, "LoadAndInitializeJIT: pwzJitName is null"));
        return;
    }

    HRESULT hr = E_FAIL;

    if (ValidateJitName(pwzJitName))
    {
        // Load JIT from next to CoreCLR binary
        PathString CoreClrFolderHolder;
        if (GetClrModulePathName(CoreClrFolderHolder) && !CoreClrFolderHolder.IsEmpty())
        {
            SString::Iterator iter = CoreClrFolderHolder.End();
            BOOL findSep = CoreClrFolderHolder.FindBack(iter, DIRECTORY_SEPARATOR_CHAR_W);
            if (findSep)
            {
                // Replace the coreclr module name with the JIT file name to form the full path.
                SString sJitName(pwzJitName);
                CoreClrFolderHolder.Replace(iter + 1, CoreClrFolderHolder.End() - (iter + 1), sJitName);

                *phJit = CLRLoadLibrary(CoreClrFolderHolder.GetUnicode());
                if (*phJit != NULL)
                {
                    hr = S_OK;
                }
            }
        }
    }
    else
    {
        LOG((LF_JIT, LL_FATALERROR, "LoadAndInitializeJIT: invalid characters in %S\n", pwzJitName));
    }

    if (SUCCEEDED(hr))
    {
        pJitLoadData->jld_status = JIT_LOAD_STATUS_DONE_LOAD;

        EX_TRY
        {
            typedef void (* pjitStartup)(ICorJitHost*);
            pjitStartup jitStartupFn = (pjitStartup) GetProcAddress(*phJit, "jitStartup");

            if (jitStartupFn)
            {
                pJitLoadData->jld_status = JIT_LOAD_STATUS_DONE_GET_JITSTARTUP;

                (*jitStartupFn)(JitHost::getJitHost());

                pJitLoadData->jld_status = JIT_LOAD_STATUS_DONE_CALL_JITSTARTUP;
            }

            typedef ICorJitCompiler* (__stdcall* pGetJitFn)();
            pGetJitFn getJitFn = (pGetJitFn) GetProcAddress(*phJit, "getJit");

            if (getJitFn)
            {
                pJitLoadData->jld_status = JIT_LOAD_STATUS_DONE_GET_GETJIT;

                ICorJitCompiler* pICorJitCompiler = (*getJitFn)();
                if (pICorJitCompiler != NULL)
                {
                    pJitLoadData->jld_status = JIT_LOAD_STATUS_DONE_CALL_GETJIT;

                    GUID versionId;
                    memset(&versionId, 0, sizeof(GUID));
                    pICorJitCompiler->getVersionIdentifier(&versionId);

                    pJitLoadData->jld_status = JIT_LOAD_STATUS_DONE_CALL_GETVERSIONIDENTIFIER;

                    if (memcmp(&versionId, &JITEEVersionIdentifier, sizeof(GUID)) == 0)
                    {
                        pJitLoadData->jld_status = JIT_LOAD_STATUS_DONE_VERSION_CHECK;

                        // Specify to the JIT that it is working with the OS that we are compiled against
                        pICorJitCompiler->setTargetOS(targetOs);

                        // The JIT has loaded and passed the version identifier test, so publish the JIT interface to the caller.
                        *ppICorJitCompiler = pICorJitCompiler;

                        // The JIT is completely loaded and initialized now.
                        pJitLoadData->jld_status = JIT_LOAD_STATUS_DONE;
                    }
                    else
                    {
                        // Mismatched version ID. Fail the load.
                        LOG((LF_JIT, LL_FATALERROR, "LoadAndInitializeJIT: mismatched JIT version identifier in %S\n", pwzJitName));
                    }
                }
                else
                {
                    LOG((LF_JIT, LL_FATALERROR, "LoadAndInitializeJIT: failed to get ICorJitCompiler in %S\n", pwzJitName));
                }
            }
            else
            {
                LOG((LF_JIT, LL_FATALERROR, "LoadAndInitializeJIT: failed to find 'getJit' entrypoint in %S\n", pwzJitName));
            }
        }
        EX_CATCH
        {
            LOG((LF_JIT, LL_FATALERROR, "LoadAndInitializeJIT: caught an exception trying to initialize %S\n", pwzJitName));
        }
        EX_END_CATCH(SwallowAllExceptions)
    }
    else
    {
        pJitLoadData->jld_hr = hr;
        LOG((LF_JIT, LL_FATALERROR, "LoadAndInitializeJIT: failed to load %S, hr=0x%08x\n", pwzJitName, hr));
    }
}

#ifdef FEATURE_MERGE_JIT_AND_ENGINE
EXTERN_C void jitStartup(ICorJitHost* host);
EXTERN_C ICorJitCompiler* getJit();
#endif // FEATURE_MERGE_JIT_AND_ENGINE

// Loads and publishes the main JIT (and, if requested via config, an altjit),
// exactly once per process; protected by m_JitLoadCritSec.
BOOL EEJitManager::LoadJIT()
{
    STANDARD_VM_CONTRACT;

    // If the JIT is already loaded, don't take the lock.
    if (IsJitLoaded())
        return TRUE;

    // Use m_JitLoadCritSec to ensure that the JIT is loaded on one thread only
    CrstHolder chRead(&m_JitLoadCritSec);

    // Did someone load the JIT before we got the lock?
    if (IsJitLoaded())
        return TRUE;

    SetCpuInfo();

    ICorJitCompiler* newJitCompiler = NULL;

#ifdef FEATURE_MERGE_JIT_AND_ENGINE
    EX_TRY
    {
        jitStartup(JitHost::getJitHost());

        newJitCompiler = getJit();

        // We don't need to call getVersionIdentifier(), since the JIT is linked together with the VM.
    }
    EX_CATCH
    {
    }
    EX_END_CATCH(SwallowAllExceptions)

#else // !FEATURE_MERGE_JIT_AND_ENGINE

    m_JITCompiler = NULL;
#if defined(TARGET_X86) || defined(TARGET_AMD64)
    m_JITCompilerOther = NULL;
#endif

    g_JitLoadData.jld_id = JIT_LOAD_MAIN;
    LoadAndInitializeJIT(ExecutionManager::GetJitName(), &m_JITCompiler, &newJitCompiler, &g_JitLoadData, getClrVmOs());
#endif // !FEATURE_MERGE_JIT_AND_ENGINE

#ifdef ALLOW_SXS_JIT

    // Do not load altjit.dll unless COMPlus_AltJit is set.
    // Even if the main JIT fails to load, if the user asks for an altjit we try to load it.
    // This allows us to display load error messages for loading altjit.

    ICorJitCompiler* newAltJitCompiler = NULL;

    LPWSTR altJitConfig;
    IfFailThrow(CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_AltJit, &altJitConfig));

    m_AltJITCompiler = NULL;

    if (altJitConfig != NULL)
    {
        // Load the altjit into the system.
        // Note: altJitName must be declared as a const otherwise assigning the string
        // constructed by MAKEDLLNAME_W() to altJitName will cause a build break on Unix.
        LPCWSTR altJitName;
        IfFailThrow(CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_AltJitName, (LPWSTR*)&altJitName));

        if (altJitName == NULL)
        {
#ifdef TARGET_WINDOWS
#ifdef TARGET_X86
            altJitName = MAKEDLLNAME_W(W("clrjit_win_x86_x86"));
#elif defined(TARGET_AMD64)
            altJitName = MAKEDLLNAME_W(W("clrjit_win_x64_x64"));
#endif
#else // TARGET_WINDOWS
#ifdef TARGET_X86
            altJitName = MAKEDLLNAME_W(W("clrjit_unix_x86_x86"));
#elif defined(TARGET_AMD64)
            altJitName = MAKEDLLNAME_W(W("clrjit_unix_x64_x64"));
#endif
#endif // TARGET_WINDOWS

#if defined(TARGET_ARM)
            altJitName = MAKEDLLNAME_W(W("clrjit_universal_arm_arm"));
#elif defined(TARGET_ARM64)
            altJitName = MAKEDLLNAME_W(W("clrjit_universal_arm64_arm64"));
#endif // TARGET_ARM
        }

        CORINFO_OS targetOs = getClrVmOs();
        LPWSTR altJitOsConfig;
        IfFailThrow(CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_AltJitOs, &altJitOsConfig));
        if (altJitOsConfig != NULL)
        {
            // We have some inconsistency all over the place with osx vs macos, let's handle both here
            if ((_wcsicmp(altJitOsConfig, W("macos")) == 0) || (_wcsicmp(altJitOsConfig, W("osx")) == 0))
            {
                targetOs = CORINFO_MACOS;
            }
            else if ((_wcsicmp(altJitOsConfig, W("linux")) == 0) || (_wcsicmp(altJitOsConfig, W("unix")) == 0))
            {
                targetOs = CORINFO_UNIX;
            }
            else if (_wcsicmp(altJitOsConfig, W("windows")) == 0)
            {
                targetOs = CORINFO_WINNT;
            }
            else
            {
                _ASSERTE(!"Unknown AltJitOS, it has to be either Windows, Linux or macOS");
            }
        }
        g_JitLoadData.jld_id = JIT_LOAD_ALTJIT;
        LoadAndInitializeJIT(altJitName, &m_AltJITCompiler, &newAltJitCompiler, &g_JitLoadData, targetOs);
    }
#endif // ALLOW_SXS_JIT

    // Publish the compilers.

#ifdef ALLOW_SXS_JIT
    m_AltJITRequired = (altJitConfig != NULL);
    m_alternateJit = newAltJitCompiler;
#endif // ALLOW_SXS_JIT

    m_jit = newJitCompiler;

    // Failing to load the main JIT is a failure.
    // If the user requested an altjit and we failed to load an altjit, that is also a failure.
// In either failure case, we'll rip down the VM (so no need to clean up (unload) either JIT that did load successfully. return IsJitLoaded(); } //************************************************************************** CodeFragmentHeap::CodeFragmentHeap(LoaderAllocator * pAllocator, StubCodeBlockKind kind) : m_pAllocator(pAllocator), m_pFreeBlocks(NULL), m_kind(kind), // CRST_DEBUGGER_THREAD - We take this lock on debugger thread during EnC add meth m_CritSec(CrstCodeFragmentHeap, CrstFlags(CRST_UNSAFE_ANYMODE | CRST_DEBUGGER_THREAD)) { WRAPPER_NO_CONTRACT; } void CodeFragmentHeap::AddBlock(VOID * pMem, size_t dwSize) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; // The new "nothrow" below failure is handled in a non-fault way, so // make sure that callers with FORBID_FAULT can call this method without // firing the contract violation assert. PERMANENT_CONTRACT_VIOLATION(FaultViolation, ReasonContractInfrastructure); FreeBlock * pBlock = new (nothrow) FreeBlock; // In the OOM case we don't add the block to the list of free blocks // as we are in a FORBID_FAULT code path. 
if (pBlock != NULL) { pBlock->m_pNext = m_pFreeBlocks; pBlock->m_pBlock = pMem; pBlock->m_dwSize = dwSize; m_pFreeBlocks = pBlock; } } void CodeFragmentHeap::RemoveBlock(FreeBlock ** ppBlock) { LIMITED_METHOD_CONTRACT; FreeBlock * pBlock = *ppBlock; *ppBlock = pBlock->m_pNext; delete pBlock; } CodeFragmentHeap::~CodeFragmentHeap() { FreeBlock* pBlock = m_pFreeBlocks; while (pBlock != NULL) { FreeBlock *pNextBlock = pBlock->m_pNext; delete pBlock; pBlock = pNextBlock; } } TaggedMemAllocPtr CodeFragmentHeap::RealAllocAlignedMem(size_t dwRequestedSize ,unsigned dwAlignment #ifdef _DEBUG ,_In_ _In_z_ const char *szFile ,int lineNum #endif ) { CrstHolder ch(&m_CritSec); dwRequestedSize = ALIGN_UP(dwRequestedSize, sizeof(TADDR)); // We will try to batch up allocation of small blocks into one large allocation #define SMALL_BLOCK_THRESHOLD 0x100 SIZE_T nFreeSmallBlocks = 0; FreeBlock ** ppBestFit = NULL; FreeBlock ** ppFreeBlock = &m_pFreeBlocks; while (*ppFreeBlock != NULL) { FreeBlock * pFreeBlock = *ppFreeBlock; if (((BYTE *)pFreeBlock->m_pBlock + pFreeBlock->m_dwSize) - (BYTE *)ALIGN_UP(pFreeBlock->m_pBlock, dwAlignment) >= (SSIZE_T)dwRequestedSize) { if (ppBestFit == NULL || pFreeBlock->m_dwSize < (*ppBestFit)->m_dwSize) ppBestFit = ppFreeBlock; } else { if (pFreeBlock->m_dwSize < SMALL_BLOCK_THRESHOLD) nFreeSmallBlocks++; } ppFreeBlock = &(*ppFreeBlock)->m_pNext; } VOID * pMem; SIZE_T dwSize; if (ppBestFit != NULL) { pMem = (*ppBestFit)->m_pBlock; dwSize = (*ppBestFit)->m_dwSize; RemoveBlock(ppBestFit); } else { dwSize = dwRequestedSize; if (dwSize < SMALL_BLOCK_THRESHOLD) dwSize = 4 * SMALL_BLOCK_THRESHOLD; pMem = ExecutionManager::GetEEJitManager()->allocCodeFragmentBlock(dwSize, dwAlignment, m_pAllocator, m_kind); } SIZE_T dwExtra = (BYTE *)ALIGN_UP(pMem, dwAlignment) - (BYTE *)pMem; _ASSERTE(dwSize >= dwExtra + dwRequestedSize); SIZE_T dwRemaining = dwSize - (dwExtra + dwRequestedSize); // Avoid accumulation of too many small blocks. 
The more small free blocks we have, the more picky we are going to be about adding new ones. if ((dwRemaining >= max(sizeof(FreeBlock), sizeof(StubPrecode)) + (SMALL_BLOCK_THRESHOLD / 0x10) * nFreeSmallBlocks) || (dwRemaining >= SMALL_BLOCK_THRESHOLD)) { AddBlock((BYTE *)pMem + dwExtra + dwRequestedSize, dwRemaining); dwSize -= dwRemaining; } TaggedMemAllocPtr tmap; tmap.m_pMem = pMem; tmap.m_dwRequestedSize = dwSize; tmap.m_pHeap = this; tmap.m_dwExtra = dwExtra; #ifdef _DEBUG tmap.m_szFile = szFile; tmap.m_lineNum = lineNum; #endif return tmap; } void CodeFragmentHeap::RealBackoutMem(void *pMem , size_t dwSize #ifdef _DEBUG , _In_ _In_z_ const char *szFile , int lineNum , _In_ _In_z_ const char *szAllocFile , int allocLineNum #endif ) { CrstHolder ch(&m_CritSec); { ExecutableWriterHolder<BYTE> memWriterHolder((BYTE*)pMem, dwSize); ZeroMemory(memWriterHolder.GetRW(), dwSize); } // // Try to coalesce blocks if possible // FreeBlock ** ppFreeBlock = &m_pFreeBlocks; while (*ppFreeBlock != NULL) { FreeBlock * pFreeBlock = *ppFreeBlock; if ((BYTE *)pFreeBlock == (BYTE *)pMem + dwSize) { // pMem = pMem; dwSize += pFreeBlock->m_dwSize; RemoveBlock(ppFreeBlock); continue; } else if ((BYTE *)pFreeBlock + pFreeBlock->m_dwSize == (BYTE *)pMem) { pMem = pFreeBlock; dwSize += pFreeBlock->m_dwSize; RemoveBlock(ppFreeBlock); continue; } ppFreeBlock = &(*ppFreeBlock)->m_pNext; } AddBlock(pMem, dwSize); } //************************************************************************** LoaderCodeHeap::LoaderCodeHeap() : m_LoaderHeap(NULL, // RangeList *pRangeList TRUE), // BOOL fMakeExecutable m_cbMinNextPad(0) { WRAPPER_NO_CONTRACT; } void ThrowOutOfMemoryWithinRange() { CONTRACTL { THROWS; GC_NOTRIGGER; } CONTRACTL_END; // Allow breaking into debugger or terminating the process when this exception occurs switch (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_BreakOnOutOfMemoryWithinRange)) { case 1: DebugBreak(); break; case 2: EEPOLICY_HANDLE_FATAL_ERROR(COR_E_OUTOFMEMORY); break; 
default: break; } EX_THROW(EEMessageException, (kOutOfMemoryException, IDS_EE_OUT_OF_MEMORY_WITHIN_RANGE)); } #ifdef TARGET_AMD64 BYTE * EEJitManager::AllocateFromEmergencyJumpStubReserve(const BYTE * loAddr, const BYTE * hiAddr, SIZE_T * pReserveSize) { CONTRACTL { NOTHROW; GC_NOTRIGGER; PRECONDITION(m_CodeHeapCritSec.OwnedByCurrentThread()); } CONTRACTL_END; for (EmergencyJumpStubReserve ** ppPrev = &m_pEmergencyJumpStubReserveList; *ppPrev != NULL; ppPrev = &(*ppPrev)->m_pNext) { EmergencyJumpStubReserve * pList = *ppPrev; if (loAddr <= pList->m_ptr && pList->m_ptr + pList->m_size < hiAddr) { *ppPrev = pList->m_pNext; BYTE * pBlock = pList->m_ptr; *pReserveSize = pList->m_size; delete pList; return pBlock; } } return NULL; } VOID EEJitManager::EnsureJumpStubReserve(BYTE * pImageBase, SIZE_T imageSize, SIZE_T reserveSize) { CONTRACTL { THROWS; GC_NOTRIGGER; } CONTRACTL_END; CrstHolder ch(&m_CodeHeapCritSec); BYTE * loAddr = pImageBase + imageSize + INT32_MIN; if (loAddr > pImageBase) loAddr = NULL; // overflow BYTE * hiAddr = pImageBase + INT32_MAX; if (hiAddr < pImageBase) hiAddr = (BYTE *)UINT64_MAX; // overflow for (EmergencyJumpStubReserve * pList = m_pEmergencyJumpStubReserveList; pList != NULL; pList = pList->m_pNext) { if (loAddr <= pList->m_ptr && pList->m_ptr + pList->m_size < hiAddr) { SIZE_T used = min(reserveSize, pList->m_free); pList->m_free -= used; reserveSize -= used; if (reserveSize == 0) return; } } // Try several different strategies - the most efficient one first int allocMode = 0; // Try to reserve at least 16MB at a time SIZE_T allocChunk = max(ALIGN_UP(reserveSize, VIRTUAL_ALLOC_RESERVE_GRANULARITY), 16*1024*1024); while (reserveSize > 0) { NewHolder<EmergencyJumpStubReserve> pNewReserve(new EmergencyJumpStubReserve()); for (;;) { BYTE * loAddrCurrent = loAddr; BYTE * hiAddrCurrent = hiAddr; switch (allocMode) { case 0: // First, try to allocate towards the center of the allowed range. 
It is more likely to // satisfy subsequent reservations. loAddrCurrent = loAddr + (hiAddr - loAddr) / 8; hiAddrCurrent = hiAddr - (hiAddr - loAddr) / 8; break; case 1: // Try the whole allowed range break; case 2: // If the large allocation failed, retry with small chunk size allocChunk = VIRTUAL_ALLOC_RESERVE_GRANULARITY; break; default: return; // Unable to allocate the reserve - give up } pNewReserve->m_ptr = (BYTE*)ExecutableAllocator::Instance()->ReserveWithinRange(allocChunk, loAddrCurrent, hiAddrCurrent); if (pNewReserve->m_ptr != NULL) break; // Retry with the next allocation strategy allocMode++; } SIZE_T used = min(allocChunk, reserveSize); reserveSize -= used; pNewReserve->m_size = allocChunk; pNewReserve->m_free = allocChunk - used; // Add it to the list pNewReserve->m_pNext = m_pEmergencyJumpStubReserveList; m_pEmergencyJumpStubReserveList = pNewReserve.Extract(); } } #endif // TARGET_AMD64 static size_t GetDefaultReserveForJumpStubs(size_t codeHeapSize) { LIMITED_METHOD_CONTRACT; #if defined(TARGET_AMD64) || defined(TARGET_ARM64) // // Keep a small default reserve at the end of the codeheap for jump stubs. It should reduce // chance that we won't be able allocate jump stub because of lack of suitable address space. 
// static ConfigDWORD configCodeHeapReserveForJumpStubs; int percentReserveForJumpStubs = configCodeHeapReserveForJumpStubs.val(CLRConfig::INTERNAL_CodeHeapReserveForJumpStubs); size_t reserveForJumpStubs = percentReserveForJumpStubs * (codeHeapSize / 100); size_t minReserveForJumpStubs = sizeof(CodeHeader) + sizeof(JumpStubBlockHeader) + (size_t) DEFAULT_JUMPSTUBS_PER_BLOCK * BACK_TO_BACK_JUMP_ALLOCATE_SIZE + CODE_SIZE_ALIGN + BYTES_PER_BUCKET; return max(reserveForJumpStubs, minReserveForJumpStubs); #else return 0; #endif } HeapList* LoaderCodeHeap::CreateCodeHeap(CodeHeapRequestInfo *pInfo, LoaderHeap *pJitMetaHeap) { CONTRACT(HeapList *) { THROWS; GC_NOTRIGGER; POSTCONDITION((RETVAL != NULL) || !pInfo->getThrowOnOutOfMemoryWithinRange()); } CONTRACT_END; size_t reserveSize = pInfo->getReserveSize(); size_t initialRequestSize = pInfo->getRequestSize(); const BYTE * loAddr = pInfo->m_loAddr; const BYTE * hiAddr = pInfo->m_hiAddr; // Make sure that what we are reserving will fix inside a DWORD if (reserveSize != (DWORD) reserveSize) { _ASSERTE(!"reserveSize does not fit in a DWORD"); EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE); } LOG((LF_JIT, LL_INFO100, "Request new LoaderCodeHeap::CreateCodeHeap(%08x, %08x, for loader allocator" FMT_ADDR "in" FMT_ADDR ".." 
FMT_ADDR ")\n", (DWORD) reserveSize, (DWORD) initialRequestSize, DBG_ADDR(pInfo->m_pAllocator), DBG_ADDR(loAddr), DBG_ADDR(hiAddr) )); NewHolder<LoaderCodeHeap> pCodeHeap(new LoaderCodeHeap()); BYTE * pBaseAddr = NULL; DWORD dwSizeAcquiredFromInitialBlock = 0; bool fAllocatedFromEmergencyJumpStubReserve = false; pBaseAddr = (BYTE *)pInfo->m_pAllocator->GetCodeHeapInitialBlock(loAddr, hiAddr, (DWORD)initialRequestSize, &dwSizeAcquiredFromInitialBlock); if (pBaseAddr != NULL) { pCodeHeap->m_LoaderHeap.SetReservedRegion(pBaseAddr, dwSizeAcquiredFromInitialBlock, FALSE); } else { if (loAddr != NULL || hiAddr != NULL) { #ifdef _DEBUG // Always exercise the fallback path in the caller when forced relocs are turned on if (!pInfo->getThrowOnOutOfMemoryWithinRange() && PEDecoder::GetForceRelocs()) RETURN NULL; #endif pBaseAddr = (BYTE*)ExecutableAllocator::Instance()->ReserveWithinRange(reserveSize, loAddr, hiAddr); if (!pBaseAddr) { // Conserve emergency jump stub reserve until when it is really needed if (!pInfo->getThrowOnOutOfMemoryWithinRange()) RETURN NULL; #ifdef TARGET_AMD64 pBaseAddr = ExecutionManager::GetEEJitManager()->AllocateFromEmergencyJumpStubReserve(loAddr, hiAddr, &reserveSize); if (!pBaseAddr) ThrowOutOfMemoryWithinRange(); fAllocatedFromEmergencyJumpStubReserve = true; #else ThrowOutOfMemoryWithinRange(); #endif // TARGET_AMD64 } } else { pBaseAddr = (BYTE*)ExecutableAllocator::Instance()->Reserve(reserveSize); if (!pBaseAddr) ThrowOutOfMemory(); } pCodeHeap->m_LoaderHeap.SetReservedRegion(pBaseAddr, reserveSize, TRUE); } // this first allocation is critical as it sets up correctly the loader heap info HeapList *pHp = new HeapList; #if defined(TARGET_AMD64) || defined(TARGET_ARM64) pHp->CLRPersonalityRoutine = (BYTE *)pCodeHeap->m_LoaderHeap.AllocMem(JUMP_ALLOCATE_SIZE); #else // Ensure that the heap has a reserved block of memory and so the GetReservedBytesFree() // and GetAllocPtr() calls below return nonzero values. 
pCodeHeap->m_LoaderHeap.ReservePages(1); #endif pHp->pHeap = pCodeHeap; size_t heapSize = pCodeHeap->m_LoaderHeap.GetReservedBytesFree(); size_t nibbleMapSize = HEAP2MAPSIZE(ROUND_UP_TO_PAGE(heapSize)); pHp->startAddress = (TADDR)pCodeHeap->m_LoaderHeap.GetAllocPtr(); pHp->endAddress = pHp->startAddress; pHp->maxCodeHeapSize = heapSize; pHp->reserveForJumpStubs = fAllocatedFromEmergencyJumpStubReserve ? pHp->maxCodeHeapSize : GetDefaultReserveForJumpStubs(pHp->maxCodeHeapSize); _ASSERTE(heapSize >= initialRequestSize); // We do not need to memset this memory, since ClrVirtualAlloc() guarantees that the memory is zero. // Furthermore, if we avoid writing to it, these pages don't come into our working set pHp->mapBase = ROUND_DOWN_TO_PAGE(pHp->startAddress); // round down to next lower page align pHp->pHdrMap = (DWORD*)(void*)pJitMetaHeap->AllocMem(S_SIZE_T(nibbleMapSize)); LOG((LF_JIT, LL_INFO100, "Created new CodeHeap(" FMT_ADDR ".." FMT_ADDR ")\n", DBG_ADDR(pHp->startAddress), DBG_ADDR(pHp->startAddress+pHp->maxCodeHeapSize) )); #ifdef TARGET_64BIT ExecutableWriterHolder<BYTE> personalityRoutineWriterHolder(pHp->CLRPersonalityRoutine, 12); emitJump(pHp->CLRPersonalityRoutine, personalityRoutineWriterHolder.GetRW(), (void *)ProcessCLRException); #endif // TARGET_64BIT pCodeHeap.SuppressRelease(); RETURN pHp; } void * LoaderCodeHeap::AllocMemForCode_NoThrow(size_t header, size_t size, DWORD alignment, size_t reserveForJumpStubs) { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; if (m_cbMinNextPad > (SSIZE_T)header) header = m_cbMinNextPad; void * p = m_LoaderHeap.AllocMemForCode_NoThrow(header, size, alignment, reserveForJumpStubs); if (p == NULL) return NULL; // If the next allocation would have started in the same nibble map entry, allocate extra space to prevent it from happening // Note that m_cbMinNextPad can be negative m_cbMinNextPad = ALIGN_UP((SIZE_T)p + 1, BYTES_PER_BUCKET) - ((SIZE_T)p + size); return p; } void CodeHeapRequestInfo::Init() { CONTRACTL 
{ NOTHROW; GC_NOTRIGGER; PRECONDITION((m_hiAddr == 0) || ((m_loAddr < m_hiAddr) && ((m_loAddr + m_requestSize) < m_hiAddr))); } CONTRACTL_END; if (m_pAllocator == NULL) m_pAllocator = m_pMD->GetLoaderAllocator(); m_isDynamicDomain = (m_pMD != NULL) && m_pMD->IsLCGMethod(); m_isCollectible = m_pAllocator->IsCollectible(); m_throwOnOutOfMemoryWithinRange = true; } #ifdef FEATURE_EH_FUNCLETS #ifdef HOST_64BIT extern "C" PT_RUNTIME_FUNCTION GetRuntimeFunctionCallback(IN ULONG64 ControlPc, IN PVOID Context) #else extern "C" PT_RUNTIME_FUNCTION GetRuntimeFunctionCallback(IN ULONG ControlPc, IN PVOID Context) #endif { WRAPPER_NO_CONTRACT; PT_RUNTIME_FUNCTION prf = NULL; // We must preserve this so that GCStress=4 eh processing doesnt kill last error. BEGIN_PRESERVE_LAST_ERROR; #ifdef ENABLE_CONTRACTS // Some 64-bit OOM tests use the hosting interface to re-enter the CLR via // RtlVirtualUnwind to track unique stacks at each failure point. RtlVirtualUnwind can // result in the EEJitManager taking a reader lock. This, in turn, results in a // CANNOT_TAKE_LOCK contract violation if a CANNOT_TAKE_LOCK function were on the stack // at the time. While it's theoretically possible for "real" hosts also to re-enter the // CLR via RtlVirtualUnwind, generally they don't, and we'd actually like to catch a real // host causing such a contract violation. Therefore, we'd like to suppress such contract // asserts when these OOM tests are running, but continue to enforce the contracts by // default. This function returns whether to suppress locking violations. 
CONDITIONAL_CONTRACT_VIOLATION( TakesLockViolation, g_pConfig->SuppressLockViolationsOnReentryFromOS()); #endif // ENABLE_CONTRACTS EECodeInfo codeInfo((PCODE)ControlPc); if (codeInfo.IsValid()) prf = codeInfo.GetFunctionEntry(); LOG((LF_EH, LL_INFO1000000, "GetRuntimeFunctionCallback(%p) returned %p\n", ControlPc, prf)); END_PRESERVE_LAST_ERROR; return prf; } #endif // FEATURE_EH_FUNCLETS HeapList* EEJitManager::NewCodeHeap(CodeHeapRequestInfo *pInfo, DomainCodeHeapList *pADHeapList) { CONTRACT(HeapList *) { THROWS; GC_NOTRIGGER; PRECONDITION(m_CodeHeapCritSec.OwnedByCurrentThread()); POSTCONDITION((RETVAL != NULL) || !pInfo->getThrowOnOutOfMemoryWithinRange()); } CONTRACT_END; size_t initialRequestSize = pInfo->getRequestSize(); size_t minReserveSize = VIRTUAL_ALLOC_RESERVE_GRANULARITY; // ( 64 KB) #ifdef HOST_64BIT if (pInfo->m_hiAddr == 0) { if (pADHeapList->m_CodeHeapList.Count() > CODE_HEAP_SIZE_INCREASE_THRESHOLD) { minReserveSize *= 4; // Increase the code heap size to 256 KB for workloads with a lot of code. 
} // For non-DynamicDomains that don't have a loAddr/hiAddr range // we bump up the reserve size for the 64-bit platforms if (!pInfo->IsDynamicDomain()) { minReserveSize *= 8; // CodeHeaps are larger on AMD64 (256 KB to 2048 KB) } } #endif size_t reserveSize = initialRequestSize; #if defined(TARGET_AMD64) || defined(TARGET_ARM64) reserveSize += JUMP_ALLOCATE_SIZE; #endif if (reserveSize < minReserveSize) reserveSize = minReserveSize; reserveSize = ALIGN_UP(reserveSize, VIRTUAL_ALLOC_RESERVE_GRANULARITY); pInfo->setReserveSize(reserveSize); HeapList *pHp = NULL; DWORD flags = RangeSection::RANGE_SECTION_CODEHEAP; if (pInfo->IsDynamicDomain()) { flags |= RangeSection::RANGE_SECTION_COLLECTIBLE; pHp = HostCodeHeap::CreateCodeHeap(pInfo, this); } else { LoaderHeap *pJitMetaHeap = pADHeapList->m_pAllocator->GetLowFrequencyHeap(); if (pInfo->IsCollectible()) flags |= RangeSection::RANGE_SECTION_COLLECTIBLE; pHp = LoaderCodeHeap::CreateCodeHeap(pInfo, pJitMetaHeap); } if (pHp == NULL) { _ASSERTE(!pInfo->getThrowOnOutOfMemoryWithinRange()); RETURN(NULL); } _ASSERTE (pHp != NULL); _ASSERTE (pHp->maxCodeHeapSize >= initialRequestSize); pHp->SetNext(GetCodeHeapList()); EX_TRY { TADDR pStartRange = pHp->GetModuleBase(); TADDR pEndRange = (TADDR) &((BYTE*)pHp->startAddress)[pHp->maxCodeHeapSize]; ExecutionManager::AddCodeRange(pStartRange, pEndRange, this, (RangeSection::RangeSectionFlags)flags, pHp); // // add a table to cover each range in the range list // InstallEEFunctionTable( (PVOID)pStartRange, // this is just an ID that gets passed to RtlDeleteFunctionTable; (PVOID)pStartRange, (ULONG)((ULONG64)pEndRange - (ULONG64)pStartRange), GetRuntimeFunctionCallback, this, DYNFNTABLE_JIT); } EX_CATCH { // If we failed to alloc memory in ExecutionManager::AddCodeRange() // then we will delete the LoaderHeap that we allocated delete pHp->pHeap; delete pHp; pHp = NULL; } EX_END_CATCH(SwallowAllExceptions) if (pHp == NULL) { ThrowOutOfMemory(); } m_pCodeHeap = pHp; HeapList 
**ppHeapList = pADHeapList->m_CodeHeapList.AppendThrowing(); *ppHeapList = pHp; RETURN(pHp); } void* EEJitManager::allocCodeRaw(CodeHeapRequestInfo *pInfo, size_t header, size_t blockSize, unsigned align, HeapList ** ppCodeHeap) { CONTRACT(void *) { THROWS; GC_NOTRIGGER; PRECONDITION(m_CodeHeapCritSec.OwnedByCurrentThread()); POSTCONDITION((RETVAL != NULL) || !pInfo->getThrowOnOutOfMemoryWithinRange()); } CONTRACT_END; pInfo->setRequestSize(header+blockSize+(align-1)+pInfo->getReserveForJumpStubs()); void * mem = NULL; HeapList * pCodeHeap = NULL; DomainCodeHeapList *pList = NULL; // Avoid going through the full list in the common case - try to use the most recently used codeheap if (pInfo->IsDynamicDomain()) { pCodeHeap = (HeapList *)pInfo->m_pAllocator->m_pLastUsedDynamicCodeHeap; pInfo->m_pAllocator->m_pLastUsedDynamicCodeHeap = NULL; } else { pCodeHeap = (HeapList *)pInfo->m_pAllocator->m_pLastUsedCodeHeap; pInfo->m_pAllocator->m_pLastUsedCodeHeap = NULL; } // If we will use a cached code heap, ensure that the code heap meets the constraints if (pCodeHeap && CanUseCodeHeap(pInfo, pCodeHeap)) { mem = (pCodeHeap->pHeap)->AllocMemForCode_NoThrow(header, blockSize, align, pInfo->getReserveForJumpStubs()); } if (mem == NULL) { pList = GetCodeHeapList(pInfo, pInfo->m_pAllocator); if (pList != NULL) { for (int i = 0; i < pList->m_CodeHeapList.Count(); i++) { pCodeHeap = pList->m_CodeHeapList[i]; // Validate that the code heap can be used for the current request if (CanUseCodeHeap(pInfo, pCodeHeap)) { mem = (pCodeHeap->pHeap)->AllocMemForCode_NoThrow(header, blockSize, align, pInfo->getReserveForJumpStubs()); if (mem != NULL) break; } } } if (mem == NULL) { // Let us create a new heap. 
if (pList == NULL) { // not found so need to create the first one pList = CreateCodeHeapList(pInfo); _ASSERTE(pList == GetCodeHeapList(pInfo, pInfo->m_pAllocator)); } _ASSERTE(pList); pCodeHeap = NewCodeHeap(pInfo, pList); if (pCodeHeap == NULL) { _ASSERTE(!pInfo->getThrowOnOutOfMemoryWithinRange()); RETURN(NULL); } mem = (pCodeHeap->pHeap)->AllocMemForCode_NoThrow(header, blockSize, align, pInfo->getReserveForJumpStubs()); if (mem == NULL) ThrowOutOfMemory(); _ASSERTE(mem); } } if (pInfo->IsDynamicDomain()) { pInfo->m_pAllocator->m_pLastUsedDynamicCodeHeap = pCodeHeap; } else { pInfo->m_pAllocator->m_pLastUsedCodeHeap = pCodeHeap; } // Record the pCodeHeap value into ppCodeHeap *ppCodeHeap = pCodeHeap; _ASSERTE((TADDR)mem >= pCodeHeap->startAddress); if (((TADDR) mem)+blockSize > (TADDR)pCodeHeap->endAddress) { // Update the CodeHeap endAddress pCodeHeap->endAddress = (TADDR)mem+blockSize; } RETURN(mem); } void EEJitManager::allocCode(MethodDesc* pMD, size_t blockSize, size_t reserveForJumpStubs, CorJitAllocMemFlag flag, CodeHeader** ppCodeHeader, CodeHeader** ppCodeHeaderRW, size_t* pAllocatedSize, HeapList** ppCodeHeap #ifdef USE_INDIRECT_CODEHEADER , BYTE** ppRealHeader #endif #ifdef FEATURE_EH_FUNCLETS , UINT nUnwindInfos #endif ) { CONTRACTL { THROWS; GC_NOTRIGGER; } CONTRACTL_END; // // Alignment // unsigned alignment = CODE_SIZE_ALIGN; if ((flag & CORJIT_ALLOCMEM_FLG_32BYTE_ALIGN) != 0) { alignment = max(alignment, 32); } else if ((flag & CORJIT_ALLOCMEM_FLG_16BYTE_ALIGN) != 0) { alignment = max(alignment, 16); } #if defined(TARGET_X86) // when not optimizing for code size, 8-byte align the method entry point, so that // the JIT can in turn 8-byte align the loop entry headers. 
else if ((g_pConfig->GenOptimizeType() != OPT_SIZE)) { alignment = max(alignment, 8); } #endif // // Compute header layout // SIZE_T totalSize = blockSize; CodeHeader * pCodeHdr = NULL; CodeHeader * pCodeHdrRW = NULL; CodeHeapRequestInfo requestInfo(pMD); #if defined(FEATURE_JIT_PITCHING) if (pMD && pMD->IsPitchable() && CLRConfig::GetConfigValue(CLRConfig::INTERNAL_JitPitchMethodSizeThreshold) < blockSize) { requestInfo.SetDynamicDomain(); } #endif requestInfo.setReserveForJumpStubs(reserveForJumpStubs); #if defined(USE_INDIRECT_CODEHEADER) SIZE_T realHeaderSize = offsetof(RealCodeHeader, unwindInfos[0]) + (sizeof(T_RUNTIME_FUNCTION) * nUnwindInfos); // if this is a LCG method then we will be allocating the RealCodeHeader // following the code so that the code block can be removed easily by // the LCG code heap. if (requestInfo.IsDynamicDomain()) { totalSize = ALIGN_UP(totalSize, sizeof(void*)) + realHeaderSize; static_assert_no_msg(CODE_SIZE_ALIGN >= sizeof(void*)); } #endif // USE_INDIRECT_CODEHEADER // Scope the lock { CrstHolder ch(&m_CodeHeapCritSec); *ppCodeHeap = NULL; TADDR pCode = (TADDR) allocCodeRaw(&requestInfo, sizeof(CodeHeader), totalSize, alignment, ppCodeHeap); _ASSERTE(*ppCodeHeap); if (pMD->IsLCGMethod()) { pMD->AsDynamicMethodDesc()->GetLCGMethodResolver()->m_recordCodePointer = (void*) pCode; } _ASSERTE(IS_ALIGNED(pCode, alignment)); pCodeHdr = ((CodeHeader *)pCode) - 1; *pAllocatedSize = sizeof(CodeHeader) + totalSize; if (ExecutableAllocator::IsWXORXEnabled()) { pCodeHdrRW = (CodeHeader *)new BYTE[*pAllocatedSize]; } else { pCodeHdrRW = pCodeHdr; } #ifdef USE_INDIRECT_CODEHEADER if (requestInfo.IsDynamicDomain()) { // Set the real code header to the writeable mapping so that we can set its members via the CodeHeader methods below pCodeHdrRW->SetRealCodeHeader((BYTE *)(pCodeHdrRW + 1) + ALIGN_UP(blockSize, sizeof(void*))); } else { // TODO: think about the CodeHeap carrying around a RealCodeHeader chunking mechanism // // allocate the real 
header in the low frequency heap BYTE* pRealHeader = (BYTE*)(void*)pMD->GetLoaderAllocator()->GetLowFrequencyHeap()->AllocMem(S_SIZE_T(realHeaderSize)); pCodeHdrRW->SetRealCodeHeader(pRealHeader); } #endif pCodeHdrRW->SetDebugInfo(NULL); pCodeHdrRW->SetEHInfo(NULL); pCodeHdrRW->SetGCInfo(NULL); pCodeHdrRW->SetMethodDesc(pMD); #ifdef FEATURE_EH_FUNCLETS pCodeHdrRW->SetNumberOfUnwindInfos(nUnwindInfos); #endif #ifdef USE_INDIRECT_CODEHEADER if (requestInfo.IsDynamicDomain()) { *ppRealHeader = (BYTE*)pCode + ALIGN_UP(blockSize, sizeof(void*)); } else { *ppRealHeader = NULL; } #endif // USE_INDIRECT_CODEHEADER } *ppCodeHeader = pCodeHdr; *ppCodeHeaderRW = pCodeHdrRW; } EEJitManager::DomainCodeHeapList *EEJitManager::GetCodeHeapList(CodeHeapRequestInfo *pInfo, LoaderAllocator *pAllocator, BOOL fDynamicOnly) { CONTRACTL { NOTHROW; GC_NOTRIGGER; PRECONDITION(m_CodeHeapCritSec.OwnedByCurrentThread()); } CONTRACTL_END; DomainCodeHeapList *pList = NULL; DomainCodeHeapList **ppList = NULL; int count = 0; // get the appropriate list of heaps // pMD is NULL for NGen modules during Module::LoadTokenTables if (fDynamicOnly || (pInfo != NULL && pInfo->IsDynamicDomain())) { ppList = m_DynamicDomainCodeHeaps.Table(); count = m_DynamicDomainCodeHeaps.Count(); } else { ppList = m_DomainCodeHeaps.Table(); count = m_DomainCodeHeaps.Count(); } // this is a virtual call - pull it out of the loop BOOL fCanUnload = pAllocator->CanUnload(); // look for a DomainCodeHeapList for (int i=0; i < count; i++) { if (ppList[i]->m_pAllocator == pAllocator || (!fCanUnload && !ppList[i]->m_pAllocator->CanUnload())) { pList = ppList[i]; break; } } return pList; } bool EEJitManager::CanUseCodeHeap(CodeHeapRequestInfo *pInfo, HeapList *pCodeHeap) { CONTRACTL { NOTHROW; GC_NOTRIGGER; PRECONDITION(m_CodeHeapCritSec.OwnedByCurrentThread()); } CONTRACTL_END; bool retVal = false; if ((pInfo->m_loAddr == 0) && (pInfo->m_hiAddr == 0)) { // We have no constraint so this non empty heap will be able to satisfy our 
request if (pInfo->IsDynamicDomain()) { _ASSERTE(pCodeHeap->reserveForJumpStubs == 0); retVal = true; } else { BYTE * lastAddr = (BYTE *) pCodeHeap->startAddress + pCodeHeap->maxCodeHeapSize; BYTE * loRequestAddr = (BYTE *) pCodeHeap->endAddress; BYTE * hiRequestAddr = loRequestAddr + pInfo->getRequestSize() + BYTES_PER_BUCKET; if (hiRequestAddr <= lastAddr - pCodeHeap->reserveForJumpStubs) { retVal = true; } } } else { // We also check to see if an allocation in this heap would satisfy // the [loAddr..hiAddr] requirement // Calculate the byte range that can ever be returned by // an allocation in this HeapList element // BYTE * firstAddr = (BYTE *) pCodeHeap->startAddress; BYTE * lastAddr = (BYTE *) pCodeHeap->startAddress + pCodeHeap->maxCodeHeapSize; _ASSERTE(pCodeHeap->startAddress <= pCodeHeap->endAddress); _ASSERTE(firstAddr <= lastAddr); if (pInfo->IsDynamicDomain()) { _ASSERTE(pCodeHeap->reserveForJumpStubs == 0); // We check to see if every allocation in this heap // will satisfy the [loAddr..hiAddr] requirement. // // Dynamic domains use a free list allocator, // thus we can receive any address in the range // when calling AllocMemory with a DynamicDomain // [firstaddr .. lastAddr] must be entirely within // [pInfo->m_loAddr .. pInfo->m_hiAddr] // if ((pInfo->m_loAddr <= firstAddr) && (lastAddr <= pInfo->m_hiAddr)) { // This heap will always satisfy our constraint retVal = true; } } else // non-DynamicDomain { // Calculate the byte range that would be allocated for the // next allocation request into [loRequestAddr..hiRequestAddr] // BYTE * loRequestAddr = (BYTE *) pCodeHeap->endAddress; BYTE * hiRequestAddr = loRequestAddr + pInfo->getRequestSize() + BYTES_PER_BUCKET; _ASSERTE(loRequestAddr <= hiRequestAddr); // loRequestAddr and hiRequestAddr must be entirely within // [pInfo->m_loAddr .. 
            // [pInfo->m_loAddr .. pInfo->m_hiAddr]
            //
            if ((pInfo->m_loAddr <= loRequestAddr) &&
                (hiRequestAddr <= pInfo->m_hiAddr))
            {
                // Additionally hiRequestAddr must also be less than or equal to lastAddr.
                // If throwOnOutOfMemoryWithinRange is not set, conserve reserveForJumpStubs until when it is really needed.
                if (hiRequestAddr <= lastAddr - (pInfo->getThrowOnOutOfMemoryWithinRange() ? 0 : pCodeHeap->reserveForJumpStubs))
                {
                    // This heap will be able to satisfy our constraint
                    retVal = true;
                }
            }
        }
    }

    return retVal;
}

// Creates a new DomainCodeHeapList for the loader allocator described by pInfo
// and registers it in either m_DynamicDomainCodeHeaps (dynamic/LCG methods) or
// m_DomainCodeHeaps.  The caller must hold m_CodeHeapCritSec (see PRECONDITION);
// the NewHolder guarantees the list is freed if AppendThrowing throws.
EEJitManager::DomainCodeHeapList * EEJitManager::CreateCodeHeapList(CodeHeapRequestInfo *pInfo)
{
    CONTRACTL {
        THROWS;
        GC_NOTRIGGER;
        PRECONDITION(m_CodeHeapCritSec.OwnedByCurrentThread());
    } CONTRACTL_END;

    NewHolder<DomainCodeHeapList> pNewList(new DomainCodeHeapList());
    pNewList->m_pAllocator = pInfo->m_pAllocator;

    DomainCodeHeapList **ppList = NULL;
    if (pInfo->IsDynamicDomain())
        ppList = m_DynamicDomainCodeHeaps.AppendThrowing();
    else
        ppList = m_DomainCodeHeaps.AppendThrowing();
    *ppList = pNewList;

    return pNewList.Extract();
}

// Returns the loader heap used to hold JIT metadata (GC info, EH info) for a
// non-LCG method: the low-frequency heap of the method's loader allocator.
LoaderHeap *EEJitManager::GetJitMetaHeap(MethodDesc *pMD)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    } CONTRACTL_END;

    LoaderAllocator *pAllocator = pMD->GetLoaderAllocator();
    _ASSERTE(pAllocator);

    return pAllocator->GetLowFrequencyHeap();
}

// Allocates the GC info blob for a freshly jitted method and records the
// pointer in the CodeHeader.  LCG methods allocate from the resolver's own
// JIT-meta heap (under the code heap lock); other methods allocate from the
// loader allocator's low-frequency heap.  *pAllocationSize receives the size
// so RemoveJitData can back the allocation out on failure.
BYTE* EEJitManager::allocGCInfo(CodeHeader* pCodeHeader, DWORD blockSize, size_t * pAllocationSize)
{
    CONTRACTL {
        THROWS;
        GC_NOTRIGGER;
    } CONTRACTL_END;

    MethodDesc* pMD = pCodeHeader->GetMethodDesc();
    // sadly for light code gen I need the check in here. We should change GetJitMetaHeap
    if (pMD->IsLCGMethod())
    {
        CrstHolder ch(&m_CodeHeapCritSec);
        pCodeHeader->SetGCInfo((BYTE*)(void*)pMD->AsDynamicMethodDesc()->GetResolver()->GetJitMetaHeap()->New(blockSize));
    }
    else
    {
        pCodeHeader->SetGCInfo((BYTE*) (void*)GetJitMetaHeap(pMD)->AllocMem(S_SIZE_T(blockSize)));
    }
    _ASSERTE(pCodeHeader->GetGCInfo()); // AllocMem throws if there's not enough memory

    *pAllocationSize = blockSize;  // Store the allocation size so we can backout later.

    return(pCodeHeader->GetGCInfo());
}

// Raw allocation of an EH-info block for a method, using the same heap
// selection policy as allocGCInfo (LCG resolver heap vs. loader-allocator
// JIT-meta heap).  *pAllocationSize receives the size for later backout.
void* EEJitManager::allocEHInfoRaw(CodeHeader* pCodeHeader, DWORD blockSize, size_t * pAllocationSize)
{
    CONTRACTL {
        THROWS;
        GC_NOTRIGGER;
    } CONTRACTL_END;

    MethodDesc* pMD = pCodeHeader->GetMethodDesc();
    void * mem = NULL;

    // sadly for light code gen I need the check in here. We should change GetJitMetaHeap
    if (pMD->IsLCGMethod())
    {
        CrstHolder ch(&m_CodeHeapCritSec);
        mem = (void*)pMD->AsDynamicMethodDesc()->GetResolver()->GetJitMetaHeap()->New(blockSize);
    }
    else
    {
        mem = (void*)GetJitMetaHeap(pMD)->AllocMem(S_SIZE_T(blockSize));
    }
    _ASSERTE(mem);   // AllocMem throws if there's not enough memory

    *pAllocationSize = blockSize; // Store the allocation size so we can backout later.

    return(mem);
}

// Allocates and initializes the EH clause table for a method.  The clause
// count is stashed in a size_t immediately *before* the EE_ILEXCEPTION so
// InitializeEHEnumeration can recover it later; the returned pointer points
// past that count.  Uses checked arithmetic to guard against overflow of
// (clause table size + sizeof(size_t)).
EE_ILEXCEPTION* EEJitManager::allocEHInfo(CodeHeader* pCodeHeader, unsigned numClauses, size_t * pAllocationSize)
{
    CONTRACTL {
        THROWS;
        GC_NOTRIGGER;
    } CONTRACTL_END;

    // Note - pCodeHeader->phdrJitEHInfo - sizeof(size_t) contains the number of EH clauses

    DWORD temp = EE_ILEXCEPTION::Size(numClauses);
    DWORD blockSize = 0;
    if (!ClrSafeInt<DWORD>::addition(temp, sizeof(size_t), blockSize))
        COMPlusThrowOM();

    BYTE *EHInfo = (BYTE*)allocEHInfoRaw(pCodeHeader, blockSize, pAllocationSize);

    pCodeHeader->SetEHInfo((EE_ILEXCEPTION*) (EHInfo + sizeof(size_t)));
    pCodeHeader->GetEHInfo()->Init(numClauses);
    *((size_t *)EHInfo) = numClauses;
    return(pCodeHeader->GetEHInfo());
}

// Allocates a block of back-to-back jump stubs within the [loAddr..hiAddr]
// address range.  Returns NULL (instead of throwing) when the range cannot be
// satisfied and throwOnOutOfMemoryWithinRange is false.  The CodeHeader placed
// immediately before the block is marked STUB_CODE_BLOCK_JUMPSTUB so the
// nibble map resolves addresses inside it to a stub, not a managed method.
JumpStubBlockHeader *  EEJitManager::allocJumpStubBlock(MethodDesc* pMD, DWORD numJumps,
                                                        BYTE * loAddr, BYTE * hiAddr,
                                                        LoaderAllocator *pLoaderAllocator,
                                                        bool throwOnOutOfMemoryWithinRange)
{
    CONTRACT(JumpStubBlockHeader *) {
        THROWS;
        GC_NOTRIGGER;
        PRECONDITION(loAddr < hiAddr);
        PRECONDITION(pLoaderAllocator != NULL);
        POSTCONDITION((RETVAL != NULL) || !throwOnOutOfMemoryWithinRange);
    } CONTRACT_END;

    _ASSERTE((sizeof(JumpStubBlockHeader) % CODE_SIZE_ALIGN) == 0);

    size_t blockSize = sizeof(JumpStubBlockHeader) + (size_t) numJumps * BACK_TO_BACK_JUMP_ALLOCATE_SIZE;

    HeapList *pCodeHeap = NULL;
    CodeHeapRequestInfo    requestInfo(pMD, pLoaderAllocator, loAddr, hiAddr);
    requestInfo.setThrowOnOutOfMemoryWithinRange(throwOnOutOfMemoryWithinRange);

    TADDR                  mem;
    ExecutableWriterHolder<JumpStubBlockHeader> blockWriterHolder;

    // Scope the lock
    {
        CrstHolder ch(&m_CodeHeapCritSec);

        mem = (TADDR) allocCodeRaw(&requestInfo, sizeof(CodeHeader), blockSize, CODE_SIZE_ALIGN, &pCodeHeap);
        if (mem == NULL)
        {
            _ASSERTE(!throwOnOutOfMemoryWithinRange);
            RETURN(NULL);
        }

        // CodeHeader comes immediately before the block
        CodeHeader * pCodeHdr = (CodeHeader *) (mem - sizeof(CodeHeader));
        ExecutableWriterHolder<CodeHeader> codeHdrWriterHolder(pCodeHdr, sizeof(CodeHeader));
        codeHdrWriterHolder.GetRW()->SetStubCodeBlockKind(STUB_CODE_BLOCK_JUMPSTUB);

        NibbleMapSetUnlocked(pCodeHeap, mem, TRUE);

        blockWriterHolder = ExecutableWriterHolder<JumpStubBlockHeader>((JumpStubBlockHeader *)mem, sizeof(JumpStubBlockHeader));

        _ASSERTE(IS_ALIGNED(blockWriterHolder.GetRW(), CODE_SIZE_ALIGN));
    }

    blockWriterHolder.GetRW()->m_next = NULL;
    blockWriterHolder.GetRW()->m_used = 0;
    blockWriterHolder.GetRW()->m_allocated = numJumps;
    // LCG blocks are owned by their host code heap; everything else by the loader allocator.
    if (pMD && pMD->IsLCGMethod())
        blockWriterHolder.GetRW()->SetHostCodeHeap(static_cast<HostCodeHeap*>(pCodeHeap->pHeap));
    else
        blockWriterHolder.GetRW()->SetLoaderAllocator(pLoaderAllocator);

    LOG((LF_JIT, LL_INFO1000, "Allocated new JumpStubBlockHeader for %d stubs at" FMT_ADDR " in loader allocator " FMT_ADDR "\n",
         numJumps, DBG_ADDR(mem) , DBG_ADDR(pLoaderAllocator) ));

    RETURN((JumpStubBlockHeader*)mem);
}

// Allocates a raw code fragment (e.g. precodes) of the given size/alignment
// in a code heap belonging to pLoaderAllocator, tagging its CodeHeader with
// the supplied stub kind.  On AMD64, jump-stub space is reserved up front in
// case the fragments need to be patched later.
void * EEJitManager::allocCodeFragmentBlock(size_t blockSize, unsigned alignment, LoaderAllocator *pLoaderAllocator, StubCodeBlockKind kind)
{
    CONTRACT(void *) {
        THROWS;
        GC_NOTRIGGER;
        PRECONDITION(pLoaderAllocator != NULL);
        POSTCONDITION(CheckPointer(RETVAL));
    } CONTRACT_END;

    HeapList *pCodeHeap = NULL;
    CodeHeapRequestInfo    requestInfo(NULL, pLoaderAllocator, NULL, NULL);

#ifdef TARGET_AMD64
    // CodeFragments are pretty much always Precodes that may need to be patched with jump stubs at some point in future
    // We will assume the worst case that every FixupPrecode will need to be patched and reserve the jump stubs accordingly
    requestInfo.setReserveForJumpStubs((blockSize / 8) * JUMP_ALLOCATE_SIZE);
#endif

    TADDR                  mem;

    // Scope the lock
    {
        CrstHolder ch(&m_CodeHeapCritSec);

        mem = (TADDR) allocCodeRaw(&requestInfo, sizeof(CodeHeader), blockSize, alignment, &pCodeHeap);

        // CodeHeader comes immediately before the block
        CodeHeader * pCodeHdr = (CodeHeader *) (mem - sizeof(CodeHeader));
        ExecutableWriterHolder<CodeHeader> codeHdrWriterHolder(pCodeHdr, sizeof(CodeHeader));
        codeHdrWriterHolder.GetRW()->SetStubCodeBlockKind(kind);

        NibbleMapSetUnlocked(pCodeHeap, mem, TRUE);

        // Record the jump stub reservation
        pCodeHeap->reserveForJumpStubs += requestInfo.getReserveForJumpStubs();
    }

    RETURN((void *)mem);
}

#endif // !DACCESS_COMPILE

// Returns the GC info blob (with version) for a jitted method.
GCInfoToken EEJitManager::GetGCInfoToken(const METHODTOKEN& MethodToken)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        HOST_NOCALLS;
        SUPPORTS_DAC;
    } CONTRACTL_END;

    // The JIT-ed code always has the current version of GCInfo
    return{ GetCodeHeader(MethodToken)->GetGCInfo(), GCINFO_VERSION };
}

// creates an enumeration and returns the number of EH clauses
// (the count lives in the size_t stored just before the EE_ILEXCEPTION
// by allocEHInfo).
unsigned EEJitManager::InitializeEHEnumeration(const METHODTOKEN& MethodToken, EH_CLAUSE_ENUMERATOR* pEnumState)
{
    LIMITED_METHOD_CONTRACT;
    EE_ILEXCEPTION * EHInfo = GetCodeHeader(MethodToken)->GetEHInfo();

    pEnumState->iCurrentPos = 0;     // since the EH info is not compressed, the clause number is used to do the enumeration
    pEnumState->pExceptionClauseArray = NULL;

    if (!EHInfo)
        return 0;

    pEnumState->pExceptionClauseArray = dac_cast<TADDR>(EHInfo->EHClause(0));
    return *(dac_cast<PTR_unsigned>(dac_cast<TADDR>(EHInfo) - sizeof(size_t)));
}

// Copies the next EH clause out of the enumeration state and returns a token
// identifying it (a pointer to the clause in the table).
PTR_EXCEPTION_CLAUSE_TOKEN EEJitManager::GetNextEHClause(EH_CLAUSE_ENUMERATOR* pEnumState,
                              EE_ILEXCEPTION_CLAUSE* pEHClauseOut)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    } CONTRACTL_END;

    unsigned iCurrentPos = pEnumState->iCurrentPos;
    pEnumState->iCurrentPos++;

    EE_ILEXCEPTION_CLAUSE* pClause = &(dac_cast<PTR_EE_ILEXCEPTION_CLAUSE>(pEnumState->pExceptionClauseArray)[iCurrentPos]);
    *pEHClauseOut = *pClause;
    return dac_cast<PTR_EXCEPTION_CLAUSE_TOKEN>(pClause);
}

#ifndef DACCESS_COMPILE
// Resolves the catch type of a typed EH clause to a TypeHandle, loading it in
// the generic context of the method on the crawl frame.  The exact class and
// method instantiations are only fetched when the type token mentions shared
// type/method variables (E_T_VAR / E_T_MVAR).
TypeHandle EEJitManager::ResolveEHClause(EE_ILEXCEPTION_CLAUSE* pEHClause,
                                         CrawlFrame *pCf)
{
    // We don't want to use a runtime contract here since this codepath is used during
    // the processing of a hard SO. Contracts use a significant amount of stack
    // which we can't afford for those cases.
    STATIC_CONTRACT_THROWS;
    STATIC_CONTRACT_GC_TRIGGERS;

    _ASSERTE(NULL != pCf);
    _ASSERTE(NULL != pEHClause);
    _ASSERTE(IsTypedHandler(pEHClause));

    TypeHandle typeHnd = TypeHandle();   // NOTE(review): appears unused beyond initialization
    mdToken typeTok = mdTokenNil;

    // CachedTypeHandle's are filled in at JIT time, and not cached when accessed multiple times
    if (HasCachedTypeHandle(pEHClause))
    {
        return TypeHandle::FromPtr(pEHClause->TypeHandle);
    }
    else
    {
        typeTok = pEHClause->ClassToken;
    }

    MethodDesc* pMD = pCf->GetFunction();
    Module* pModule = pMD->GetModule();
    PREFIX_ASSUME(pModule != NULL);

    SigTypeContext typeContext(pMD);
    VarKind k = hasNoVars;

    // In the vast majority of cases the code under the "if" below
    // will not be executed.
    //
    // First grab the representative instantiations.  For code
    // shared by multiple generic instantiations these are the
    // canonical (representative) instantiation.
    if (TypeFromToken(typeTok) == mdtTypeSpec)
    {
        PCCOR_SIGNATURE pSig;
        ULONG cSig;
        IfFailThrow(pModule->GetMDImport()->GetTypeSpecFromToken(typeTok, &pSig, &cSig));

        SigPointer psig(pSig, cSig);
        k = psig.IsPolyType(&typeContext);

        // Grab the active class and method instantiation.  This exact instantiation is only
        // needed in the corner case of "generic" exception catching in shared
        // generic code.  We don't need the exact instantiation if the token
        // doesn't contain E_T_VAR or E_T_MVAR.
        if ((k & hasSharableVarsMask) != 0)
        {
            Instantiation classInst;
            Instantiation methodInst;
            pCf->GetExactGenericInstantiations(&classInst, &methodInst);
            SigTypeContext::InitTypeContext(pMD,classInst, methodInst,&typeContext);
        }
    }

    return ClassLoader::LoadTypeDefOrRefOrSpecThrowing(pModule, typeTok, &typeContext,
                                                       ClassLoader::ReturnNullIfNotFound);
}

// Backs out the JIT data for a method whose compilation failed or is being
// discarded: unpublishes the nibble-map entry, frees LCG code memory, and
// backs out GC info / EH info from the JIT-meta heap.  See the TODO below for
// what is knowingly NOT backed out (the code buffer itself).
void EEJitManager::RemoveJitData (CodeHeader * pCHdr, size_t GCinfo_len, size_t EHinfo_len)
{
    CONTRACTL {
        NOTHROW;
        GC_TRIGGERS;
    } CONTRACTL_END;

    MethodDesc* pMD = pCHdr->GetMethodDesc();
    if (pMD->IsLCGMethod()) {

        void * codeStart = (pCHdr + 1);

        {
            CrstHolder ch(&m_CodeHeapCritSec);

            LCGMethodResolver * pResolver = pMD->AsDynamicMethodDesc()->GetLCGMethodResolver();

            // Clear the pointer only if it matches what we are about to free.
            // There can be cases where the JIT is reentered and we JITed the method multiple times.
            if (pResolver->m_recordCodePointer == codeStart)
                pResolver->m_recordCodePointer = NULL;
        }

#if defined(TARGET_AMD64)
        // Remove the unwind information (if applicable)
        UnwindInfoTable::UnpublishUnwindInfoForMethod((TADDR)codeStart);
#endif // defined(TARGET_AMD64)

        HostCodeHeap* pHeap = HostCodeHeap::GetCodeHeap((TADDR)codeStart);
        FreeCodeMemory(pHeap, codeStart);

        // We are leaking GCInfo and EHInfo. They will be freed once the dynamic method is destroyed.
        return;
    }

    {
        CrstHolder ch(&m_CodeHeapCritSec);

        HeapList *pHp = GetCodeHeapList();

        while (pHp && ((pHp->startAddress > (TADDR)pCHdr) ||
                        (pHp->endAddress < (TADDR)pCHdr + sizeof(CodeHeader))))
        {
            pHp = pHp->GetNext();
        }

        _ASSERTE(pHp && pHp->pHdrMap);

        // Better to just return than AV?
        if (pHp == NULL)
            return;

        NibbleMapSetUnlocked(pHp, (TADDR)(pCHdr + 1), FALSE);
    }

    // Backout the GCInfo
    if (GCinfo_len > 0) {
        GetJitMetaHeap(pMD)->BackoutMem(pCHdr->GetGCInfo(), GCinfo_len);
    }

    // Backout the EHInfo
    BYTE *EHInfo = (BYTE *)pCHdr->GetEHInfo();
    if (EHInfo) {
        EHInfo -= sizeof(size_t);

        _ASSERTE(EHinfo_len>0);
        GetJitMetaHeap(pMD)->BackoutMem(EHInfo, EHinfo_len);
    }

    // <TODO>
    // TODO: Although we have backout the GCInfo and EHInfo, we haven't actually backout the
    //       code buffer itself. As a result, we might leak the CodeHeap if jitting fails after
    //       the code buffer is allocated.
    //
    //       However, it appears non-trival to fix this.
    //       Here are some of the reasons:
    //       (1) AllocCode calls in AllocCodeRaw to alloc code buffer in the CodeHeap. The exact size
    //           of the code buffer is not known until the alignment is calculated deep on the stack.
    //       (2) AllocCodeRaw is called in 3 different places. We might need to remember the
    //           information for these places.
    //       (3) AllocCodeRaw might create a new CodeHeap. We should remember exactly which
    //           CodeHeap is used to allocate the code buffer.
    //
    //       Fortunately, this is not a severe leak since the CodeHeap will be reclaimed on appdomain unload.
    //
    // </TODO>
    return;
}

// appdomain is being unloaded, so delete any data associated with it. We have to do this in two stages.
// On the first stage, we remove the elements from the list. On the second stage, which occurs after a GC
// we know that only threads who were in preemptive mode prior to the GC could possibly still be looking
// at an element that is about to be deleted. All such threads are guarded with a reader count, so if the
// count is 0, we can safely delete, otherwise we must add to the cleanup list to be deleted later. We know
// there can only be one unload at a time, so we can use a single var to hold the unlinked, but not deleted,
// elements.
// Deletes all code heaps (both regular and dynamic) that belong to the given
// loader allocator.  Runs under m_CodeHeapCritSec; any thread touching the
// DomainCodeHeapList structures must hold the same lock.
void EEJitManager::Unload(LoaderAllocator *pAllocator)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    } CONTRACTL_END;

    CrstHolder ch(&m_CodeHeapCritSec);

    DomainCodeHeapList **ppList = m_DomainCodeHeaps.Table();
    int count = m_DomainCodeHeaps.Count();

    for (int i=0; i < count; i++) {
        if (ppList[i]->m_pAllocator== pAllocator) {
            DomainCodeHeapList *pList = ppList[i];
            m_DomainCodeHeaps.DeleteByIndex(i);

            // pHeapList is allocated in pHeap, so only need to delete the LoaderHeap itself
            // (note: the loop variable i is deliberately reused for the inner walk;
            // the outer loop exits via the break below, so no state is corrupted)
            count = pList->m_CodeHeapList.Count();
            for (i=0; i < count; i++) {
                HeapList *pHeapList = pList->m_CodeHeapList[i];
                DeleteCodeHeap(pHeapList);
            }

            // this is ok to do delete as anyone accessing the DomainCodeHeapList structure holds the critical section.
            delete pList;

            break;
        }
    }
    ppList = m_DynamicDomainCodeHeaps.Table();
    count = m_DynamicDomainCodeHeaps.Count();
    for (int i=0; i < count; i++) {
        if (ppList[i]->m_pAllocator== pAllocator) {
            DomainCodeHeapList *pList = ppList[i];
            m_DynamicDomainCodeHeaps.DeleteByIndex(i);

            // pHeapList is allocated in pHeap, so only need to delete the CodeHeap itself
            count = pList->m_CodeHeapList.Count();
            for (i=0; i < count; i++) {
                HeapList *pHeapList = pList->m_CodeHeapList[i];
                // m_DynamicDomainCodeHeaps should only contain HostCodeHeap.
                RemoveFromCleanupList(static_cast<HostCodeHeap*>(pHeapList->pHeap));
                DeleteCodeHeap(pHeapList);
            }

            // this is ok to do delete as anyone accessing the DomainCodeHeapList structure holds the critical section.
            delete pList;

            break;
        }
    }

    ExecutableAllocator::ResetLazyPreferredRangeHint();
}

EEJitManager::DomainCodeHeapList::DomainCodeHeapList()
{
    LIMITED_METHOD_CONTRACT;
    m_pAllocator = NULL;
}

EEJitManager::DomainCodeHeapList::~DomainCodeHeapList()
{
    LIMITED_METHOD_CONTRACT;
}

// Removes a (dynamic) CodeHeap from its owning DomainCodeHeapList; deletes the
// whole list if this was its last heap.  Also invalidates the loader
// allocator's cached "last used" dynamic heap if it pointed at the removed one.
// Caller must hold m_CodeHeapCritSec.
void EEJitManager::RemoveCodeHeapFromDomainList(CodeHeap *pHeap, LoaderAllocator *pAllocator)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        PRECONDITION(m_CodeHeapCritSec.OwnedByCurrentThread());
    } CONTRACTL_END;

    // get the AppDomain heap list for pAllocator in m_DynamicDomainCodeHeaps
    DomainCodeHeapList *pList = GetCodeHeapList(NULL, pAllocator, TRUE);

    // go through the heaps and find and remove pHeap
    int count = pList->m_CodeHeapList.Count();
    for (int i = 0; i < count; i++) {
        HeapList *pHeapList = pList->m_CodeHeapList[i];
        if (pHeapList->pHeap == pHeap) {

            // found the heap to remove. If this is the only heap we remove the whole DomainCodeHeapList
            // otherwise we just remove this heap
            if (count == 1) {
                m_DynamicDomainCodeHeaps.Delete(pList);
                delete pList;
            }
            else
                pList->m_CodeHeapList.Delete(i);

            // if this heaplist is cached in the loader allocator, we must clear it
            if (pAllocator->m_pLastUsedDynamicCodeHeap == ((void *) pHeapList))
            {
                pAllocator->m_pLastUsedDynamicCodeHeap = NULL;
            }

            break;
        }
    }
}

// Frees one LCG method's code from its HostCodeHeap: clears the nibble-map
// entry and returns the memory to the heap.  The actual free goes through this
// wrapper (not HostCodeHeap::FreeMemForCode directly) so it happens under
// m_CodeHeapCritSec.
void EEJitManager::FreeCodeMemory(HostCodeHeap *pCodeHeap, void * codeStart)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    CrstHolder ch(&m_CodeHeapCritSec);

    // FreeCodeMemory is only supported on LCG methods,
    // so pCodeHeap can only be a HostCodeHeap.

    // clean up the NibbleMap
    NibbleMapSetUnlocked(pCodeHeap->m_pHeapList, (TADDR)codeStart, FALSE);

    // The caller of this method doesn't call HostCodeHeap->FreeMemForCode
    // directly because the operation should be protected by m_CodeHeapCritSec.
    pCodeHeap->FreeMemForCode(codeStart);
}

// GC-time entry point: forwards to EEJitManager::CleanupCodeHeaps.  May only
// run during process detach or from the GC thread while a GC is in progress.
void ExecutionManager::CleanupCodeHeaps()
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    _ASSERTE (g_fProcessDetach || (GCHeapUtilities::IsGCInProgress() && ::IsGCThread()));

    GetEEJitManager()->CleanupCodeHeaps();
}

// Destroys HostCodeHeaps whose allocation count dropped to zero; heaps that
// gained allocations since being queued are simply left alive.
void EEJitManager::CleanupCodeHeaps()
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    _ASSERTE (g_fProcessDetach || (GCHeapUtilities::IsGCInProgress() && ::IsGCThread()));

    // Quick out, don't even take the lock if we have not cleanup to do.
    // This is important because ETW takes the CodeHeapLock when it is doing
    // rundown, and if there are many JIT compiled methods, this can take a while.
    // Because cleanup is called synchronously before a GC, this means GCs get
    // blocked while ETW is doing rundown.   By not taking the lock we avoid
    // this stall most of the time since cleanup is rare, and ETW rundown is rare
    // the likelihood of both is very very rare.
    if (m_cleanupList == NULL)
        return;

    CrstHolder ch(&m_CodeHeapCritSec);

    if (m_cleanupList == NULL)
        return;

    HostCodeHeap *pHeap = m_cleanupList;
    m_cleanupList = NULL;

    while (pHeap)
    {
        HostCodeHeap *pNextHeap = pHeap->m_pNextHeapToRelease;

        DWORD allocCount = pHeap->m_AllocationCount;
        if (allocCount == 0)
        {
            LOG((LF_BCL, LL_INFO100, "Level2 - Destryoing CodeHeap [0x%p, vt(0x%x)] - ref count 0\n", pHeap, *(size_t*)pHeap));
            RemoveCodeHeapFromDomainList(pHeap, pHeap->m_pAllocator);
            DeleteCodeHeap(pHeap->m_pHeapList);
        }
        else
        {
            LOG((LF_BCL, LL_INFO100, "Level2 - Restoring CodeHeap [0x%p, vt(0x%x)] - ref count %d\n", pHeap, *(size_t*)pHeap, allocCount));
        }
        pHeap = pNextHeap;
    }
}

// Unlinks a heap from the singly-linked pending-cleanup list (no-op if it is
// not on the list).  Caller must hold m_CodeHeapCritSec.
void EEJitManager::RemoveFromCleanupList(HostCodeHeap *pCodeHeap)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        PRECONDITION(m_CodeHeapCritSec.OwnedByCurrentThread());
    } CONTRACTL_END;

    HostCodeHeap *pHeap = m_cleanupList;
    HostCodeHeap *pPrevHeap = NULL;
    while (pHeap)
    {
        if (pHeap == pCodeHeap)
        {
            if (pPrevHeap)
            {
                // remove current heap from list
                pPrevHeap->m_pNextHeapToRelease = pHeap->m_pNextHeapToRelease;
            }
            else
            {
                m_cleanupList = pHeap->m_pNextHeapToRelease;
            }
            break;
        }
        pPrevHeap = pHeap;
        pHeap = pHeap->m_pNextHeapToRelease;
    }
}

// Queues a heap for deferred destruction at the next CleanupCodeHeaps.
// Caller must hold m_CodeHeapCritSec.
void EEJitManager::AddToCleanupList(HostCodeHeap *pCodeHeap)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        PRECONDITION(m_CodeHeapCritSec.OwnedByCurrentThread());
    } CONTRACTL_END;

    // it may happen that the current heap count goes to 0 and later on, before it is destroyed, it gets reused
    // for another dynamic method.
    // It's then possible that the ref count reaches 0 multiple times. If so we simply don't add it again
    // Also on cleanup we check that the ref count is actually 0.
    HostCodeHeap *pHeap = m_cleanupList;
    while (pHeap)
    {
        if (pHeap == pCodeHeap)
        {
            LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p, vt(0x%x)] - Already in list\n", pCodeHeap, *(size_t*)pCodeHeap));
            break;
        }
        pHeap = pHeap->m_pNextHeapToRelease;
    }
    if (pHeap == NULL)
    {
        pCodeHeap->m_pNextHeapToRelease = m_cleanupList;
        m_cleanupList = pCodeHeap;
        LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p, vt(0x%x)] - ref count %d - Adding to cleanup list\n", pCodeHeap, *(size_t*)pCodeHeap, pCodeHeap->m_AllocationCount));
    }
}

// Unlinks a HeapList from m_pCodeHeap, tears down its unwind-function table
// and range-section registration, and deletes both the CodeHeap and the
// HeapList.  Caller must hold m_CodeHeapCritSec.
void EEJitManager::DeleteCodeHeap(HeapList *pHeapList)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        PRECONDITION(m_CodeHeapCritSec.OwnedByCurrentThread());
    } CONTRACTL_END;

    HeapList *pHp = GetCodeHeapList();
    if (pHp == pHeapList)
        m_pCodeHeap = pHp->GetNext();
    else
    {
        HeapList *pHpNext = pHp->GetNext();

        while (pHpNext != pHeapList)
        {
            pHp = pHpNext;
            _ASSERTE(pHp != NULL);  // should always find the HeapList
            pHpNext = pHp->GetNext();
        }
        pHp->SetNext(pHeapList->GetNext());
    }

    DeleteEEFunctionTable((PVOID)pHeapList->GetModuleBase());

    ExecutionManager::DeleteRange((TADDR)pHeapList->GetModuleBase());

    LOG((LF_JIT, LL_INFO100, "DeleteCodeHeap start" FMT_ADDR "end" FMT_ADDR "\n",
                              (const BYTE*)pHeapList->startAddress,
                              (const BYTE*)pHeapList->endAddress     ));

    CodeHeap* pHeap = pHeapList->pHeap;
    delete pHeap;
    delete pHeapList;
}

#endif // #ifndef DACCESS_COMPILE

static
CodeHeader * GetCodeHeaderFromDebugInfoRequest(const DebugInfoRequest & request) { CONTRACTL { NOTHROW; GC_NOTRIGGER; SUPPORTS_DAC; } CONTRACTL_END; TADDR address = (TADDR) request.GetStartAddress(); _ASSERTE(address != NULL); CodeHeader * pHeader = dac_cast<PTR_CodeHeader>(address & ~3) - 1; _ASSERTE(pHeader != NULL); return pHeader; } //----------------------------------------------------------------------------- // Get vars from Jit Store //----------------------------------------------------------------------------- BOOL EEJitManager::GetBoundariesAndVars( const DebugInfoRequest & request, IN FP_IDS_NEW fpNew, IN void * pNewData, OUT ULONG32 * pcMap, OUT ICorDebugInfo::OffsetMapping **ppMap, OUT ULONG32 * pcVars, OUT ICorDebugInfo::NativeVarInfo **ppVars) { CONTRACTL { THROWS; // on OOM. GC_NOTRIGGER; // getting vars shouldn't trigger SUPPORTS_DAC; } CONTRACTL_END; CodeHeader * pHdr = GetCodeHeaderFromDebugInfoRequest(request); _ASSERTE(pHdr != NULL); PTR_BYTE pDebugInfo = pHdr->GetDebugInfo(); // No header created, which means no jit information is available. if (pDebugInfo == NULL) return FALSE; #ifdef FEATURE_ON_STACK_REPLACEMENT BOOL hasFlagByte = TRUE; #else BOOL hasFlagByte = FALSE; #endif // Uncompress. This allocates memory and may throw. 
CompressDebugInfo::RestoreBoundariesAndVars( fpNew, pNewData, // allocators pDebugInfo, // input pcMap, ppMap, // output pcVars, ppVars, // output hasFlagByte ); return TRUE; } #ifdef DACCESS_COMPILE void CodeHeader::EnumMemoryRegions(CLRDataEnumMemoryFlags flags, IJitManager* pJitMan) { CONTRACTL { NOTHROW; GC_NOTRIGGER; SUPPORTS_DAC; } CONTRACTL_END; DAC_ENUM_DTHIS(); #ifdef USE_INDIRECT_CODEHEADER this->pRealCodeHeader.EnumMem(); #endif // USE_INDIRECT_CODEHEADER #ifdef FEATURE_ON_STACK_REPLACEMENT BOOL hasFlagByte = TRUE; #else BOOL hasFlagByte = FALSE; #endif if (this->GetDebugInfo() != NULL) { CompressDebugInfo::EnumMemoryRegions(flags, this->GetDebugInfo(), hasFlagByte); } } //----------------------------------------------------------------------------- // Enumerate for minidumps. //----------------------------------------------------------------------------- void EEJitManager::EnumMemoryRegionsForMethodDebugInfo(CLRDataEnumMemoryFlags flags, MethodDesc * pMD) { CONTRACTL { NOTHROW; GC_NOTRIGGER; SUPPORTS_DAC; } CONTRACTL_END; DebugInfoRequest request; PCODE addrCode = pMD->GetNativeCode(); request.InitFromStartingAddr(pMD, addrCode); CodeHeader * pHeader = GetCodeHeaderFromDebugInfoRequest(request); pHeader->EnumMemoryRegions(flags, NULL); } #endif // DACCESS_COMPILE PCODE EEJitManager::GetCodeAddressForRelOffset(const METHODTOKEN& MethodToken, DWORD relOffset) { WRAPPER_NO_CONTRACT; CodeHeader * pHeader = GetCodeHeader(MethodToken); return pHeader->GetCodeStartAddress() + relOffset; } BOOL EEJitManager::JitCodeToMethodInfo( RangeSection * pRangeSection, PCODE currentPC, MethodDesc ** ppMethodDesc, EECodeInfo * pCodeInfo) { CONTRACTL { NOTHROW; GC_NOTRIGGER; SUPPORTS_DAC; } CONTRACTL_END; _ASSERTE(pRangeSection != NULL); TADDR start = dac_cast<PTR_EEJitManager>(pRangeSection->pjit)->FindMethodCode(pRangeSection, currentPC); if (start == NULL) return FALSE; CodeHeader * pCHdr = PTR_CodeHeader(start - sizeof(CodeHeader)); if (pCHdr->IsStubCodeBlock()) return 
FALSE; _ASSERTE(pCHdr->GetMethodDesc()->SanityCheck()); if (pCodeInfo) { pCodeInfo->m_methodToken = METHODTOKEN(pRangeSection, dac_cast<TADDR>(pCHdr)); // This can be counted on for Jitted code. For NGEN code in the case // where we have hot/cold splitting this isn't valid and we need to // take into account cold code. pCodeInfo->m_relOffset = (DWORD)(PCODEToPINSTR(currentPC) - pCHdr->GetCodeStartAddress()); #ifdef FEATURE_EH_FUNCLETS // Computed lazily by code:EEJitManager::LazyGetFunctionEntry pCodeInfo->m_pFunctionEntry = NULL; #endif } if (ppMethodDesc) { *ppMethodDesc = pCHdr->GetMethodDesc(); } return TRUE; } StubCodeBlockKind EEJitManager::GetStubCodeBlockKind(RangeSection * pRangeSection, PCODE currentPC) { CONTRACTL { NOTHROW; GC_NOTRIGGER; SUPPORTS_DAC; } CONTRACTL_END; TADDR start = dac_cast<PTR_EEJitManager>(pRangeSection->pjit)->FindMethodCode(pRangeSection, currentPC); if (start == NULL) return STUB_CODE_BLOCK_NOCODE; CodeHeader * pCHdr = PTR_CodeHeader(start - sizeof(CodeHeader)); return pCHdr->IsStubCodeBlock() ? pCHdr->GetStubCodeBlockKind() : STUB_CODE_BLOCK_MANAGED; } TADDR EEJitManager::FindMethodCode(PCODE currentPC) { CONTRACTL { NOTHROW; GC_NOTRIGGER; SUPPORTS_DAC; } CONTRACTL_END; RangeSection * pRS = ExecutionManager::FindCodeRange(currentPC, ExecutionManager::GetScanFlags()); if (pRS == NULL || (pRS->flags & RangeSection::RANGE_SECTION_CODEHEAP) == 0) return STUB_CODE_BLOCK_NOCODE; return dac_cast<PTR_EEJitManager>(pRS->pjit)->FindMethodCode(pRS, currentPC); } // Finds the header corresponding to the code at offset "delta". 
// Returns NULL if there is no header for the given "delta"
//
// Walks the nibble map of the heap backing pRangeSection to find the start of
// the code block containing currentPC.  Each nibble records, for a 32-byte
// bucket, the offset (+1) of a code header that begins in that bucket; a zero
// nibble means "no header starts here", so the search walks backwards through
// nibbles and then whole DWORDs until it finds the nearest preceding header.
// Reads use VolatileLoadWithoutBarrier and are coordinated with the atomic
// DWORD writes in NibbleMapSetUnlocked.
TADDR EEJitManager::FindMethodCode(RangeSection * pRangeSection, PCODE currentPC)
{
    LIMITED_METHOD_DAC_CONTRACT;

    _ASSERTE(pRangeSection != NULL);

    HeapList *pHp = dac_cast<PTR_HeapList>(pRangeSection->pHeapListOrZapModule);

    if ((currentPC < pHp->startAddress) ||
        (currentPC > pHp->endAddress))
    {
        return NULL;
    }

    TADDR base = pHp->mapBase;
    TADDR delta = currentPC - base;
    PTR_DWORD pMap = pHp->pHdrMap;
    PTR_DWORD pMapStart = pMap;

    DWORD tmp;

    size_t startPos = ADDR2POS(delta);  // align to 32byte buckets
                                        // ( == index into the array of nibbles)
    DWORD  offset   = ADDR2OFFS(delta); // this is the offset inside the bucket + 1

    _ASSERTE(offset == (offset & NIBBLE_MASK));

    pMap += (startPos >> LOG2_NIBBLES_PER_DWORD); // points to the proper DWORD of the map

    // get DWORD and shift down our nibble

    PREFIX_ASSUME(pMap != NULL);
    tmp = VolatileLoadWithoutBarrier<DWORD>(pMap) >> POS2SHIFTCOUNT(startPos);

    // A header begins in our own bucket, at or before currentPC?
    if ((tmp & NIBBLE_MASK) && ((tmp & NIBBLE_MASK) <= offset) )
    {
        return base + POSOFF2ADDR(startPos, tmp & NIBBLE_MASK);
    }

    // Is there a header in the remainder of the DWORD ?
    tmp = tmp >> NIBBLE_SIZE;

    if (tmp)
    {
        startPos--;
        while (!(tmp & NIBBLE_MASK))
        {
            tmp = tmp >> NIBBLE_SIZE;
            startPos--;
        }
        return base + POSOFF2ADDR(startPos, tmp & NIBBLE_MASK);
    }

    // We skipped the remainder of the DWORD,
    // so we must set startPos to the highest position of
    // previous DWORD, unless we are already on the first DWORD
    if (startPos < NIBBLES_PER_DWORD)
        return NULL;

    startPos = ((startPos >> LOG2_NIBBLES_PER_DWORD) << LOG2_NIBBLES_PER_DWORD) - 1;

    // Skip "headerless" DWORDS
    while (pMapStart < pMap && 0 == (tmp = VolatileLoadWithoutBarrier<DWORD>(--pMap)))
    {
        startPos -= NIBBLES_PER_DWORD;
    }

    // This helps to catch degenerate error cases. This relies on the fact that
    // startPos cannot ever be bigger than MAX_UINT
    if (((INT_PTR)startPos) < 0)
        return NULL;

    // Find the nibble with the header in the DWORD
    while (startPos && !(tmp & NIBBLE_MASK))
    {
        tmp = tmp >> NIBBLE_SIZE;
        startPos--;
    }

    if (startPos == 0 && tmp == 0)
        return NULL;

    return base + POSOFF2ADDR(startPos, tmp & NIBBLE_MASK);
}

#if !defined(DACCESS_COMPILE)

// Locking wrapper over NibbleMapSetUnlocked.
void EEJitManager::NibbleMapSet(HeapList * pHp, TADDR pCode, BOOL bSet)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    } CONTRACTL_END;

    CrstHolder ch(&m_CodeHeapCritSec);
    NibbleMapSetUnlocked(pHp, pCode, bSet);
}

// Publishes (bSet=TRUE) or clears (bSet=FALSE) the nibble-map entry for the
// code block starting at pCode.  The write is a single atomic DWORD store so
// that concurrent readers in FindMethodCode see either the old or new value.
void EEJitManager::NibbleMapSetUnlocked(HeapList * pHp, TADDR pCode, BOOL bSet)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    } CONTRACTL_END;

    // Currently all callers to this method ensure EEJitManager::m_CodeHeapCritSec
    // is held.
    _ASSERTE(m_CodeHeapCritSec.OwnedByCurrentThread());

    _ASSERTE(pCode >= pHp->mapBase);

    size_t delta = pCode - pHp->mapBase;

    size_t pos  = ADDR2POS(delta);
    DWORD value = bSet?ADDR2OFFS(delta):0;

    DWORD index = (DWORD) (pos >> LOG2_NIBBLES_PER_DWORD);
    DWORD mask  = ~((DWORD) HIGHEST_NIBBLE_MASK >> ((pos & NIBBLES_PER_DWORD_MASK) << LOG2_NIBBLE_SIZE));

    value = value << POS2SHIFTCOUNT(pos);

    PTR_DWORD pMap = pHp->pHdrMap;

    // assert that we don't overwrite an existing offset
    // (it's a reset or it is empty)
    _ASSERTE(!value || !((*(pMap+index))& ~mask));

    // It is important for this update to be atomic. Synchronization would be required with FindMethodCode otherwise.
    *(pMap+index) = ((*(pMap+index))&mask)|value;
}

#endif // !DACCESS_COMPILE

#if defined(FEATURE_EH_FUNCLETS)

// Note: This returns the root unwind record (the one that describes the prolog)
// in cases where there is fragmented unwind.
// Linearly scans the method's unwind infos for the RUNTIME_FUNCTION covering
// the code offset in pCodeInfo.  On ARM64 with fragmented unwind, the root
// entry (which carries the prolog unwind codes) is returned instead of the
// matching fragment.
PTR_RUNTIME_FUNCTION EEJitManager::LazyGetFunctionEntry(EECodeInfo * pCodeInfo)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        SUPPORTS_DAC;
    } CONTRACTL_END;

    if (!pCodeInfo->IsValid())
    {
        return NULL;
    }

    CodeHeader * pHeader = GetCodeHeader(pCodeInfo->GetMethodToken());

    DWORD address = RUNTIME_FUNCTION__BeginAddress(pHeader->GetUnwindInfo(0)) + pCodeInfo->GetRelOffset();

    // We need the module base address to calculate the end address of a function from the functionEntry.
    // Thus, save it off right now.
    TADDR baseAddress = pCodeInfo->GetModuleBase();

    // NOTE: We could binary search here, if it would be helpful (e.g., large number of funclets)
    for (UINT iUnwindInfo = 0; iUnwindInfo < pHeader->GetNumberOfUnwindInfos(); iUnwindInfo++)
    {
        PTR_RUNTIME_FUNCTION pFunctionEntry = pHeader->GetUnwindInfo(iUnwindInfo);

        if (RUNTIME_FUNCTION__BeginAddress(pFunctionEntry) <= address && address < RUNTIME_FUNCTION__EndAddress(pFunctionEntry, baseAddress))
        {
#if defined(EXCEPTION_DATA_SUPPORTS_FUNCTION_FRAGMENTS) && defined(TARGET_ARM64)
            // If we might have fragmented unwind, and we're on ARM64, make sure
            // to returning the root record, as the trailing records don't have
            // prolog unwind codes.
            pFunctionEntry = FindRootEntry(pFunctionEntry, baseAddress);
#endif
            return pFunctionEntry;
        }
    }

    return NULL;
}

// Fills pStartFuncletOffsets with the parent-relative start offsets of each
// funclet (unwind infos 1..N, skipping fragments) and returns the funclet
// count.  At most dwLength offsets are written; the count returned may exceed
// dwLength so callers can size the buffer and retry.
DWORD EEJitManager::GetFuncletStartOffsets(const METHODTOKEN& MethodToken, DWORD* pStartFuncletOffsets, DWORD dwLength)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    CodeHeader * pCH = GetCodeHeader(MethodToken);
    TADDR moduleBase = JitTokenToModuleBase(MethodToken);

    _ASSERTE(pCH->GetNumberOfUnwindInfos() >= 1);

    DWORD parentBeginRva = RUNTIME_FUNCTION__BeginAddress(pCH->GetUnwindInfo(0));

    DWORD nFunclets = 0;
    for (COUNT_T iUnwindInfo = 1; iUnwindInfo < pCH->GetNumberOfUnwindInfos(); iUnwindInfo++)
    {
        PTR_RUNTIME_FUNCTION pFunctionEntry = pCH->GetUnwindInfo(iUnwindInfo);

#if defined(EXCEPTION_DATA_SUPPORTS_FUNCTION_FRAGMENTS)
        if (IsFunctionFragment(moduleBase, pFunctionEntry))
        {
            // This is a fragment (not the funclet beginning); skip it
            continue;
        }
#endif // EXCEPTION_DATA_SUPPORTS_FUNCTION_FRAGMENTS

        DWORD funcletBeginRva = RUNTIME_FUNCTION__BeginAddress(pFunctionEntry);
        DWORD relParentOffsetToFunclet = funcletBeginRva - parentBeginRva;

        if (nFunclets < dwLength)
            pStartFuncletOffsets[nFunclets] = relParentOffsetToFunclet;
        nFunclets++;
    }

    return nFunclets;
}

#if defined(DACCESS_COMPILE)

// This function is basically like RtlLookupFunctionEntry(), except that it works with DAC
// to read the function entries out of process.  Also, it can only look up function entries
// inside mscorwks.dll, since DAC doesn't know anything about other unmanaged dll's.
void GetUnmanagedStackWalkInfo(IN  ULONG64   ControlPc,
                               OUT UINT_PTR* pModuleBase,
                               OUT UINT_PTR* pFuncEntry)
{
    WRAPPER_NO_CONTRACT;

    if (pModuleBase)
    {
        *pModuleBase = NULL;
    }

    if (pFuncEntry)
    {
        *pFuncEntry = NULL;
    }

    PEDecoder peDecoder(DacGlobalBase());

    SIZE_T baseAddr = dac_cast<TADDR>(peDecoder.GetBase());
    SIZE_T cbSize   = (SIZE_T)peDecoder.GetVirtualSize();

    // Check if the control PC is inside mscorwks.
    if ( (baseAddr <= ControlPc) &&
         (ControlPc < (baseAddr + cbSize))
       )
    {
        if (pModuleBase)
        {
            *pModuleBase = baseAddr;
        }

        if (pFuncEntry)
        {
            // Check if there is a static function table.
            COUNT_T cbSize = 0;   // NOTE(review): deliberately shadows the outer cbSize
            TADDR   pExceptionDir = peDecoder.GetDirectoryEntryData(IMAGE_DIRECTORY_ENTRY_EXCEPTION, &cbSize);

            if (pExceptionDir != NULL)
            {
                // Do a binary search on the static function table of mscorwks.dll.
                HRESULT hr = E_FAIL;
                TADDR   taFuncEntry;
                T_RUNTIME_FUNCTION functionEntry;

                DWORD dwLow  = 0;
                DWORD dwHigh = cbSize / sizeof(T_RUNTIME_FUNCTION);
                DWORD dwMid  = 0;

                while (dwLow <= dwHigh)
                {
                    dwMid = (dwLow + dwHigh) >> 1;
                    taFuncEntry = pExceptionDir + dwMid * sizeof(T_RUNTIME_FUNCTION);
                    hr = DacReadAll(taFuncEntry, &functionEntry, sizeof(functionEntry), false);
                    if (FAILED(hr))
                    {
                        return;
                    }

                    if (ControlPc < baseAddr + functionEntry.BeginAddress)
                    {
                        dwHigh = dwMid - 1;
                    }
                    else if (ControlPc >= baseAddr + RUNTIME_FUNCTION__EndAddress(&functionEntry, baseAddr))
                    {
                        dwLow = dwMid + 1;
                    }
                    else
                    {
                        _ASSERTE(pFuncEntry);
#ifdef _TARGET_AMD64_
                        // On amd64, match RtlLookupFunctionEntry behavior by resolving indirect function entries
                        // back to the associated owning function entry.
                        if ((functionEntry.UnwindData & RUNTIME_FUNCTION_INDIRECT) != 0)
                        {
                            DWORD dwRvaOfOwningFunctionEntry = (functionEntry.UnwindData & ~RUNTIME_FUNCTION_INDIRECT);
                            taFuncEntry = peDecoder.GetRvaData(dwRvaOfOwningFunctionEntry);
                            hr = DacReadAll(taFuncEntry, &functionEntry, sizeof(functionEntry), false);
                            if (FAILED(hr))
                            {
                                return;
                            }

                            _ASSERTE((functionEntry.UnwindData & RUNTIME_FUNCTION_INDIRECT) == 0);
                        }
#endif // _TARGET_AMD64_

                        *pFuncEntry = (UINT_PTR)(T_RUNTIME_FUNCTION*)PTR_RUNTIME_FUNCTION(taFuncEntry);
                        break;
                    }
                }

                if (dwLow > dwHigh)
                {
                    // Search failed.
                    _ASSERTE(*pFuncEntry == NULL);
                }
            }
        }
    }
}
#endif // DACCESS_COMPILE

// Exported helper for stack walkers: resolves a control PC to its module base
// and RUNTIME_FUNCTION entry (managed code, or -- under DAC -- the runtime's
// own unmanaged code).  Outputs are NULL when the PC is not recognized.
extern "C" void GetRuntimeStackWalkInfo(IN  ULONG64   ControlPc,
                                        OUT UINT_PTR* pModuleBase,
                                        OUT UINT_PTR* pFuncEntry)
{

    WRAPPER_NO_CONTRACT;

    BEGIN_PRESERVE_LAST_ERROR;

    BEGIN_ENTRYPOINT_VOIDRET;

    if (pModuleBase)
        *pModuleBase = NULL;
    if (pFuncEntry)
        *pFuncEntry = NULL;

    EECodeInfo codeInfo((PCODE)ControlPc);
    if (!codeInfo.IsValid())
    {
#if defined(DACCESS_COMPILE)
        GetUnmanagedStackWalkInfo(ControlPc, pModuleBase, pFuncEntry);
#endif // DACCESS_COMPILE
        goto Exit;
    }

    if (pModuleBase)
    {
        *pModuleBase = (UINT_PTR)codeInfo.GetModuleBase();
    }

    if (pFuncEntry)
    {
        *pFuncEntry = (UINT_PTR)(PT_RUNTIME_FUNCTION)codeInfo.GetFunctionEntry();
    }

Exit:
    END_ENTRYPOINT_VOIDRET;

    END_PRESERVE_LAST_ERROR;
}
#endif // FEATURE_EH_FUNCLETS

#ifdef DACCESS_COMPILE

// Dumps every code heap (heap object, code range, and nibble map) into a
// minidump, in addition to the base IJitManager regions.
void EEJitManager::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
{
    IJitManager::EnumMemoryRegions(flags);

    //
    // Save all of the code heaps.
    //

    HeapList* heap;

    for (heap = m_pCodeHeap; heap; heap = heap->GetNext())
    {
        DacEnumHostDPtrMem(heap);

        if (heap->pHeap.IsValid())
        {
            heap->pHeap->EnumMemoryRegions(flags);
        }

        DacEnumMemoryRegion(heap->startAddress, (ULONG32)
                            (heap->endAddress - heap->startAddress));

        if (heap->pHdrMap.IsValid())
        {
            ULONG32 nibbleMapSize = (ULONG32)
                HEAP2MAPSIZE(ROUND_UP_TO_PAGE(heap->maxCodeHeapSize));
            DacEnumMemoryRegion(dac_cast<TADDR>(heap->pHdrMap), nibbleMapSize);
        }
    }
}
#endif // #ifdef DACCESS_COMPILE

#ifndef DACCESS_COMPILE

//*******************************************************
// Execution Manager
//*******************************************************

// Init statics
// One-time startup initialization: locks, the default code manager, and the
// JIT / ReadyToRun managers.
void ExecutionManager::Init()
{
    CONTRACTL {
        THROWS;
        GC_NOTRIGGER;
    } CONTRACTL_END;

    m_JumpStubCrst.Init(CrstJumpStubCache, CrstFlags(CRST_UNSAFE_ANYMODE|CRST_DEBUGGER_THREAD));
    m_RangeCrst.Init(CrstExecuteManRangeLock, CRST_UNSAFE_ANYMODE);

    m_pDefaultCodeMan = new EECodeManager();

    m_pEEJitManager = new EEJitManager();

#ifdef FEATURE_READYTORUN
    m_pReadyToRunJitManager = new ReadyToRunJitManager();
#endif
}

#endif // #ifndef DACCESS_COMPILE

//**************************************************************************
// Returns the RangeSection covering currentPC, taking the reader lock only
// when the caller requests it via scanFlag.
RangeSection *
ExecutionManager::FindCodeRange(PCODE currentPC, ScanFlag scanFlag)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        SUPPORTS_DAC;
    } CONTRACTL_END;

    if (currentPC == NULL)
        return NULL;

    if (scanFlag == ScanReaderLock)
        return FindCodeRangeWithLock(currentPC);

    return GetRangeSection(currentPC);
}

//**************************************************************************
NOINLINE // Make sure that the slow path with lock won't affect the fast path
RangeSection *
ExecutionManager::FindCodeRangeWithLock(PCODE currentPC)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        SUPPORTS_DAC;
    } CONTRACTL_END;

    ReaderLockHolder rlh;
    return GetRangeSection(currentPC);
}


//**************************************************************************
PCODE ExecutionManager::GetCodeStartAddress(PCODE currentPC)
{
    WRAPPER_NO_CONTRACT;
    _ASSERTE(currentPC != NULL);

    EECodeInfo codeInfo(currentPC);
    if (!codeInfo.IsValid())
        return NULL;
    return PINSTRToPCODE(codeInfo.GetStartAddress());
}

//**************************************************************************
// Return the NativeCodeVersion for the method containing currentPC, or an
// empty NativeCodeVersion if the PC is not in managed code.
NativeCodeVersion ExecutionManager::GetNativeCodeVersion(PCODE currentPC)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        FORBID_FAULT;
    } CONTRACTL_END;

    EECodeInfo codeInfo(currentPC);
    return codeInfo.IsValid() ? codeInfo.GetNativeCodeVersion() : NativeCodeVersion();
}

//**************************************************************************
// Return the MethodDesc of the method containing currentPC, or NULL if the
// PC is not in managed code.
MethodDesc * ExecutionManager::GetCodeMethodDesc(PCODE currentPC)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        FORBID_FAULT;
    } CONTRACTL_END

    EECodeInfo codeInfo(currentPC);
    if (!codeInfo.IsValid())
        return NULL;
    return codeInfo.GetMethodDesc();
}

//**************************************************************************
// TRUE if currentPC points into real managed code (stubs excluded).
BOOL ExecutionManager::IsManagedCode(PCODE currentPC)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    } CONTRACTL_END;

    if (currentPC == NULL)
        return FALSE;

    if (GetScanFlags() == ScanReaderLock)
        return IsManagedCodeWithLock(currentPC);

    return IsManagedCodeWorker(currentPC);
}

//**************************************************************************
NOINLINE // Make sure that the slow path with lock won't affect the fast path
BOOL ExecutionManager::IsManagedCodeWithLock(PCODE currentPC)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    } CONTRACTL_END;

    ReaderLockHolder rlh;
    return IsManagedCodeWorker(currentPC);
}

//**************************************************************************
// Overload that lets the caller forbid host calls; when the reader lock
// cannot be taken without calling the host, reports failure through
// pfFailedReaderLock instead of blocking.
BOOL ExecutionManager::IsManagedCode(PCODE currentPC, HostCallPreference hostCallPreference /*=AllowHostCalls*/, BOOL *pfFailedReaderLock /*=NULL*/)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    } CONTRACTL_END;

#ifdef DACCESS_COMPILE
    return IsManagedCode(currentPC);
#else
    if (hostCallPreference == AllowHostCalls)
    {
        return IsManagedCode(currentPC);
    }

    ReaderLockHolder rlh(hostCallPreference);
    if (!rlh.Acquired())
    {
        _ASSERTE(pfFailedReaderLock !=
NULL);
        *pfFailedReaderLock = TRUE;
        return FALSE;
    }

    return IsManagedCodeWorker(currentPC);
#endif
}

//**************************************************************************
// Assumes that the ExecutionManager reader/writer lock is taken or that
// it is safe not to take it.
BOOL ExecutionManager::IsManagedCodeWorker(PCODE currentPC)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    } CONTRACTL_END;

    // This may get called for arbitrary code addresses. Note that the lock is
    // taken over the call to JitCodeToMethodInfo too so that nobody pulls out
    // the range section from underneath us.

    RangeSection * pRS = GetRangeSection(currentPC);
    if (pRS == NULL)
        return FALSE;

    if (pRS->flags & RangeSection::RANGE_SECTION_CODEHEAP)
    {
        // Typically if we find a Jit Manager we are inside a managed method
        // but on we could also be in a stub, so we check for that
        // as well and we don't consider stub to be real managed code.
        TADDR start = dac_cast<PTR_EEJitManager>(pRS->pjit)->FindMethodCode(pRS, currentPC);
        if (start == NULL)
            return FALSE;

        // The CodeHeader immediately precedes the method's code.
        CodeHeader * pCHdr = PTR_CodeHeader(start - sizeof(CodeHeader));
        if (!pCHdr->IsStubCodeBlock())
            return TRUE;
    }
#ifdef FEATURE_READYTORUN
    else
    if (pRS->flags & RangeSection::RANGE_SECTION_READYTORUN)
    {
        if (dac_cast<PTR_ReadyToRunJitManager>(pRS->pjit)->JitCodeToMethodInfo(pRS, currentPC, NULL, NULL))
            return TRUE;
    }
#endif

    return FALSE;
}

//**************************************************************************
// Assumes that it is safe not to take it the ExecutionManager reader/writer lock
BOOL ExecutionManager::IsReadyToRunCode(PCODE currentPC)
{
    CONTRACTL{
        NOTHROW;
        GC_NOTRIGGER;
    } CONTRACTL_END;

    // This may get called for arbitrary code addresses. Note that the lock is
    // taken over the call to JitCodeToMethodInfo too so that nobody pulls out
    // the range section from underneath us.
#ifdef FEATURE_READYTORUN
    RangeSection * pRS = GetRangeSection(currentPC);
    if (pRS != NULL && (pRS->flags & RangeSection::RANGE_SECTION_READYTORUN))
    {
        if (dac_cast<PTR_ReadyToRunJitManager>(pRS->pjit)->JitCodeToMethodInfo(pRS, currentPC, NULL, NULL))
            return TRUE;
    }
#endif

    return FALSE;
}

#ifndef FEATURE_MERGE_JIT_AND_ENGINE
/*********************************************************************/
// This static method returns the name of the jit dll
//
LPCWSTR ExecutionManager::GetJitName()
{
    STANDARD_VM_CONTRACT;

    LPCWSTR  pwzJitName = NULL;

    // Try to obtain a name for the jit library from the env. variable
    IfFailThrow(CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_JitName, const_cast<LPWSTR *>(&pwzJitName)));

    if (NULL == pwzJitName)
    {
        // Fall back to the default jit library name.
        pwzJitName = MAKEDLLNAME_W(W("clrjit"));
    }

    return pwzJitName;
}
#endif // !FEATURE_MERGE_JIT_AND_ENGINE

// Walk the sorted (descending by address) range-section list and return the
// section containing addr, or NULL. Uses a single-entry cache (pLastUsed)
// stored in the head node to short-circuit repeated lookups.
RangeSection* ExecutionManager::GetRangeSection(TADDR addr)
{
    CONTRACTL {
        NOTHROW;
        HOST_NOCALLS;
        GC_NOTRIGGER;
        SUPPORTS_DAC;
    } CONTRACTL_END;

    RangeSection * pHead = m_CodeRangeList;

    if (pHead == NULL)
    {
        return NULL;
    }

    RangeSection *pCurr = pHead;
    RangeSection *pLast = NULL;

#ifndef DACCESS_COMPILE
    RangeSection *pLastUsedRS = (pCurr != NULL) ? pCurr->pLastUsed : NULL;
    if (pLastUsedRS != NULL)
    {
        // positive case: cache hit
        if ((addr >= pLastUsedRS->LowAddress) &&
            (addr <  pLastUsedRS->HighAddress))
        {
            return pLastUsedRS;
        }

        RangeSection * pNextAfterLastUsedRS = pLastUsedRS->pnext;

        // negative case: addr falls in the gap just below the cached section,
        // so no section can contain it (list is sorted descending).
        if ((addr < pLastUsedRS->LowAddress) &&
            (pNextAfterLastUsedRS == NULL || addr >= pNextAfterLastUsedRS->HighAddress))
        {
            return NULL;
        }
    }
#endif

    while (pCurr != NULL)
    {
        // See if addr is in [pCurr->LowAddress .. pCurr->HighAddress)
        if (pCurr->LowAddress <= addr)
        {
            // Since we are sorted, once pCurr->HighAddress is less than addr
            // then all subsequence ones will also be lower, so we are done.
            if (addr >= pCurr->HighAddress)
            {
                // we'll return NULL and put pLast into pLastUsed
                pCurr = NULL;
            }
            else
            {
                // addr must be in [pCurr->LowAddress ..
                // pCurr->HighAddress)
                _ASSERTE((pCurr->LowAddress <= addr) && (addr < pCurr->HighAddress));

                // Found the matching RangeSection
                // we'll return pCurr and put it into pLastUsed
                pLast = pCurr;
            }

            break;
        }
        pLast = pCurr;
        pCurr = pCurr->pnext;
    }

#ifndef DACCESS_COMPILE
    // Cache pCurr as pLastUsed in the head node
    // Unless we are on an MP system with many cpus
    // where this sort of caching actually diminishes scaling during server GC
    // due to many processors writing to a common location
    if (g_SystemInfo.dwNumberOfProcessors < 4 || !GCHeapUtilities::IsServerHeap() || !GCHeapUtilities::IsGCInProgress())
        pHead->pLastUsed = pLast;
#endif

    return pCurr;
}

// Like GetRangeSection, but also reports (via ppPrev) the node immediately
// preceding the match, so the caller can unlink it. No pLastUsed caching.
RangeSection* ExecutionManager::GetRangeSectionAndPrev(RangeSection *pHead, TADDR addr, RangeSection** ppPrev)
{
    WRAPPER_NO_CONTRACT;

    RangeSection *pCurr;
    RangeSection *pPrev;
    RangeSection *result = NULL;

    for (pPrev = NULL, pCurr = pHead;
         pCurr != NULL;
         pPrev = pCurr, pCurr = pCurr->pnext)
    {
        // See if addr is in [pCurr->LowAddress .. pCurr->HighAddress)
        if (pCurr->LowAddress > addr)
            continue;

        // List is sorted descending, so once addr is above a section's
        // upper bound there can be no match further down.
        if (addr >= pCurr->HighAddress)
            break;

        // addr must be in [pCurr->LowAddress ..
        // pCurr->HighAddress)
        _ASSERTE((pCurr->LowAddress <= addr) && (addr < pCurr->HighAddress));

        // Found the matching RangeSection
        result = pCurr;

        // Write back pPrev to ppPrev if it is non-null
        if (ppPrev != NULL)
            *ppPrev = pPrev;

        break;
    }

    // If we failed to find a match write NULL to ppPrev if it is non-null
    if ((ppPrev != NULL) && (result == NULL))
    {
        *ppPrev = NULL;
    }

    return result;
}

/* static */
// Return the zap (NGen) Module whose code range contains currentData, or
// NULL if the address is in a jit code heap, a ReadyToRun image, or unknown.
PTR_Module ExecutionManager::FindZapModule(TADDR currentData)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
        STATIC_CONTRACT_HOST_CALLS;
        SUPPORTS_DAC;
    }
    CONTRACTL_END;

    ReaderLockHolder rlh;

    RangeSection * pRS = GetRangeSection(currentData);
    if (pRS == NULL)
        return NULL;

    if (pRS->flags & RangeSection::RANGE_SECTION_CODEHEAP)
        return NULL;

#ifdef FEATURE_READYTORUN
    if (pRS->flags & RangeSection::RANGE_SECTION_READYTORUN)
        return NULL;
#endif

    return dac_cast<PTR_Module>(pRS->pHeapListOrZapModule);
}

/* static */
// Return the ReadyToRun Module whose code range contains currentData, or
// NULL otherwise.
PTR_Module ExecutionManager::FindReadyToRunModule(TADDR currentData)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
        STATIC_CONTRACT_HOST_CALLS;
        SUPPORTS_DAC;
    }
    CONTRACTL_END;

#ifdef FEATURE_READYTORUN
    ReaderLockHolder rlh;

    RangeSection * pRS = GetRangeSection(currentData);
    if (pRS == NULL)
        return NULL;

    if (pRS->flags & RangeSection::RANGE_SECTION_CODEHEAP)
        return NULL;

    if (pRS->flags & RangeSection::RANGE_SECTION_READYTORUN)
        return dac_cast<PTR_Module>(pRS->pHeapListOrZapModule);;

    return NULL;
#else
    return NULL;
#endif
}

/* static */
// Return the Module owning the GC ref map that covers currentData; unlike
// FindZapModule, ReadyToRun sections are deliberately included.
PTR_Module ExecutionManager::FindModuleForGCRefMap(TADDR currentData)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        SUPPORTS_DAC;
    }
    CONTRACTL_END;

    RangeSection * pRS = FindCodeRange(currentData, ExecutionManager::GetScanFlags());
    if (pRS == NULL)
        return NULL;

    if (pRS->flags & RangeSection::RANGE_SECTION_CODEHEAP)
        return NULL;

#ifdef FEATURE_READYTORUN
    // RANGE_SECTION_READYTORUN is intentionally not filtered out here
#endif

    return dac_cast<PTR_Module>(pRS->pHeapListOrZapModule);
}

#ifndef DACCESS_COMPILE

/* NGenMem depends on this entrypoint */
NOINLINE
// Register a new code range [pStartRange, pEndRange) with the execution
// manager, associating it with the given jit manager and heap.
void ExecutionManager::AddCodeRange(TADDR pStartRange,
                                    TADDR pEndRange,
                                    IJitManager * pJit,
                                    RangeSection::RangeSectionFlags flags,
                                    void * pHp)
{
    CONTRACTL {
        THROWS;
        GC_NOTRIGGER;
        PRECONDITION(CheckPointer(pJit));
        PRECONDITION(CheckPointer(pHp));
    } CONTRACTL_END;

    AddRangeHelper(pStartRange,
                   pEndRange,
                   pJit,
                   flags,
                   dac_cast<TADDR>(pHp));
}

// Allocate a RangeSection for [pStartRange, pEndRange) and link it into the
// global list, which is kept sorted by address (descending).
void ExecutionManager::AddRangeHelper(TADDR pStartRange,
                                      TADDR pEndRange,
                                      IJitManager * pJit,
                                      RangeSection::RangeSectionFlags flags,
                                      TADDR pHeapListOrZapModule)
{
    CONTRACTL {
        THROWS;
        GC_NOTRIGGER;
        HOST_CALLS;
        PRECONDITION(pStartRange < pEndRange);
        PRECONDITION(pHeapListOrZapModule != NULL);
    } CONTRACTL_END;

    RangeSection *pnewrange = new RangeSection;

    _ASSERTE(pEndRange > pStartRange);

    pnewrange->LowAddress  = pStartRange;
    pnewrange->HighAddress = pEndRange;
    pnewrange->pjit        = pJit;
    pnewrange->pnext       = NULL;
    pnewrange->flags       = flags;
    pnewrange->pLastUsed   = NULL;
    pnewrange->pHeapListOrZapModule = pHeapListOrZapModule;
#if defined(TARGET_AMD64)
    pnewrange->pUnwindInfoTable = NULL;
#endif // defined(TARGET_AMD64)
    {
        CrstHolder ch(&m_RangeCrst); // Acquire the Crst before linking in a new RangeList

        RangeSection * current  = m_CodeRangeList;
        RangeSection * previous = NULL;

        if (current != NULL)
        {
            while (true)
            {
                // Sort addresses top down so that more recently created ranges
                // will populate the top of the list
                if (pnewrange->LowAddress > current->LowAddress)
                {
                    // Asserts if ranges are overlapping
                    _ASSERTE(pnewrange->LowAddress >= current->HighAddress);
                    pnewrange->pnext = current;

                    if (previous == NULL) // insert new head
                    {
                        m_CodeRangeList = pnewrange;
                    }
                    else
                    { // insert in the middle
                        previous->pnext = pnewrange;
                    }
                    break;
                }

                RangeSection * next = current->pnext;
                if (next == NULL) // insert at end of list
                {
                    current->pnext = pnewrange;
                    break;
                }

                // Continue walking the RangeSection list
                previous = current;
                current = next;
            }
        }
        else
        {
            m_CodeRangeList = pnewrange;
        }
    }
}

// Deletes a single range starting at pStartRange
void
ExecutionManager::DeleteRange(TADDR pStartRange)
{
    CONTRACTL {
        NOTHROW; // If this becomes throwing, then revisit the queuing of deletes below.
        GC_NOTRIGGER;
    } CONTRACTL_END;

    RangeSection *pCurr = NULL;
    {
        // Acquire the Crst before unlinking a RangeList.
        // NOTE: The Crst must be acquired BEFORE we grab the writer lock, as the
        // writer lock forces us into a forbid suspend thread region, and it's illegal
        // to enter a Crst after the forbid suspend thread region is entered
        CrstHolder ch(&m_RangeCrst);

        // Acquire the WriterLock and prevent any readers from walking the RangeList.
        // This also forces us to enter a forbid suspend thread region, to prevent
        // hijacking profilers from grabbing this thread and walking it (the walk may
        // require the reader lock, which would cause a deadlock).
        WriterLockHolder wlh;

        RangeSection *pPrev = NULL;

        pCurr = GetRangeSectionAndPrev(m_CodeRangeList, pStartRange, &pPrev);

        // pCurr points at the Range that needs to be unlinked from the RangeList
        if (pCurr != NULL)
        {
            // If pPrev is NULL the the head of this list is to be deleted
            if (pPrev == NULL)
            {
                m_CodeRangeList = pCurr->pnext;
            }
            else
            {
                _ASSERT(pPrev->pnext == pCurr);

                pPrev->pnext = pCurr->pnext;
            }

            // Clear the cache pLastUsed in the head node (if any)
            RangeSection * head = m_CodeRangeList;
            if (head != NULL)
            {
                head->pLastUsed = NULL;
            }

            //
            // Cannot delete pCurr here because we own the WriterLock and if this is
            // a hosted scenario then the hosting api callback cannot occur in a forbid
            // suspend region, which the writer lock is.
            //
        }
    }

    //
    // Now delete the node
    //
    if (pCurr != NULL)
    {
#if defined(TARGET_AMD64)
        if (pCurr->pUnwindInfoTable != 0)
            delete pCurr->pUnwindInfoTable;
#endif // defined(TARGET_AMD64)
        delete pCurr;
    }
}

#endif // #ifndef DACCESS_COMPILE

#ifdef DACCESS_COMPILE

// Report every RangeSection in the list (and, for non-codeheap sections, the
// owning Module) to the DAC minidump enumeration.
void ExecutionManager::EnumRangeList(RangeSection* list,
                                     CLRDataEnumMemoryFlags flags)
{
    while (list != NULL)
    {
        // If we can't read the target memory, stop immediately so we don't work
        // with broken data.
        if (!DacEnumMemoryRegion(dac_cast<TADDR>(list), sizeof(*list)))
            break;

        if (list->pjit.IsValid())
        {
            list->pjit->EnumMemoryRegions(flags);
        }

        if (!(list->flags & RangeSection::RANGE_SECTION_CODEHEAP))
        {
            PTR_Module pModule = dac_cast<PTR_Module>(list->pHeapListOrZapModule);

            if (pModule.IsValid())
            {
                pModule->EnumMemoryRegions(flags, true);
            }
        }

        list = list->pnext;
#if defined (_DEBUG)
        // Test hook: when testing on debug builds, we want an easy way to test that the while
        // correctly terminates in the face of ridiculous stuff from the target.
        if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_DumpGeneration_IntentionallyCorruptDataFromTarget) == 1)
        {
            // Force us to struggle on with something bad.
            if (list == NULL)
            {
                list = (RangeSection *)&flags;
            }
        }
#endif // (_DEBUG)
    }
}

// Report the execution manager's global state and the whole range list.
void ExecutionManager::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
{
    STATIC_CONTRACT_HOST_CALLS;

    ReaderLockHolder rlh;

    //
    // Report the global data portions.
    //

    m_CodeRangeList.EnumMem();
    m_pDefaultCodeMan.EnumMem();

    //
    // Walk structures and report.
    //

    if (m_CodeRangeList.IsValid())
    {
        EnumRangeList(m_CodeRangeList, flags);
    }
}
#endif // #ifdef DACCESS_COMPILE

#if !defined(DACCESS_COMPILE)

// Tear down per-LoaderAllocator execution-manager state (stackwalk cache,
// jump stub cache, jit manager state) when an allocator is unloaded.
void ExecutionManager::Unload(LoaderAllocator *pLoaderAllocator)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    } CONTRACTL_END;

    // a size of 0 is a signal to Nirvana to flush the entire cache
    FlushInstructionCache(GetCurrentProcess(),0,0);

    /* StackwalkCacheEntry::EIP is an address into code. Since we are
    unloading the code, we need to invalidate the cache. Otherwise,
    its possible that another appdomain might generate code at the very
    same address, and we might incorrectly think that the old
    StackwalkCacheEntry corresponds to it. So flush the cache.
    */
    StackwalkCache::Invalidate(pLoaderAllocator);

    JumpStubCache * pJumpStubCache = (JumpStubCache *) pLoaderAllocator->m_pJumpStubCache;
    if (pJumpStubCache != NULL)
    {
        delete pJumpStubCache;
        pLoaderAllocator->m_pJumpStubCache = NULL;
    }

    GetEEJitManager()->Unload(pLoaderAllocator);
}

// This method is used by the JIT and the runtime for PreStubs. It will return
// the address of a short jump thunk that will jump to the 'target' address.
// It is only needed when the target architecture has a preferred call instruction
// that doesn't actually span the full address space. This is true for x64 where
// the preferred call instruction is a 32-bit pc-rel call instruction.
// (This is also true on ARM64, but it not true for x86)
//
// For these architectures, in JITed code and in the prestub, we encode direct calls
// using the preferred call instruction and we also try to insure that the Jitted
// code is within the 32-bit pc-rel range of clr.dll to allow direct JIT helper calls.
//
// When the call target is too far away to encode using the preferred call instruction.
// We will create a short code thunk that unconditionally jumps to the target address.
// We call this jump thunk a "jumpStub" in the CLR code.
// We have the requirement that the "jumpStub" that we create on demand be usable by
// the preferred call instruction, this requires that on x64 the location in memory
// where we create the "jumpStub" be within the 32-bit pc-rel range of the call that
// needs it.
//
// The arguments to this method:
//  pMD    - the MethodDesc for the currently managed method in Jitted code
//           or for the target method for a PreStub
//           It is required if calling from or to a dynamic method (LCG method)
//  target - The call target address (this is the address that was too far to encode)
//  loAddr
//  hiAddr - The range of the address that we must place the jumpStub in, so that it
//           can be used to encode the preferred call instruction.
// pLoaderAllocator // - The Loader allocator to use for allocations, this can be null. // When it is null, then the pMD must be valid and is used to obtain // the allocator. // // This method will either locate and return an existing jumpStub thunk that can be // reused for this request, because it meets all of the requirements necessary. // Or it will allocate memory in the required region and create a new jumpStub that // meets all of the requirements necessary. // // Note that for dynamic methods (LCG methods) we cannot share the jumpStubs between // different methods. This is because we allow for the unloading (reclaiming) of // individual dynamic methods. And we associate the jumpStub memory allocated with // the dynamic method that requested the jumpStub. // PCODE ExecutionManager::jumpStub(MethodDesc* pMD, PCODE target, BYTE * loAddr, BYTE * hiAddr, LoaderAllocator *pLoaderAllocator, bool throwOnOutOfMemoryWithinRange) { CONTRACT(PCODE) { THROWS; GC_NOTRIGGER; MODE_ANY; PRECONDITION(pLoaderAllocator != NULL || pMD != NULL); PRECONDITION(loAddr < hiAddr); POSTCONDITION((RETVAL != NULL) || !throwOnOutOfMemoryWithinRange); } CONTRACT_END; PCODE jumpStub = NULL; if (pLoaderAllocator == NULL) { pLoaderAllocator = pMD->GetLoaderAllocator(); } _ASSERTE(pLoaderAllocator != NULL); bool isLCG = pMD && pMD->IsLCGMethod(); LCGMethodResolver * pResolver = nullptr; JumpStubCache * pJumpStubCache = (JumpStubCache *) pLoaderAllocator->m_pJumpStubCache; if (isLCG) { pResolver = pMD->AsDynamicMethodDesc()->GetLCGMethodResolver(); pJumpStubCache = pResolver->m_pJumpStubCache; } CrstHolder ch(&m_JumpStubCrst); if (pJumpStubCache == NULL) { pJumpStubCache = new JumpStubCache(); if (isLCG) { pResolver->m_pJumpStubCache = pJumpStubCache; } else { pLoaderAllocator->m_pJumpStubCache = pJumpStubCache; } } if (isLCG) { // Increment counter of LCG jump stub lookup attempts m_LCG_JumpStubLookup++; } else { // Increment counter of normal jump stub lookup attempts 
        m_normal_JumpStubLookup++;
    }

    // search for a matching jumpstub in the jumpStubCache
    //
    for (JumpStubTable::KeyIterator i = pJumpStubCache->m_Table.Begin(target),
        end = pJumpStubCache->m_Table.End(target); i != end; i++)
    {
        jumpStub = i->m_jumpStub;

        _ASSERTE(jumpStub != NULL);

        // Is the matching entry with the requested range?
        if (((TADDR)loAddr <= jumpStub) && (jumpStub <= (TADDR)hiAddr))
        {
            RETURN(jumpStub);
        }
    }

    // If we get here we need to create a new jump stub
    // add or change the jump stub table to point at the new one
    jumpStub = getNextJumpStub(pMD, target, loAddr, hiAddr, pLoaderAllocator, throwOnOutOfMemoryWithinRange); // this statement can throw
    if (jumpStub == NULL)
    {
        _ASSERTE(!throwOnOutOfMemoryWithinRange);
        RETURN(NULL);
    }

    _ASSERTE(((TADDR)loAddr <= jumpStub) && (jumpStub <= (TADDR)hiAddr));

    LOG((LF_JIT, LL_INFO10000, "Add JumpStub to" FMT_ADDR "at" FMT_ADDR "\n",
         DBG_ADDR(target), DBG_ADDR(jumpStub) ));

    RETURN(jumpStub);
}

// Allocate (or reuse space in) a JumpStubBlockHeader within [loAddr, hiAddr],
// emit the back-to-back jump to 'target' there, and record it in the cache.
// Caller must hold m_JumpStubCrst (see PRECONDITION).
PCODE ExecutionManager::getNextJumpStub(MethodDesc* pMD, PCODE target,
                                        BYTE * loAddr, BYTE * hiAddr,
                                        LoaderAllocator *pLoaderAllocator,
                                        bool throwOnOutOfMemoryWithinRange)
{
    CONTRACT(PCODE) {
        THROWS;
        GC_NOTRIGGER;
        PRECONDITION(pLoaderAllocator != NULL);
        PRECONDITION(m_JumpStubCrst.OwnedByCurrentThread());
        POSTCONDITION((RETVAL != NULL) || !throwOnOutOfMemoryWithinRange);
    } CONTRACT_END;

    BYTE * jumpStub   = NULL;   // executable (RX) address of the new stub
    BYTE * jumpStubRW = NULL;   // writable (RW) mapping of the same stub
    bool   isLCG      = pMD && pMD->IsLCGMethod();

    // For LCG we request a small block of 4 jumpstubs, because we can not share them
    // with any other methods and very frequently our method only needs one jump stub.
    // Using 4 gives a request size of (32 + 4*12) or 80 bytes.
    // Also note that request sizes are rounded up to a multiples of 16.
    // The request size is calculated into 'blockSize' in allocJumpStubBlock.
    // For x64 the value of BACK_TO_BACK_JUMP_ALLOCATE_SIZE is 12 bytes
    // and the sizeof(JumpStubBlockHeader) is 32.
    //

    DWORD numJumpStubs = isLCG ?
4 : DEFAULT_JUMPSTUBS_PER_BLOCK;
    JumpStubCache * pJumpStubCache = (JumpStubCache *) pLoaderAllocator->m_pJumpStubCache;

    if (isLCG)
    {
        // LCG stubs live in the resolver's private cache, not the allocator's.
        LCGMethodResolver * pResolver;
        pResolver      = pMD->AsDynamicMethodDesc()->GetLCGMethodResolver();
        pJumpStubCache = pResolver->m_pJumpStubCache;
    }

    JumpStubBlockHeader ** ppHead   = &(pJumpStubCache->m_pBlocks);
    JumpStubBlockHeader *  curBlock = *ppHead;
    ExecutableWriterHolder<JumpStubBlockHeader> curBlockWriterHolder;

    // allocate a new jumpstub from 'curBlock' if it is not fully allocated
    //
    while (curBlock)
    {
        _ASSERTE(pLoaderAllocator == (isLCG ? curBlock->GetHostCodeHeap()->GetAllocator() : curBlock->GetLoaderAllocator()));

        if (curBlock->m_used < curBlock->m_allocated)
        {
            // Candidate slot is the next unused stub in this block.
            jumpStub = (BYTE *) curBlock + sizeof(JumpStubBlockHeader) + ((size_t) curBlock->m_used * BACK_TO_BACK_JUMP_ALLOCATE_SIZE);

            if ((loAddr <= jumpStub) && (jumpStub <= hiAddr))
            {
                // We will update curBlock->m_used at "DONE"
                size_t blockSize = sizeof(JumpStubBlockHeader) + (size_t) numJumpStubs * BACK_TO_BACK_JUMP_ALLOCATE_SIZE;
                curBlockWriterHolder = ExecutableWriterHolder<JumpStubBlockHeader>(curBlock, blockSize);
                jumpStubRW = (BYTE *)((TADDR)jumpStub + (TADDR)curBlockWriterHolder.GetRW() - (TADDR)curBlock);
                goto DONE;
            }
        }
        curBlock = curBlock->m_next;
    }

    // If we get here then we need to allocate a new JumpStubBlock

    if (isLCG)
    {
#ifdef TARGET_AMD64
        // Note this these values are not requirements, instead we are
        // just confirming the values that are mentioned in the comments.
        _ASSERTE(BACK_TO_BACK_JUMP_ALLOCATE_SIZE == 12);
        _ASSERTE(sizeof(JumpStubBlockHeader) == 32);
#endif

        // Increment counter of LCG jump stub block allocations
        m_LCG_JumpStubBlockAllocCount++;
    }
    else
    {
        // Increment counter of normal jump stub block allocations
        m_normal_JumpStubBlockAllocCount++;
    }

    // allocJumpStubBlock will allocate from the LoaderCodeHeap for normal methods
    // and will allocate from a HostCodeHeap for LCG methods.
// // note that this can throw an OOM exception curBlock = ExecutionManager::GetEEJitManager()->allocJumpStubBlock(pMD, numJumpStubs, loAddr, hiAddr, pLoaderAllocator, throwOnOutOfMemoryWithinRange); if (curBlock == NULL) { _ASSERTE(!throwOnOutOfMemoryWithinRange); RETURN(NULL); } curBlockWriterHolder = ExecutableWriterHolder<JumpStubBlockHeader>(curBlock, sizeof(JumpStubBlockHeader) + ((size_t) (curBlock->m_used + 1) * BACK_TO_BACK_JUMP_ALLOCATE_SIZE)); jumpStubRW = (BYTE *) curBlockWriterHolder.GetRW() + sizeof(JumpStubBlockHeader) + ((size_t) curBlock->m_used * BACK_TO_BACK_JUMP_ALLOCATE_SIZE); jumpStub = (BYTE *) curBlock + sizeof(JumpStubBlockHeader) + ((size_t) curBlock->m_used * BACK_TO_BACK_JUMP_ALLOCATE_SIZE); _ASSERTE((loAddr <= jumpStub) && (jumpStub <= hiAddr)); curBlockWriterHolder.GetRW()->m_next = *ppHead; *ppHead = curBlock; DONE: _ASSERTE((curBlock->m_used < curBlock->m_allocated)); #ifdef TARGET_ARM64 // 8-byte alignment is required on ARM64 _ASSERTE(((UINT_PTR)jumpStub & 7) == 0); #endif emitBackToBackJump(jumpStub, jumpStubRW, (void*) target); #ifdef FEATURE_PERFMAP PerfMap::LogStubs(__FUNCTION__, "emitBackToBackJump", (PCODE)jumpStub, BACK_TO_BACK_JUMP_ALLOCATE_SIZE); #endif // We always add the new jumpstub to the jumpStubCache // _ASSERTE(pJumpStubCache != NULL); JumpStubEntry entry; entry.m_target = target; entry.m_jumpStub = (PCODE)jumpStub; pJumpStubCache->m_Table.Add(entry); curBlockWriterHolder.GetRW()->m_used++; // record that we have used up one more jumpStub in the block // Every time we create a new jumpStub thunk one of these counters is incremented if (isLCG) { // Increment counter of LCG unique jump stubs m_LCG_JumpStubUnique++; } else { // Increment counter of normal unique jump stubs m_normal_JumpStubUnique++; } // Is the 'curBlock' now completely full? 
    if (curBlock->m_used == curBlock->m_allocated)
    {
        if (isLCG)
        {
            // Increment counter of LCG jump stub blocks that are full
            m_LCG_JumpStubBlockFullCount++;

            // Log this "LCG JumpStubBlock filled" along with the four counter values
            STRESS_LOG4(LF_JIT, LL_INFO1000, "LCG JumpStubBlock filled - (%u, %u, %u, %u)\n",
                        m_LCG_JumpStubLookup, m_LCG_JumpStubUnique,
                        m_LCG_JumpStubBlockAllocCount, m_LCG_JumpStubBlockFullCount);
        }
        else
        {
            // Increment counter of normal jump stub blocks that are full
            m_normal_JumpStubBlockFullCount++;

            // Log this "normal JumpStubBlock filled" along with the four counter values
            STRESS_LOG4(LF_JIT, LL_INFO1000, "Normal JumpStubBlock filled - (%u, %u, %u, %u)\n",
                        m_normal_JumpStubLookup, m_normal_JumpStubUnique,
                        m_normal_JumpStubBlockAllocCount, m_normal_JumpStubBlockFullCount);

            if ((m_LCG_JumpStubLookup > 0) && ((m_normal_JumpStubBlockFullCount % 5) == 1))
            {
                // Every 5 occurrence of the above we also
                // Log "LCG JumpStubBlock status" along with the four counter values
                STRESS_LOG4(LF_JIT, LL_INFO1000, "LCG JumpStubBlock status - (%u, %u, %u, %u)\n",
                            m_LCG_JumpStubLookup, m_LCG_JumpStubUnique,
                            m_LCG_JumpStubBlockAllocCount, m_LCG_JumpStubBlockFullCount);
            }
        }
    }

    RETURN((PCODE)jumpStub);
}
#endif // !DACCESS_COMPILE

// Collect the start offsets (relative to pCodeStart, adjusted by ofsAdj) of
// the funclets whose RUNTIME_FUNCTION entries fall within [pCodeStart,
// pCodeStart + size). Writes at most dwLength offsets into
// pStartFuncletOffsets but always counts the total in *pnFunclets.
static void GetFuncletStartOffsetsHelper(PCODE pCodeStart, SIZE_T size, SIZE_T ofsAdj,
    PTR_RUNTIME_FUNCTION pFunctionEntry, TADDR moduleBase,
    DWORD * pnFunclets, DWORD* pStartFuncletOffsets, DWORD dwLength)
{
    _ASSERTE(FitsInU4((pCodeStart + size) - moduleBase));
    DWORD endAddress = (DWORD)((pCodeStart + size) - moduleBase);

    // Entries are sorted and terminated by sentinel value (DWORD)-1
    for (; RUNTIME_FUNCTION__BeginAddress(pFunctionEntry) < endAddress; pFunctionEntry++)
    {
#ifdef TARGET_AMD64
        _ASSERTE((pFunctionEntry->UnwindData & RUNTIME_FUNCTION_INDIRECT) == 0);
#endif

#if defined(EXCEPTION_DATA_SUPPORTS_FUNCTION_FRAGMENTS)
        if (IsFunctionFragment(moduleBase, pFunctionEntry))
        {
            // This is a fragment (not the funclet beginning); skip it
            continue;
        }
#endif // EXCEPTION_DATA_SUPPORTS_FUNCTION_FRAGMENTS

        if (*pnFunclets < dwLength)
        {
            TADDR funcletStartAddress = (moduleBase + RUNTIME_FUNCTION__BeginAddress(pFunctionEntry)) + ofsAdj;
            _ASSERTE(FitsInU4(funcletStartAddress - pCodeStart));
            pStartFuncletOffsets[*pnFunclets] = (DWORD)(funcletStartAddress - pCodeStart);
        }
        (*pnFunclets)++;
    }
}

#if defined(FEATURE_EH_FUNCLETS) && defined(DACCESS_COMPILE)

//
// To locate an entry in the function entry table (the program exceptions data directory), the debugger
// performs a binary search over the table.  This function reports the entries that are encountered in the
// binary search.
//
// Parameters:
//   pRtf: The target function table entry to be located
//   pNativeLayout: A pointer to the loaded native layout for the module containing pRtf
//
static void EnumRuntimeFunctionEntriesToFindEntry(PTR_RUNTIME_FUNCTION pRtf, PTR_PEImageLayout pNativeLayout)
{
    pRtf.EnumMem();

    if (pNativeLayout == NULL)
    {
        return;
    }

    IMAGE_DATA_DIRECTORY * pProgramExceptionsDirectory = pNativeLayout->GetDirectoryEntry(IMAGE_DIRECTORY_ENTRY_EXCEPTION);
    if (!pProgramExceptionsDirectory ||
        (pProgramExceptionsDirectory->Size == 0) ||
        (pProgramExceptionsDirectory->Size % sizeof(T_RUNTIME_FUNCTION) != 0))
    {
        // Program exceptions directory malformatted
        return;
    }

    PTR_BYTE moduleBase(pNativeLayout->GetBase());
    PTR_RUNTIME_FUNCTION firstFunctionEntry(moduleBase + pProgramExceptionsDirectory->VirtualAddress);

    if (pRtf < firstFunctionEntry ||
        ((dac_cast<TADDR>(pRtf) - dac_cast<TADDR>(firstFunctionEntry)) % sizeof(T_RUNTIME_FUNCTION) != 0))
    {
        // Program exceptions directory malformatted
        return;
    }

// Review conversion of size_t to ULONG.
#if defined(_MSC_VER) #pragma warning(push) #pragma warning(disable:4267) #endif // defined(_MSC_VER) ULONG indexToLocate = pRtf - firstFunctionEntry; #if defined(_MSC_VER) #pragma warning(pop) #endif // defined(_MSC_VER) ULONG low = 0; // index in the function entry table of low end of search range ULONG high = (pProgramExceptionsDirectory->Size) / sizeof(T_RUNTIME_FUNCTION) - 1; // index of high end of search range ULONG mid = (low + high) / 2; // index of entry to be compared if (indexToLocate > high) { return; } while (indexToLocate != mid) { PTR_RUNTIME_FUNCTION functionEntry = firstFunctionEntry + mid; functionEntry.EnumMem(); if (indexToLocate > mid) { low = mid + 1; } else { high = mid - 1; } mid = (low + high) / 2; _ASSERTE(low <= mid && mid <= high); } } #endif // FEATURE_EH_FUNCLETS #if defined(FEATURE_READYTORUN) // Return start of exception info for a method, or 0 if the method has no EH info DWORD NativeExceptionInfoLookupTable::LookupExceptionInfoRVAForMethod(PTR_CORCOMPILE_EXCEPTION_LOOKUP_TABLE pExceptionLookupTable, COUNT_T numLookupEntries, DWORD methodStartRVA, COUNT_T* pSize) { CONTRACTL { NOTHROW; GC_NOTRIGGER; SUPPORTS_DAC; } CONTRACTL_END; _ASSERTE(pExceptionLookupTable != NULL); COUNT_T start = 0; COUNT_T end = numLookupEntries - 2; // The last entry in the lookup table (end-1) points to a sentinal entry. // The sentinal entry helps to determine the number of EH clauses for the last table entry. _ASSERTE(pExceptionLookupTable->ExceptionLookupEntry(numLookupEntries-1)->MethodStartRVA == (DWORD)-1); // Binary search the lookup table // Using linear search is faster once we get down to small number of entries. 
while (end - start > 10) { COUNT_T middle = start + (end - start) / 2; _ASSERTE(start < middle && middle < end); DWORD rva = pExceptionLookupTable->ExceptionLookupEntry(middle)->MethodStartRVA; if (methodStartRVA < rva) { end = middle - 1; } else { start = middle; } } for (COUNT_T i = start; i <= end; ++i) { DWORD rva = pExceptionLookupTable->ExceptionLookupEntry(i)->MethodStartRVA; if (methodStartRVA == rva) { CORCOMPILE_EXCEPTION_LOOKUP_TABLE_ENTRY *pEntry = pExceptionLookupTable->ExceptionLookupEntry(i); //Get the count of EH Clause entries CORCOMPILE_EXCEPTION_LOOKUP_TABLE_ENTRY * pNextEntry = pExceptionLookupTable->ExceptionLookupEntry(i + 1); *pSize = pNextEntry->ExceptionInfoRVA - pEntry->ExceptionInfoRVA; return pEntry->ExceptionInfoRVA; } } // Not found return 0; } int NativeUnwindInfoLookupTable::LookupUnwindInfoForMethod(DWORD RelativePc, PTR_RUNTIME_FUNCTION pRuntimeFunctionTable, int Low, int High) { CONTRACTL { NOTHROW; GC_NOTRIGGER; SUPPORTS_DAC; } CONTRACTL_END; #ifdef TARGET_ARM RelativePc |= THUMB_CODE; #endif // Entries are sorted and terminated by sentinel value (DWORD)-1 // Binary search the RUNTIME_FUNCTION table // Use linear search once we get down to a small number of elements // to avoid Binary search overhead. 
    while (High - Low > 10)
    {
        int Middle = Low + (High - Low) / 2;

        PTR_RUNTIME_FUNCTION pFunctionEntry = pRuntimeFunctionTable + Middle;
        if (RelativePc < pFunctionEntry->BeginAddress)
        {
            High = Middle - 1;
        }
        else
        {
            Low = Middle;
        }
    }

    for (int i = Low; i <= High; ++i)
    {
        // This is safe because entries are terminated by sentinel value (DWORD)-1
        PTR_RUNTIME_FUNCTION pNextFunctionEntry = pRuntimeFunctionTable + (i + 1);

        if (RelativePc < pNextFunctionEntry->BeginAddress)
        {
            PTR_RUNTIME_FUNCTION pFunctionEntry = pRuntimeFunctionTable + i;
            if (RelativePc >= pFunctionEntry->BeginAddress)
            {
                return i;
            }
            break;
        }
    }

    return -1;
}

//***************************************************************************************
//***************************************************************************************

#ifndef DACCESS_COMPILE

ReadyToRunJitManager::ReadyToRunJitManager()
{
    WRAPPER_NO_CONTRACT;
}

#endif // #ifndef DACCESS_COMPILE

// Returns the ReadyToRunInfo of the module owning the range section the token lives in.
ReadyToRunInfo * ReadyToRunJitManager::JitTokenToReadyToRunInfo(const METHODTOKEN& MethodToken)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        HOST_NOCALLS;
        SUPPORTS_DAC;
    } CONTRACTL_END;

    return dac_cast<PTR_Module>(MethodToken.m_pRangeSection->pHeapListOrZapModule)->GetReadyToRunInfo();
}

// Maps the R2R image's major version to the GC info format version used by its methods.
UINT32 ReadyToRunJitManager::JitTokenToGCInfoVersion(const METHODTOKEN& MethodToken)
{
    CONTRACTL{
        NOTHROW;
        GC_NOTRIGGER;
        HOST_NOCALLS;
        SUPPORTS_DAC;
    } CONTRACTL_END;

    READYTORUN_HEADER * header = JitTokenToReadyToRunInfo(MethodToken)->GetReadyToRunHeader();

    return GCInfoToken::ReadyToRunVersionToGcInfoVersion(header->MajorVersion);
}

// For R2R code the METHODTOKEN's "code header" is really a RUNTIME_FUNCTION pointer.
PTR_RUNTIME_FUNCTION ReadyToRunJitManager::JitTokenToRuntimeFunction(const METHODTOKEN& MethodToken)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        HOST_NOCALLS;
        SUPPORTS_DAC;
    } CONTRACTL_END;

    return dac_cast<PTR_RUNTIME_FUNCTION>(MethodToken.m_pCodeHeader);
}

// Absolute start address of the method's native code (module base + entry RVA).
TADDR ReadyToRunJitManager::JitTokenToStartAddress(const METHODTOKEN& MethodToken)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        HOST_NOCALLS;
        SUPPORTS_DAC;
    } CONTRACTL_END;

    return JitTokenToModuleBase(MethodToken) +
        RUNTIME_FUNCTION__BeginAddress(dac_cast<PTR_RUNTIME_FUNCTION>(MethodToken.m_pCodeHeader));
}

// Locates the GC info blob for a method; it is laid out directly after the
// method's unwind data in the R2R image.
GCInfoToken ReadyToRunJitManager::GetGCInfoToken(const METHODTOKEN& MethodToken)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        HOST_NOCALLS;
        SUPPORTS_DAC;
    } CONTRACTL_END;

    PTR_RUNTIME_FUNCTION pRuntimeFunction = JitTokenToRuntimeFunction(MethodToken);
    TADDR baseAddress = JitTokenToModuleBase(MethodToken);

#ifndef DACCESS_COMPILE
    if (g_IBCLogger.InstrEnabled())
    {
        ReadyToRunInfo * pInfo = JitTokenToReadyToRunInfo(MethodToken);
        MethodDesc * pMD = pInfo->GetMethodDescForEntryPoint(JitTokenToStartAddress(MethodToken));
        g_IBCLogger.LogMethodGCInfoAccess(pMD);
    }
#endif

    SIZE_T nUnwindDataSize;
    PTR_VOID pUnwindData = GetUnwindDataBlob(baseAddress, pRuntimeFunction, &nUnwindDataSize);

    // GCInfo immediately follows unwind data
    PTR_BYTE gcInfo = dac_cast<PTR_BYTE>(pUnwindData) + nUnwindDataSize;
    UINT32 gcInfoVersion = JitTokenToGCInfoVersion(MethodToken);

    return{ gcInfo, gcInfoVersion };
}

// Sets up pEnumState for enumerating the method's EH clauses and returns the
// number of clauses (0 when the method has no EH info).
unsigned ReadyToRunJitManager::InitializeEHEnumeration(const METHODTOKEN& MethodToken, EH_CLAUSE_ENUMERATOR* pEnumState)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    } CONTRACTL_END;

    ReadyToRunInfo * pReadyToRunInfo = JitTokenToReadyToRunInfo(MethodToken);

    IMAGE_DATA_DIRECTORY * pExceptionInfoDir = pReadyToRunInfo->FindSection(ReadyToRunSectionType::ExceptionInfo);
    if (pExceptionInfoDir == NULL)
        return 0;

    PEImageLayout * pLayout = pReadyToRunInfo->GetImage();

    PTR_CORCOMPILE_EXCEPTION_LOOKUP_TABLE pExceptionLookupTable = dac_cast<PTR_CORCOMPILE_EXCEPTION_LOOKUP_TABLE>(pLayout->GetRvaData(pExceptionInfoDir->VirtualAddress));

    COUNT_T numLookupTableEntries = (COUNT_T)(pExceptionInfoDir->Size / sizeof(CORCOMPILE_EXCEPTION_LOOKUP_TABLE_ENTRY));
    // at least 2 entries (1 valid entry + 1 sentinel entry)
    _ASSERTE(numLookupTableEntries >= 2);

    DWORD methodStartRVA = (DWORD)(JitTokenToStartAddress(MethodToken) - JitTokenToModuleBase(MethodToken));

    COUNT_T ehInfoSize = 0;
    DWORD exceptionInfoRVA
        = NativeExceptionInfoLookupTable::LookupExceptionInfoRVAForMethod(pExceptionLookupTable,
                                                                          numLookupTableEntries,
                                                                          methodStartRVA,
                                                                          &ehInfoSize);
    if (exceptionInfoRVA == 0)
        return 0;

    pEnumState->iCurrentPos = 0;
    pEnumState->pExceptionClauseArray = JitTokenToModuleBase(MethodToken) + exceptionInfoRVA;

    return ehInfoSize / sizeof(CORCOMPILE_EXCEPTION_CLAUSE);
}

// Copies the next EH clause from the enumeration started by InitializeEHEnumeration
// into *pEHClauseOut and advances the enumerator.
PTR_EXCEPTION_CLAUSE_TOKEN ReadyToRunJitManager::GetNextEHClause(EH_CLAUSE_ENUMERATOR* pEnumState,
                              EE_ILEXCEPTION_CLAUSE* pEHClauseOut)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    } CONTRACTL_END;

    unsigned iCurrentPos = pEnumState->iCurrentPos;
    pEnumState->iCurrentPos++;

    CORCOMPILE_EXCEPTION_CLAUSE* pClause = &(dac_cast<PTR_CORCOMPILE_EXCEPTION_CLAUSE>(pEnumState->pExceptionClauseArray)[iCurrentPos]);

    // copy to the input parameter, this is a nice abstraction for the future
    // if we want to compress the Clause encoding, we can do without affecting the call sites
    pEHClauseOut->TryStartPC = pClause->TryStartPC;
    pEHClauseOut->TryEndPC = pClause->TryEndPC;
    pEHClauseOut->HandlerStartPC = pClause->HandlerStartPC;
    pEHClauseOut->HandlerEndPC = pClause->HandlerEndPC;
    pEHClauseOut->Flags = pClause->Flags;
    pEHClauseOut->FilterOffset = pClause->FilterOffset;

    return dac_cast<PTR_EXCEPTION_CLAUSE_TOKEN>(pClause);
}

// Classifies a code address within an R2R image; currently the only stub kind
// recognized is the delay-load method call thunk section.
StubCodeBlockKind ReadyToRunJitManager::GetStubCodeBlockKind(RangeSection * pRangeSection, PCODE currentPC)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
        SUPPORTS_DAC;
    }
    CONTRACTL_END;

    DWORD rva = (DWORD)(currentPC - pRangeSection->LowAddress);

    PTR_ReadyToRunInfo pReadyToRunInfo = dac_cast<PTR_Module>(pRangeSection->pHeapListOrZapModule)->GetReadyToRunInfo();

    PTR_IMAGE_DATA_DIRECTORY pDelayLoadMethodCallThunksDir = pReadyToRunInfo->GetDelayMethodCallThunksSection();
    if (pDelayLoadMethodCallThunksDir != NULL)
    {
        if (pDelayLoadMethodCallThunksDir->VirtualAddress <= rva
                && rva < pDelayLoadMethodCallThunksDir->VirtualAddress + pDelayLoadMethodCallThunksDir->Size)
            return STUB_CODE_BLOCK_METHOD_CALL_THUNK;
    }

    return STUB_CODE_BLOCK_UNKNOWN;
}

#ifndef DACCESS_COMPILE

// Resolves the class token of a typed EH clause to a TypeHandle, loading the
// exact generic instantiation only when the (shared) code actually needs it.
TypeHandle ReadyToRunJitManager::ResolveEHClause(EE_ILEXCEPTION_CLAUSE* pEHClause,
                                                 CrawlFrame* pCf)
{
    CONTRACTL {
        THROWS;
        GC_TRIGGERS;
    } CONTRACTL_END;

    _ASSERTE(NULL != pCf);
    _ASSERTE(NULL != pEHClause);
    _ASSERTE(IsTypedHandler(pEHClause));

    MethodDesc *pMD = PTR_MethodDesc(pCf->GetFunction());

    _ASSERTE(pMD != NULL);

    Module* pModule = pMD->GetModule();
    PREFIX_ASSUME(pModule != NULL);

    SigTypeContext typeContext(pMD);
    VarKind k = hasNoVars;

    mdToken typeTok = pEHClause->ClassToken;

    // In the vast majority of cases the code under the "if" below
    // will not be executed.
    //
    // First grab the representative instantiations.  For code
    // shared by multiple generic instantiations these are the
    // canonical (representative) instantiation.
    if (TypeFromToken(typeTok) == mdtTypeSpec)
    {
        PCCOR_SIGNATURE pSig;
        ULONG cSig;
        IfFailThrow(pModule->GetMDImport()->GetTypeSpecFromToken(typeTok, &pSig, &cSig));

        SigPointer psig(pSig, cSig);
        k = psig.IsPolyType(&typeContext);

        // Grab the active class and method instantiation.  This exact instantiation is only
        // needed in the corner case of "generic" exception catching in shared
        // generic code.  We don't need the exact instantiation if the token
        // doesn't contain E_T_VAR or E_T_MVAR.
        if ((k & hasSharableVarsMask) != 0)
        {
            Instantiation classInst;
            Instantiation methodInst;
            pCf->GetExactGenericInstantiations(&classInst,&methodInst);
            SigTypeContext::InitTypeContext(pMD,classInst, methodInst,&typeContext);
        }
    }

    return ClassLoader::LoadTypeDefOrRefOrSpecThrowing(pModule, typeTok, &typeContext,
                                                       ClassLoader::ReturnNullIfNotFound);
}

#endif // #ifndef DACCESS_COMPILE

//-----------------------------------------------------------------------------
// Ngen info manager
//-----------------------------------------------------------------------------

// Reads the IL<->native offset map and native variable info for a method from
// the R2R image's debug info.  Returns FALSE if the method has no debug info.
// Output arrays are allocated via the caller-supplied fpNew allocator.
BOOL ReadyToRunJitManager::GetBoundariesAndVars(
        const DebugInfoRequest & request,
        IN FP_IDS_NEW fpNew, IN void * pNewData,
        OUT ULONG32 * pcMap,
        OUT ICorDebugInfo::OffsetMapping **ppMap,
        OUT ULONG32 * pcVars,
        OUT ICorDebugInfo::NativeVarInfo **ppVars)
{
    CONTRACTL {
        THROWS;       // on OOM.
        GC_NOTRIGGER; // getting vars shouldn't trigger
        SUPPORTS_DAC;
    } CONTRACTL_END;

    EECodeInfo codeInfo(request.GetStartAddress());
    if (!codeInfo.IsValid())
        return FALSE;

    ReadyToRunInfo * pReadyToRunInfo = JitTokenToReadyToRunInfo(codeInfo.GetMethodToken());
    PTR_RUNTIME_FUNCTION pRuntimeFunction = JitTokenToRuntimeFunction(codeInfo.GetMethodToken());

    PTR_BYTE pDebugInfo = pReadyToRunInfo->GetDebugInfo(pRuntimeFunction);
    if (pDebugInfo == NULL)
        return FALSE;

    // Uncompress. This allocates memory and may throw.
    CompressDebugInfo::RestoreBoundariesAndVars(
        fpNew, pNewData, // allocators
        pDebugInfo,      // input
        pcMap, ppMap,    // output
        pcVars, ppVars,  // output
        FALSE);          // no patchpoint info

    return TRUE;
}

#ifdef DACCESS_COMPILE
//
// Need to write out debug info
//
void ReadyToRunJitManager::EnumMemoryRegionsForMethodDebugInfo(CLRDataEnumMemoryFlags flags, MethodDesc * pMD)
{
    SUPPORTS_DAC;

    EECodeInfo codeInfo(pMD->GetNativeCode());
    if (!codeInfo.IsValid())
        return;

    ReadyToRunInfo * pReadyToRunInfo = JitTokenToReadyToRunInfo(codeInfo.GetMethodToken());
    PTR_RUNTIME_FUNCTION pRuntimeFunction = JitTokenToRuntimeFunction(codeInfo.GetMethodToken());

    PTR_BYTE pDebugInfo = pReadyToRunInfo->GetDebugInfo(pRuntimeFunction);
    if (pDebugInfo == NULL)
        return;

    CompressDebugInfo::EnumMemoryRegions(flags, pDebugInfo, FALSE);
}
#endif

// Translates a native offset within a method into an absolute code address,
// accounting for a (future) hot/cold split of the method body.
PCODE ReadyToRunJitManager::GetCodeAddressForRelOffset(const METHODTOKEN& MethodToken, DWORD relOffset)
{
    WRAPPER_NO_CONTRACT;

    MethodRegionInfo methodRegionInfo;
    JitTokenToMethodRegionInfo(MethodToken, &methodRegionInfo);

    if (relOffset < methodRegionInfo.hotSize)
        return methodRegionInfo.hotStartAddress + relOffset;

    SIZE_T coldOffset = relOffset - methodRegionInfo.hotSize;
    _ASSERTE(coldOffset < methodRegionInfo.coldSize);
    return methodRegionInfo.coldStartAddress + coldOffset;
}

// Maps a code address inside an R2R image to its MethodDesc / EECodeInfo.
// Returns FALSE for addresses that are stubs/thunks or not inside any method.
BOOL ReadyToRunJitManager::JitCodeToMethodInfo(RangeSection * pRangeSection,
                                               PCODE currentPC,
                                               MethodDesc** ppMethodDesc,
                                               OUT EECodeInfo * pCodeInfo)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        SUPPORTS_DAC;
    } CONTRACTL_END;

    // READYTORUN: FUTURE: Hot-cold splitting

    // If the address is in a thunk, return NULL.
if (GetStubCodeBlockKind(pRangeSection, currentPC) != STUB_CODE_BLOCK_UNKNOWN) { return FALSE; } TADDR currentInstr = PCODEToPINSTR(currentPC); TADDR ImageBase = pRangeSection->LowAddress; DWORD RelativePc = (DWORD)(currentInstr - ImageBase); Module * pModule = dac_cast<PTR_Module>(pRangeSection->pHeapListOrZapModule); ReadyToRunInfo * pInfo = pModule->GetReadyToRunInfo(); COUNT_T nRuntimeFunctions = pInfo->m_nRuntimeFunctions; PTR_RUNTIME_FUNCTION pRuntimeFunctions = pInfo->m_pRuntimeFunctions; int MethodIndex = NativeUnwindInfoLookupTable::LookupUnwindInfoForMethod(RelativePc, pRuntimeFunctions, 0, nRuntimeFunctions - 1); if (MethodIndex < 0) return FALSE; if (ppMethodDesc == NULL && pCodeInfo == NULL) { // Bail early if caller doesn't care about the MethodDesc or EECodeInfo. // Avoiding the method desc lookups below also prevents deadlocks when this // is called from IsManagedCode. return TRUE; } #ifdef FEATURE_EH_FUNCLETS // Save the raw entry PTR_RUNTIME_FUNCTION RawFunctionEntry = pRuntimeFunctions + MethodIndex; MethodDesc *pMethodDesc; while ((pMethodDesc = pInfo->GetMethodDescForEntryPoint(ImageBase + RUNTIME_FUNCTION__BeginAddress(pRuntimeFunctions + MethodIndex))) == NULL) MethodIndex--; #endif PTR_RUNTIME_FUNCTION FunctionEntry = pRuntimeFunctions + MethodIndex; if (ppMethodDesc) { #ifdef FEATURE_EH_FUNCLETS *ppMethodDesc = pMethodDesc; #else *ppMethodDesc = pInfo->GetMethodDescForEntryPoint(ImageBase + RUNTIME_FUNCTION__BeginAddress(FunctionEntry)); #endif _ASSERTE(*ppMethodDesc != NULL); } if (pCodeInfo) { pCodeInfo->m_relOffset = (DWORD) (RelativePc - RUNTIME_FUNCTION__BeginAddress(FunctionEntry)); // We are using RUNTIME_FUNCTION as METHODTOKEN pCodeInfo->m_methodToken = METHODTOKEN(pRangeSection, dac_cast<TADDR>(FunctionEntry)); #ifdef FEATURE_EH_FUNCLETS AMD64_ONLY(_ASSERTE((RawFunctionEntry->UnwindData & RUNTIME_FUNCTION_INDIRECT) == 0)); pCodeInfo->m_pFunctionEntry = RawFunctionEntry; #endif } return TRUE; } #if defined(FEATURE_EH_FUNCLETS) 
PTR_RUNTIME_FUNCTION ReadyToRunJitManager::LazyGetFunctionEntry(EECodeInfo * pCodeInfo)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    } CONTRACTL_END;

    if (!pCodeInfo->IsValid())
    {
        return NULL;
    }

    // code:ReadyToRunJitManager::JitCodeToMethodInfo computes PTR_RUNTIME_FUNCTION eagerly. This path is only
    // reachable via EECodeInfo::GetMainFunctionInfo, and so we can just return the main entry.
    _ASSERTE(pCodeInfo->GetRelOffset() == 0);

    return dac_cast<PTR_RUNTIME_FUNCTION>(pCodeInfo->GetMethodToken().m_pCodeHeader);
}

TADDR ReadyToRunJitManager::GetFuncletStartAddress(EECodeInfo * pCodeInfo)
{
    LIMITED_METHOD_DAC_CONTRACT;

    // READYTORUN: FUTURE: Hot-cold splitting

    return IJitManager::GetFuncletStartAddress(pCodeInfo);
}

// Fills pStartFuncletOffsets with the start offsets (relative to the method's hot
// region start) of up to dwLength funclets; returns the total funclet count.
DWORD ReadyToRunJitManager::GetFuncletStartOffsets(const METHODTOKEN& MethodToken, DWORD* pStartFuncletOffsets, DWORD dwLength)
{
    PTR_RUNTIME_FUNCTION pFirstFuncletFunctionEntry = dac_cast<PTR_RUNTIME_FUNCTION>(MethodToken.m_pCodeHeader) + 1;

    TADDR moduleBase = JitTokenToModuleBase(MethodToken);
    DWORD nFunclets = 0;
    MethodRegionInfo regionInfo;
    JitTokenToMethodRegionInfo(MethodToken, &regionInfo);

    // pFirstFuncletFunctionEntry will work for ARM when passed to GetFuncletStartOffsetsHelper()
    // even if it is a fragment of the main body and not a RUNTIME_FUNCTION for the beginning
    // of the first hot funclet, because GetFuncletStartOffsetsHelper() will skip all the function
    // fragments until the first funclet, if any, is found.
    GetFuncletStartOffsetsHelper(regionInfo.hotStartAddress, regionInfo.hotSize, 0,
        pFirstFuncletFunctionEntry, moduleBase,
        &nFunclets, pStartFuncletOffsets, dwLength);

    // READYTORUN: FUTURE: Hot/cold splitting

    return nFunclets;
}

// A funclet is a filter funclet iff its personality routine differs from the one
// used by non-funclet code in the module (checked via the first function entry).
BOOL ReadyToRunJitManager::IsFilterFunclet(EECodeInfo * pCodeInfo)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
    }
    CONTRACTL_END;

    if (!pCodeInfo->IsFunclet())
        return FALSE;

    // Get address of the personality routine for the function being queried.
    SIZE_T size;
    PTR_VOID pUnwindData = GetUnwindDataBlob(pCodeInfo->GetModuleBase(), pCodeInfo->GetFunctionEntry(), &size);
    _ASSERTE(pUnwindData != NULL);

    // Personality routine is always the last element of the unwind data
    DWORD rvaPersonalityRoutine = *(dac_cast<PTR_DWORD>(dac_cast<TADDR>(pUnwindData) + size) - 1);

    // Get the personality routine for the first function in the module, which is guaranteed to be not a funclet.
    ReadyToRunInfo * pInfo = JitTokenToReadyToRunInfo(pCodeInfo->GetMethodToken());
    if (pInfo->m_nRuntimeFunctions == 0)
        return FALSE;

    PTR_VOID pFirstUnwindData = GetUnwindDataBlob(pCodeInfo->GetModuleBase(), pInfo->m_pRuntimeFunctions, &size);
    _ASSERTE(pFirstUnwindData != NULL);
    DWORD rvaFirstPersonalityRoutine = *(dac_cast<PTR_DWORD>(dac_cast<TADDR>(pFirstUnwindData) + size) - 1);

    // Compare the two personality routines. If they are different, then the current function is a filter funclet.
    BOOL fRet = (rvaPersonalityRoutine != rvaFirstPersonalityRoutine);

    // Verify that the optimized implementation is in sync with the slow implementation
    _ASSERTE(fRet == IJitManager::IsFilterFunclet(pCodeInfo));

    return fRet;
}

#endif // FEATURE_EH_FUNCLETS

void ReadyToRunJitManager::JitTokenToMethodRegionInfo(const METHODTOKEN& MethodToken,
                                                      MethodRegionInfo * methodRegionInfo)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        HOST_NOCALLS;
        SUPPORTS_DAC;
        PRECONDITION(methodRegionInfo != NULL);
    } CONTRACTL_END;

    // READYTORUN: FUTURE: Hot-cold splitting

    methodRegionInfo->hotStartAddress  = JitTokenToStartAddress(MethodToken);
    methodRegionInfo->hotSize          = GetCodeManager()->GetFunctionSize(GetGCInfoToken(MethodToken));
    methodRegionInfo->coldStartAddress = 0;
    methodRegionInfo->coldSize         = 0;
}

#ifdef DACCESS_COMPILE

void ReadyToRunJitManager::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
{
    IJitManager::EnumMemoryRegions(flags);
}

#if defined(FEATURE_EH_FUNCLETS)

//
// EnumMemoryRegionsForMethodUnwindInfo - enumerate the memory necessary to read the unwind info for the
// specified method.
//
// Note that in theory, a dump generation library could save the unwind information itself without help
// from us, since it's stored in the image in the standard function table layout for Win64. However,
// dump-generation libraries assume that the image will be available at debug time, and if the image
// isn't available then it is acceptable for stackwalking to break. For ngen images (which are created
// on the client), it usually isn't possible to have the image available at debug time, and so for minidumps
// we must explicitly ensure the unwind information is saved into the dump.
//
// Arguments:
//     flags - EnumMem flags
//     pMD   - MethodDesc for the method in question
//
void ReadyToRunJitManager::EnumMemoryRegionsForMethodUnwindInfo(CLRDataEnumMemoryFlags flags, EECodeInfo * pCodeInfo)
{
    // Get the RUNTIME_FUNCTION entry for this method
    PTR_RUNTIME_FUNCTION pRtf = pCodeInfo->GetFunctionEntry();

    if (pRtf==NULL)
    {
        return;
    }

    // Enumerate the function entry and other entries needed to locate it in the program exceptions directory
    ReadyToRunInfo * pReadyToRunInfo = JitTokenToReadyToRunInfo(pCodeInfo->GetMethodToken());

    EnumRuntimeFunctionEntriesToFindEntry(pRtf, pReadyToRunInfo->GetImage());

    SIZE_T size;
    PTR_VOID pUnwindData = GetUnwindDataBlob(pCodeInfo->GetModuleBase(), pRtf, &size);
    if (pUnwindData != NULL)
        DacEnumMemoryRegion(PTR_TO_TADDR(pUnwindData), size);
}

#endif //FEATURE_EH_FUNCLETS
#endif // #ifdef DACCESS_COMPILE
#endif // FEATURE_READYTORUN
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

// codeman.cpp - a management class for handling multiple code managers
//

#include "common.h"
#include "jitinterface.h"
#include "corjit.h"
#include "jithost.h"
#include "eetwain.h"
#include "eeconfig.h"
#include "excep.h"
#include "appdomain.hpp"
#include "codeman.h"
#include "nibblemapmacros.h"
#include "generics.h"
#include "dynamicmethod.h"
#include "eemessagebox.h"
#include "eventtrace.h"
#include "threadsuspend.h"

#include "exceptionhandling.h"

#include "rtlfunctions.h"

#include "shimload.h"
#include "debuginfostore.h"
#include "strsafe.h"

#include "configuration.h"

#ifdef HOST_64BIT
#define CHECK_DUPLICATED_STRUCT_LAYOUTS
#include "../debug/daccess/fntableaccess.h"
#endif // HOST_64BIT

#ifdef FEATURE_PERFMAP
#include "perfmap.h"
#endif

// Default number of jump stubs in a jump stub block
#define DEFAULT_JUMPSTUBS_PER_BLOCK 32

SPTR_IMPL(EECodeManager, ExecutionManager, m_pDefaultCodeMan);

SPTR_IMPL(EEJitManager, ExecutionManager, m_pEEJitManager);

#ifdef FEATURE_READYTORUN
SPTR_IMPL(ReadyToRunJitManager, ExecutionManager, m_pReadyToRunJitManager);
#endif

#ifndef DACCESS_COMPILE

Volatile<RangeSection *> ExecutionManager::m_CodeRangeList = NULL;
Volatile<LONG> ExecutionManager::m_dwReaderCount = 0;
Volatile<LONG> ExecutionManager::m_dwWriterLock = 0;

#else

SPTR_IMPL(RangeSection, ExecutionManager, m_CodeRangeList);
SVAL_IMPL(LONG, ExecutionManager, m_dwReaderCount);
SVAL_IMPL(LONG, ExecutionManager, m_dwWriterLock);

#endif

#ifndef DACCESS_COMPILE

CrstStatic ExecutionManager::m_JumpStubCrst;
CrstStatic ExecutionManager::m_RangeCrst;

// Diagnostic counters for jump stub allocation; they feed the STRESS_LOG
// messages emitted when jump stub blocks fill up.
unsigned   ExecutionManager::m_normal_JumpStubLookup;
unsigned   ExecutionManager::m_normal_JumpStubUnique;
unsigned   ExecutionManager::m_normal_JumpStubBlockAllocCount;
unsigned   ExecutionManager::m_normal_JumpStubBlockFullCount;

unsigned   ExecutionManager::m_LCG_JumpStubLookup;
unsigned   ExecutionManager::m_LCG_JumpStubUnique;
unsigned   ExecutionManager::m_LCG_JumpStubBlockAllocCount;
unsigned   ExecutionManager::m_LCG_JumpStubBlockFullCount;

#endif // DACCESS_COMPILE

#if defined(TARGET_AMD64) && !defined(DACCESS_COMPILE) // We don't do this on ARM just amd64

// Support for new style unwind information (to allow OS to stack crawl JIT compiled code).

typedef NTSTATUS (WINAPI* RtlAddGrowableFunctionTableFnPtr) (
        PVOID *DynamicTable, PRUNTIME_FUNCTION FunctionTable, ULONG EntryCount,
        ULONG MaximumEntryCount, ULONG_PTR rangeStart, ULONG_PTR rangeEnd);
typedef VOID (WINAPI* RtlGrowFunctionTableFnPtr) (PVOID DynamicTable, ULONG NewEntryCount);
typedef VOID (WINAPI* RtlDeleteGrowableFunctionTableFnPtr) (PVOID DynamicTable);

// OS entry points (only exist on Win8 and above)
static RtlAddGrowableFunctionTableFnPtr pRtlAddGrowableFunctionTable;
static RtlGrowFunctionTableFnPtr pRtlGrowFunctionTable;
static RtlDeleteGrowableFunctionTableFnPtr pRtlDeleteGrowableFunctionTable;
static Volatile<bool> RtlUnwindFtnsInited;

// statics for UnwindInfoTable
Crst* UnwindInfoTable::s_pUnwindInfoTableLock = NULL;
Volatile<bool> UnwindInfoTable::s_publishingActive = false;


#if _DEBUG
// Fake functions on Win7 checked build to exercise the code paths, they are no-ops
NTSTATUS WINAPI FakeRtlAddGrowableFunctionTable (
        PVOID *DynamicTable, PT_RUNTIME_FUNCTION FunctionTable, ULONG EntryCount,
        ULONG MaximumEntryCount, ULONG_PTR rangeStart, ULONG_PTR rangeEnd) { *DynamicTable = (PVOID) 1; return 0; }
VOID WINAPI FakeRtlGrowFunctionTable (PVOID DynamicTable, ULONG NewEntryCount) { }
VOID WINAPI FakeRtlDeleteGrowableFunctionTable (PVOID DynamicTable) {}
#endif

/****************************************************************************/
// initialize the entry points for new win8 unwind info publishing functions.
// return true if the initialize is successful (the functions exist) bool InitUnwindFtns() { CONTRACTL { NOTHROW; } CONTRACTL_END; #ifndef TARGET_UNIX if (!RtlUnwindFtnsInited) { HINSTANCE hNtdll = WszGetModuleHandle(W("ntdll.dll")); if (hNtdll != NULL) { void* growFunctionTable = GetProcAddress(hNtdll, "RtlGrowFunctionTable"); void* deleteGrowableFunctionTable = GetProcAddress(hNtdll, "RtlDeleteGrowableFunctionTable"); void* addGrowableFunctionTable = GetProcAddress(hNtdll, "RtlAddGrowableFunctionTable"); // All or nothing AddGroableFunctionTable is last (marker) if (growFunctionTable != NULL && deleteGrowableFunctionTable != NULL && addGrowableFunctionTable != NULL) { pRtlGrowFunctionTable = (RtlGrowFunctionTableFnPtr) growFunctionTable; pRtlDeleteGrowableFunctionTable = (RtlDeleteGrowableFunctionTableFnPtr) deleteGrowableFunctionTable; pRtlAddGrowableFunctionTable = (RtlAddGrowableFunctionTableFnPtr) addGrowableFunctionTable; } // Don't call FreeLibrary(hNtdll) because GetModuleHandle did *NOT* increment the reference count! 
        }
        else
        {
#if _DEBUG
            // No OS entry points available: fall back to the no-op fakes so the
            // publishing code paths still get exercised on checked builds.
            pRtlGrowFunctionTable = FakeRtlGrowFunctionTable;
            pRtlDeleteGrowableFunctionTable = FakeRtlDeleteGrowableFunctionTable;
            pRtlAddGrowableFunctionTable = FakeRtlAddGrowableFunctionTable;
#endif
        }
        RtlUnwindFtnsInited = true;
    }
    return (pRtlAddGrowableFunctionTable != NULL);
#else // !TARGET_UNIX
    return false;
#endif // !TARGET_UNIX
}

/****************************************************************************/
UnwindInfoTable::UnwindInfoTable(ULONG_PTR rangeStart, ULONG_PTR rangeEnd, ULONG size)
{
    STANDARD_VM_CONTRACT;
    _ASSERTE(s_pUnwindInfoTableLock->OwnedByCurrentThread());
    _ASSERTE((rangeEnd - rangeStart) <= 0x7FFFFFFF);

    cTableCurCount = 0;
    cTableMaxCount = size;
    cDeletedEntries = 0;
    iRangeStart = rangeStart;
    iRangeEnd = rangeEnd;
    hHandle = NULL;
    pTable = new T_RUNTIME_FUNCTION[cTableMaxCount];
}

/****************************************************************************/
UnwindInfoTable::~UnwindInfoTable()
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
    } CONTRACTL_END;
    _ASSERTE(s_publishingActive);

    // We do this lock free because too many places still want no-trigger.  It should be OK
    // It would be cleaner if we could take the lock (we did not have to be GC_NOTRIGGER)
    UnRegister();
    delete[] pTable;
}

/*****************************************************************************/
// Hands the current table to the OS via RtlAddGrowableFunctionTable so it can
// stack-crawl the published range.  Publishing failure is logged and tolerated
// (hHandle stays NULL).
void UnwindInfoTable::Register()
{
    _ASSERTE(s_pUnwindInfoTableLock->OwnedByCurrentThread());
    EX_TRY
    {
        hHandle = NULL;
        NTSTATUS ret = pRtlAddGrowableFunctionTable(&hHandle, pTable, cTableCurCount, cTableMaxCount, iRangeStart, iRangeEnd);
        if (ret != STATUS_SUCCESS)
        {
            _ASSERTE(!"Failed to publish UnwindInfo (ignorable)");
            hHandle = NULL;
            STRESS_LOG3(LF_JIT, LL_ERROR, "UnwindInfoTable::Register ERROR %x creating table [%p, %p]\n", ret, iRangeStart, iRangeEnd);
        }
        else
        {
            STRESS_LOG3(LF_JIT, LL_INFO100, "UnwindInfoTable::Register Handle: %p [%p, %p]\n", hHandle, iRangeStart, iRangeEnd);
        }
    }
    EX_CATCH
    {
        hHandle = NULL;
        STRESS_LOG2(LF_JIT, LL_ERROR, "UnwindInfoTable::Register Exception while creating table [%p, %p]\n",
            iRangeStart, iRangeEnd);
        _ASSERTE(!"Failed to publish UnwindInfo (ignorable)");
    }
    EX_END_CATCH(SwallowAllExceptions)
}

/*****************************************************************************/
void UnwindInfoTable::UnRegister()
{
    PVOID handle = hHandle;
    hHandle = 0;
    if (handle != 0)
    {
        STRESS_LOG3(LF_JIT, LL_INFO100, "UnwindInfoTable::UnRegister Handle: %p [%p, %p]\n", handle, iRangeStart, iRangeEnd);
        pRtlDeleteGrowableFunctionTable(handle);
    }
}

/*****************************************************************************/
// Add 'data' to the linked list whose head is pointed at by 'unwindInfoPtr'
//
/* static */
void UnwindInfoTable::AddToUnwindInfoTable(UnwindInfoTable** unwindInfoPtr, PT_RUNTIME_FUNCTION data,
                                           TADDR rangeStart, TADDR rangeEnd)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
    }
    CONTRACTL_END;
    _ASSERTE(data->BeginAddress <= RUNTIME_FUNCTION__EndAddress(data, rangeStart));
    _ASSERTE(RUNTIME_FUNCTION__EndAddress(data, rangeStart) <= (rangeEnd-rangeStart));
    _ASSERTE(unwindInfoPtr != NULL);

    if (!s_publishingActive)
        return;

    CrstHolder
               ch(s_pUnwindInfoTableLock);
    UnwindInfoTable* unwindInfo = *unwindInfoPtr;

    // Was the original list null?  If so, lazily initialize.
    if (unwindInfo == NULL)
    {
        // We can choose the average method size estimate dynamically based on past experience
        // 128 is the estimated size of an average method, so we can accurately predict
        // how many RUNTIME_FUNCTION entries are in each chunk we allocate.
        ULONG size = (ULONG) ((rangeEnd - rangeStart) / 128) + 1;

        // To ensure we test the growing logic, in debug code make the size much smaller.
        INDEBUG(size = size / 4 + 1);
        unwindInfo = (PTR_UnwindInfoTable)new UnwindInfoTable(rangeStart, rangeEnd, size);
        unwindInfo->Register();
        *unwindInfoPtr = unwindInfo;
    }
    _ASSERTE(unwindInfo != NULL);        // If new had failed, we would have thrown OOM
    _ASSERTE(unwindInfo->cTableCurCount <= unwindInfo->cTableMaxCount);
    _ASSERTE(unwindInfo->iRangeStart == rangeStart);
    _ASSERTE(unwindInfo->iRangeEnd == rangeEnd);

    // Means we had a failure publishing to the OS, in this case we give up
    if (unwindInfo->hHandle == NULL)
        return;

    // Check for the fast path: we are adding to the end of an UnwindInfoTable with space
    if (unwindInfo->cTableCurCount < unwindInfo->cTableMaxCount)
    {
        if (unwindInfo->cTableCurCount == 0 ||
            unwindInfo->pTable[unwindInfo->cTableCurCount-1].BeginAddress < data->BeginAddress)
        {
            // Yeah, we can simply add to the end of table and we are done!
            unwindInfo->pTable[unwindInfo->cTableCurCount] = *data;
            unwindInfo->cTableCurCount++;

            // Add to the function table
            pRtlGrowFunctionTable(unwindInfo->hHandle, unwindInfo->cTableCurCount);

            STRESS_LOG5(LF_JIT, LL_INFO1000, "AddToUnwindTable Handle: %p [%p, %p] ADDING 0x%p TO END, now 0x%x entries\n",
                unwindInfo->hHandle, unwindInfo->iRangeStart, unwindInfo->iRangeEnd,
                data->BeginAddress, unwindInfo->cTableCurCount);
            return;
        }
    }

    // OK we need to relocate the table and reregister.  First figure out our 'desiredSpace'
    // We could imagine being much more efficient for 'bulk' updates, but we don't try
    // because we assume that this is rare and we want to keep the code simple

    ULONG usedSpace = unwindInfo->cTableCurCount - unwindInfo->cDeletedEntries;
    ULONG desiredSpace = usedSpace * 5 / 4 + 1;        // Increase by 20%
    // Be more aggressive if we used all of our space;
    if (usedSpace == unwindInfo->cTableMaxCount)
        desiredSpace = usedSpace * 3 / 2 + 1;          // Increase by 50%

    STRESS_LOG7(LF_JIT, LL_INFO100, "AddToUnwindTable Handle: %p [%p, %p] SLOW Realloc Cnt 0x%x Max 0x%x NewMax 0x%x, Adding %x\n",
        unwindInfo->hHandle, unwindInfo->iRangeStart, unwindInfo->iRangeEnd,
        unwindInfo->cTableCurCount, unwindInfo->cTableMaxCount, desiredSpace, data->BeginAddress);

    UnwindInfoTable* newTab = new UnwindInfoTable(unwindInfo->iRangeStart, unwindInfo->iRangeEnd, desiredSpace);

    // Copy in the entries, removing deleted entries and adding the new entry wherever it belongs
    int toIdx = 0;
    bool inserted = false;    // Have we inserted 'data' into the table
    for(ULONG fromIdx = 0; fromIdx < unwindInfo->cTableCurCount; fromIdx++)
    {
        if (!inserted && data->BeginAddress < unwindInfo->pTable[fromIdx].BeginAddress)
        {
            STRESS_LOG1(LF_JIT, LL_INFO100, "AddToUnwindTable Inserted at MID position 0x%x\n", toIdx);
            newTab->pTable[toIdx++] = *data;
            inserted = true;
        }
        if (unwindInfo->pTable[fromIdx].UnwindData != 0)    // A 'non-deleted' entry
            newTab->pTable[toIdx++] = unwindInfo->pTable[fromIdx];
    }
    if (!inserted)
    {
        STRESS_LOG1(LF_JIT, LL_INFO100, "AddToUnwindTable Inserted at END position 0x%x\n", toIdx);
        newTab->pTable[toIdx++] = *data;
    }
    newTab->cTableCurCount = toIdx;
    STRESS_LOG2(LF_JIT, LL_INFO100, "AddToUnwindTable New size 0x%x max 0x%x\n",
        newTab->cTableCurCount, newTab->cTableMaxCount);
    _ASSERTE(newTab->cTableCurCount <= newTab->cTableMaxCount);

    // Unregister the old table
    *unwindInfoPtr = 0;
    unwindInfo->UnRegister();

    // Note that there is a short time when we are not publishing...
// Register the new table newTab->Register(); *unwindInfoPtr = newTab; delete unwindInfo; } /*****************************************************************************/ /* static */ void UnwindInfoTable::RemoveFromUnwindInfoTable(UnwindInfoTable** unwindInfoPtr, TADDR baseAddress, TADDR entryPoint) { CONTRACTL { NOTHROW; GC_TRIGGERS; } CONTRACTL_END; _ASSERTE(unwindInfoPtr != NULL); if (!s_publishingActive) return; CrstHolder ch(s_pUnwindInfoTableLock); UnwindInfoTable* unwindInfo = *unwindInfoPtr; if (unwindInfo != NULL) { DWORD relativeEntryPoint = (DWORD)(entryPoint - baseAddress); STRESS_LOG3(LF_JIT, LL_INFO100, "RemoveFromUnwindInfoTable Removing %p BaseAddress %p rel %x\n", entryPoint, baseAddress, relativeEntryPoint); for(ULONG i = 0; i < unwindInfo->cTableCurCount; i++) { if (unwindInfo->pTable[i].BeginAddress <= relativeEntryPoint && relativeEntryPoint < RUNTIME_FUNCTION__EndAddress(&unwindInfo->pTable[i], unwindInfo->iRangeStart)) { if (unwindInfo->pTable[i].UnwindData != 0) unwindInfo->cDeletedEntries++; unwindInfo->pTable[i].UnwindData = 0; // Mark the entry for deletion STRESS_LOG1(LF_JIT, LL_INFO100, "RemoveFromUnwindInfoTable Removed entry 0x%x\n", i); return; } } } STRESS_LOG2(LF_JIT, LL_WARNING, "RemoveFromUnwindInfoTable COULD NOT FIND %p BaseAddress %p\n", entryPoint, baseAddress); } /****************************************************************************/ // Publish the stack unwind data 'data' which is relative 'baseAddress' // to the operating system in a way ETW stack tracing can use. 
// Publishes the unwind info for one jitted method: each of the 'unwindInfoCount'
// RUNTIME_FUNCTION entries at 'unwindInfo' (addresses relative to 'baseAddress') is
// inserted into the UnwindInfoTable of the RangeSection containing the method's code.
/* static */ void UnwindInfoTable::PublishUnwindInfoForMethod(TADDR baseAddress, PT_RUNTIME_FUNCTION unwindInfo, int unwindInfoCount)
{
    STANDARD_VM_CONTRACT;
    // Publishing only happens while the ETW unwind-info machinery is turned on.
    if (!s_publishingActive)
        return;

    TADDR entry = baseAddress + unwindInfo->BeginAddress;
    RangeSection * pRS = ExecutionManager::FindCodeRange(entry, ExecutionManager::GetScanFlags());
    _ASSERTE(pRS != NULL);
    if (pRS != NULL)
    {
        for(int i = 0; i < unwindInfoCount; i++)
            AddToUnwindInfoTable(&pRS->pUnwindInfoTable, &unwindInfo[i], pRS->LowAddress, pRS->HighAddress);
    }
}

/*****************************************************************************/
// Removes all previously published unwind info for the jitted method starting at
// 'entryPoint' (one table entry per unwind info recorded in its CodeHeader).
/* static */ void UnwindInfoTable::UnpublishUnwindInfoForMethod(TADDR entryPoint)
{
    CONTRACTL {
        NOTHROW;
        GC_TRIGGERS;
    } CONTRACTL_END;
    // Publishing only happens while the ETW unwind-info machinery is turned on.
    if (!s_publishingActive)
        return;

    RangeSection * pRS = ExecutionManager::FindCodeRange(entryPoint, ExecutionManager::GetScanFlags());
    _ASSERTE(pRS != NULL);
    if (pRS != NULL)
    {
        _ASSERTE(pRS->pjit->GetCodeType() == (miManaged | miIL));
        if (pRS->pjit->GetCodeType() == (miManaged | miIL))
        {
            // This cast is justified because only EEJitManager's have the code type above.
            EEJitManager* pJitMgr = (EEJitManager*)(pRS->pjit);
            CodeHeader * pHeader = pJitMgr->GetCodeHeaderFromStartAddress(entryPoint);
            for(ULONG i = 0; i < pHeader->GetNumberOfUnwindInfos(); i++)
                RemoveFromUnwindInfoTable(&pRS->pUnwindInfoTable, pRS->LowAddress, pRS->LowAddress + pHeader->GetUnwindInfo(i)->BeginAddress);
        }
    }
}

#ifdef STUBLINKER_GENERATES_UNWIND_INFO
extern StubUnwindInfoHeapSegment *g_StubHeapSegments;
#endif // STUBLINKER_GENERATES_UNWIND_INFO

extern CrstStatic g_StubUnwindInfoHeapSegmentsCrst;
/*****************************************************************************/
// Publish all existing JIT compiled methods by iterating through the code heap
// Note that because we need to keep the entries in order we have to hold
// s_pUnwindInfoTableLock so that all entries get inserted in the correct order.
// (we rely on heapIterator walking the methods in a heap section in order).
/* static */ void UnwindInfoTable::PublishUnwindInfoForExistingMethods()
{
    STANDARD_VM_CONTRACT;
    {
        // CodeHeapIterator holds the m_CodeHeapCritSec, which insures code heaps don't get deallocated while being walked
        EEJitManager::CodeHeapIterator heapIterator(NULL);

        // Currently m_CodeHeapCritSec is given the CRST_UNSAFE_ANYMODE flag which allows it to be taken in a GC_NOTRIGGER
        // region but also disallows GC_TRIGGERS.  We need GC_TRIGGERS because we take another lock.   Ideally we would
        // fix m_CodeHeapCritSec to not have the CRST_UNSAFE_ANYMODE flag, but I currently reached my threshold for fixing
        // contracts.
        CONTRACT_VIOLATION(GCViolation);

        while(heapIterator.Next())
        {
            MethodDesc *pMD = heapIterator.GetMethod();
            // pMD is NULL for stub code blocks (see CodeHeapIterator::Next); those are skipped here.
            if(pMD)
            {
                PCODE methodEntry =(PCODE) heapIterator.GetMethodCode();
                RangeSection * pRS = ExecutionManager::FindCodeRange(methodEntry, ExecutionManager::GetScanFlags());
                _ASSERTE(pRS != NULL);
                _ASSERTE(pRS->pjit->GetCodeType() == (miManaged | miIL));
                if (pRS != NULL && pRS->pjit->GetCodeType() == (miManaged | miIL))
                {
                    // This cast is justified because only EEJitManager's have the code type above.
                    EEJitManager* pJitMgr = (EEJitManager*)(pRS->pjit);
                    CodeHeader * pHeader = pJitMgr->GetCodeHeaderFromStartAddress(methodEntry);
                    int unwindInfoCount = pHeader->GetNumberOfUnwindInfos();
                    for(int i = 0; i < unwindInfoCount; i++)
                        AddToUnwindInfoTable(&pRS->pUnwindInfoTable, pHeader->GetUnwindInfo(i), pRS->LowAddress, pRS->HighAddress);
                }
            }
        }
    }

#ifdef STUBLINKER_GENERATES_UNWIND_INFO
    // Enumerate all existing stubs
    CrstHolder crst(&g_StubUnwindInfoHeapSegmentsCrst);
    for (StubUnwindInfoHeapSegment* pStubHeapSegment = g_StubHeapSegments; pStubHeapSegment; pStubHeapSegment = pStubHeapSegment->pNext)
    {
        // The stubs are in reverse order, so we reverse them so they are in memory order
        CQuickArrayList<StubUnwindInfoHeader*> list;
        for (StubUnwindInfoHeader *pHeader = pStubHeapSegment->pUnwindHeaderList; pHeader; pHeader = pHeader->pNext)
            list.Push(pHeader);

        for(int i = (int) list.Size()-1; i >= 0; --i)
        {
            StubUnwindInfoHeader *pHeader = list[i];
            AddToUnwindInfoTable(&pStubHeapSegment->pUnwindInfoTable, &pHeader->FunctionEntry,
                (TADDR) pStubHeapSegment->pbBaseAddress,
                (TADDR) pStubHeapSegment->pbBaseAddress + pStubHeapSegment->cbSegment);
        }
    }
#endif // STUBLINKER_GENERATES_UNWIND_INFO
}

/*****************************************************************************/
// turn on the publishing of unwind info.  Called when the ETW rundown provider
// is turned on.  If 'publishExisting' is true, unwind info for all already-jitted
// methods (and stubs) is published as well; otherwise only future methods are.
/* static */ void UnwindInfoTable::PublishUnwindInfo(bool publishExisting)
{
    CONTRACTL {
        NOTHROW;
        GC_TRIGGERS;
    } CONTRACTL_END;
    if (s_publishingActive)
        return;

    // If we don't have the APIs we need, give up
    if (!InitUnwindFtns())
        return;

    EX_TRY
    {
        // Create the lock.  The interlocked exchange guards against two threads
        // racing to turn publishing on; only the winner activates it.
        Crst* newCrst = new Crst(CrstUnwindInfoTableLock);
        if (InterlockedCompareExchangeT(&s_pUnwindInfoTableLock, newCrst, NULL) == NULL)
        {
            s_publishingActive = true;
            if (publishExisting)
                PublishUnwindInfoForExistingMethods();
        }
        else
            delete newCrst;     // we were in a race and failed, throw away the Crst we made.
    } EX_CATCH {
        STRESS_LOG1(LF_JIT, LL_ERROR, "Exception happened when doing unwind Info rundown. EIP of last AV = %p\n", g_LastAccessViolationEIP);
        _ASSERTE(!"Exception thrown while publishing 'catchup' ETW unwind information");
        s_publishingActive = false;     // Try to minimize damage.
    } EX_END_CATCH(SwallowAllExceptions);
}

#endif // defined(TARGET_AMD64) && !defined(DACCESS_COMPILE)

/*-----------------------------------------------------------------------------
 This is a listing of which methods use which synchronization mechanism
 in the EEJitManager.
//-----------------------------------------------------------------------------

Setters of EEJitManager::m_CodeHeapCritSec
-----------------------------------------------
allocCode
allocGCInfo
allocEHInfo
allocJumpStubBlock
ResolveEHClause
RemoveJitData
Unload
ReleaseReferenceToHeap
JitCodeToMethodInfo


Need EEJitManager::m_CodeHeapCritSec to be set
-----------------------------------------------
NewCodeHeap
allocCodeRaw
GetCodeHeapList
RemoveCodeHeapFromDomainList
DeleteCodeHeap
AddRangeToJitHeapCache
DeleteJitHeapCache

*/

#if !defined(DACCESS_COMPILE)

// Iterates all jitted method bodies across all code heaps.  The m_lockHolder member
// takes m_CodeHeapCritSec for the lifetime of the iterator so code heaps cannot be
// deallocated while being walked.  Pass a non-NULL 'pLoaderAllocatorFilter' to
// restrict iteration to methods belonging to that LoaderAllocator.
EEJitManager::CodeHeapIterator::CodeHeapIterator(LoaderAllocator *pLoaderAllocatorFilter)
    : m_lockHolder(&(ExecutionManager::GetEEJitManager()->m_CodeHeapCritSec)), m_Iterator(NULL, 0, NULL, 0)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
    }
    CONTRACTL_END;

    m_pHeapList = NULL;
    m_pLoaderAllocator = pLoaderAllocatorFilter;
    m_pHeapList = ExecutionManager::GetEEJitManager()->GetCodeHeapList();
    // Point the section iterator at the first heap (placement-new re-seats the member).
    if(m_pHeapList)
        new (&m_Iterator) MethodSectionIterator((const void *)m_pHeapList->mapBase,
            (COUNT_T)m_pHeapList->maxCodeHeapSize,
            m_pHeapList->pHdrMap,
            (COUNT_T)HEAP2MAPSIZE(ROUND_UP_TO_PAGE(m_pHeapList->maxCodeHeapSize)));
};

EEJitManager::CodeHeapIterator::~CodeHeapIterator()
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
    }
    CONTRACTL_END;
}

// Advances to the next method body.  Returns FALSE when all heaps are exhausted.
// Note: for stub code blocks this still returns TRUE but leaves m_pCurrent NULL
// (callers such as PublishUnwindInfoForExistingMethods check GetMethod() for NULL).
BOOL EEJitManager::CodeHeapIterator::Next()
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
    }
    CONTRACTL_END;

    if(!m_pHeapList)
        return FALSE;

    while(1)
    {
        if(!m_Iterator.Next())
        {
            // Current heap exhausted; move to the next heap in the list.
            m_pHeapList = m_pHeapList->GetNext();
            if(!m_pHeapList)
                return FALSE;
            new (&m_Iterator) MethodSectionIterator((const void *)m_pHeapList->mapBase,
                (COUNT_T)m_pHeapList->maxCodeHeapSize,
                m_pHeapList->pHdrMap,
                (COUNT_T)HEAP2MAPSIZE(ROUND_UP_TO_PAGE(m_pHeapList->maxCodeHeapSize)));
        }
        else
        {
            // The CodeHeader immediately precedes the method code.
            BYTE * code = m_Iterator.GetMethodCode();
            CodeHeader * pHdr = (CodeHeader *)(code - sizeof(CodeHeader));
            m_pCurrent = !pHdr->IsStubCodeBlock() ? pHdr->GetMethodDesc() : NULL;

            // LoaderAllocator filter
            if (m_pLoaderAllocator && m_pCurrent)
            {
                LoaderAllocator *pCurrentLoaderAllocator = m_pCurrent->GetLoaderAllocator();
                if(pCurrentLoaderAllocator != m_pLoaderAllocator)
                    continue;
            }

            return TRUE;
        }
    }
}
#endif // !DACCESS_COMPILE

#ifndef DACCESS_COMPILE

//---------------------------------------------------------------------------------------
//
// ReaderLockHolder::ReaderLockHolder takes the reader lock, checks for the writer lock
// and either aborts if the writer lock is held, or yields until the writer lock is released,
// keeping the reader lock.  This is normally called in the constructor for the
// ReaderLockHolder.
//
// The writer cannot be taken if there are any readers. The WriterLockHolder functions take the
// writer lock and check for any readers. If there are any, the WriterLockHolder functions
// release the writer and yield to wait for the readers to be done.

ExecutionManager::ReaderLockHolder::ReaderLockHolder(HostCallPreference hostCallPreference /*=AllowHostCalls*/)
{
    CONTRACTL {
        NOTHROW;
        if (hostCallPreference == AllowHostCalls) { HOST_CALLS; } else { HOST_NOCALLS; }
        GC_NOTRIGGER;
        CAN_TAKE_LOCK;
    } CONTRACTL_END;

    IncCantAllocCount();

    // Announce this reader first, then check the writer; the writer side does the
    // mirror-image dance (increment writer count, then check readers).
    FastInterlockIncrement(&m_dwReaderCount);

    EE_LOCK_TAKEN(GetPtrForLockContract());

    if (VolatileLoad(&m_dwWriterLock) != 0)
    {
        if (hostCallPreference != AllowHostCalls)
        {
            // Rats, writer lock is held. Gotta bail. Since the reader count was already
            // incremented, we're technically still blocking writers at the moment. But
            // the holder who called us is about to call DecrementReader in its
            // destructor and unblock writers.
            return;
        }

        // Spin/yield until the writer drains (we keep our reader count the whole time).
        YIELD_WHILE ((VolatileLoad(&m_dwWriterLock) != 0));
    }
}

//---------------------------------------------------------------------------------------
//
// See code:ExecutionManager::ReaderLockHolder::ReaderLockHolder. This just decrements the reader count.

ExecutionManager::ReaderLockHolder::~ReaderLockHolder()
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
    }
    CONTRACTL_END;

    FastInterlockDecrement(&m_dwReaderCount);
    DecCantAllocCount();
    EE_LOCK_RELEASED(GetPtrForLockContract());
}

//---------------------------------------------------------------------------------------
//
// Returns whether the reader lock is acquired (i.e. no writer is currently holding the lock).

BOOL ExecutionManager::ReaderLockHolder::Acquired()
{
    LIMITED_METHOD_CONTRACT;
    return VolatileLoad(&m_dwWriterLock) == 0;
}

// Takes the writer lock: spins until the writer count can be raised while the reader
// count is zero.  While held, the thread is marked forbid-suspend so profilers/suspension
// cannot stop it mid-update.
ExecutionManager::WriterLockHolder::WriterLockHolder()
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        CAN_TAKE_LOCK;
    } CONTRACTL_END;

    _ASSERTE(m_dwWriterLock == 0);

    // Signal to a debugger that this thread cannot stop now
    IncCantStopCount();

    IncCantAllocCount();

    DWORD dwSwitchCount = 0;
    while (TRUE)
    {
        // While this thread holds the writer lock, we must not try to suspend it
        // or allow a profiler to walk its stack
        Thread::IncForbidSuspendThread();

        FastInterlockIncrement(&m_dwWriterLock);
        if (m_dwReaderCount == 0)
            break;
        // A reader slipped in; back off and retry after yielding.
        FastInterlockDecrement(&m_dwWriterLock);

        // Before we loop and retry, it's safe to suspend or hijack and inspect
        // this thread
        Thread::DecForbidSuspendThread();

        __SwitchToThread(0, ++dwSwitchCount);
    }
    EE_LOCK_TAKEN(GetPtrForLockContract());
}

ExecutionManager::WriterLockHolder::~WriterLockHolder()
{
    LIMITED_METHOD_CONTRACT;

    FastInterlockDecrement(&m_dwWriterLock);

    // Writer lock released, so it's safe again for this thread to be
    // suspended or have its stack walked by a profiler
    Thread::DecForbidSuspendThread();

    DecCantAllocCount();

    // Signal to a debugger that it's again safe to stop this thread
    DecCantStopCount();

    EE_LOCK_RELEASED(GetPtrForLockContract());
}

#else

// For DAC builds, we only care whether the writer lock is held.
// If it is, we will assume the locked data is in an inconsistent
// state and throw. We never actually take the lock.
// Note: Throws
ExecutionManager::ReaderLockHolder::ReaderLockHolder(HostCallPreference hostCallPreference /*=AllowHostCalls*/)
{
    SUPPORTS_DAC;

    if (m_dwWriterLock != 0)
    {
        ThrowHR(CORDBG_E_PROCESS_NOT_SYNCHRONIZED);
    }
}

ExecutionManager::ReaderLockHolder::~ReaderLockHolder()
{
}

#endif // DACCESS_COMPILE

/*-----------------------------------------------------------------------------
 This is a listing of which methods use which synchronization mechanism
 in the ExecutionManager
//-----------------------------------------------------------------------------

==============================================================================
ExecutionManger::ReaderLockHolder and ExecutionManger::WriterLockHolder
Protects the callers of ExecutionManager::GetRangeSection from heap deletions
while walking RangeSections.
 You need to take a reader lock before reading the values: m_CodeRangeList
and hold it while walking the lists
Uses ReaderLockHolder (allows multiple readers with no writers)
-----------------------------------------
ExecutionManager::FindCodeRange
ExecutionManager::FindZapModule
ExecutionManager::EnumMemoryRegions

Uses WriterLockHolder (allows single writer and no readers)
-----------------------------------------
ExecutionManager::AddRangeHelper
ExecutionManager::DeleteRangeHelper

*/

//-----------------------------------------------------------------------------

#if defined(TARGET_ARM) || defined(TARGET_ARM64)
#define EXCEPTION_DATA_SUPPORTS_FUNCTION_FRAGMENTS
#endif

#if defined(EXCEPTION_DATA_SUPPORTS_FUNCTION_FRAGMENTS)
// The function fragments can be used in Hot/Cold splitting, expressing Large Functions or in 'ShrinkWrapping', which is
// delaying saving and restoring some callee-saved registers later inside the body of the method.
// (It's assumed that JIT will not emit any ShrinkWrapping-style methods)
// For these cases multiple RUNTIME_FUNCTION entries (a.k.a function fragments) are used to define
// all the regions of the function or funclet. And one of these function fragments cover the beginning of the function/funclet,
// including the prolog section and is referred as the 'Host Record'.
// This function returns TRUE if the inspected RUNTIME_FUNCTION entry is NOT a host record
BOOL IsFunctionFragment(TADDR baseAddress, PTR_RUNTIME_FUNCTION pFunctionEntry)
{
    LIMITED_METHOD_DAC_CONTRACT;

    _ASSERTE((pFunctionEntry->UnwindData & 3) == 0);   // The unwind data must be an RVA; we don't support packed unwind format
    DWORD unwindHeader = *(PTR_DWORD)(baseAddress + pFunctionEntry->UnwindData);
    _ASSERTE((0 == ((unwindHeader >> 18) & 3)) || !"unknown unwind data format, version != 0");
#if defined(TARGET_ARM)

    // On ARM, It's assumed that the prolog is always at the beginning of the function and cannot be split.
    // Given that, there are 4 possible ways to fragment a function:
    // 1. Prolog only:
    // 2. Prolog and some epilogs:
    // 3. Epilogs only:
    // 4. No Prolog or epilog
    //
    // Function fragments describing 1 & 2 are host records, 3 & 4 are not.
    // for 3 & 4, the .xdata record's F bit is set to 1, marking clearly what is NOT a host record

    _ASSERTE((pFunctionEntry->BeginAddress & THUMB_CODE) == THUMB_CODE); // Sanity check: it's a thumb address
    DWORD Fbit = (unwindHeader >> 22) & 0x1;    // F "fragment" bit
    return (Fbit == 1);
#elif defined(TARGET_ARM64)

    // ARM64 is a little bit more flexible, in the sense that it supports partial prologs. However only one of the
    // prolog regions are allowed to alter SP and that's the Host Record. Partial prologs are used in ShrinkWrapping
    // scenarios which is not supported, hence we don't need to worry about them. discarding partial prologs
    // simplifies identifying a host record a lot.
    //
    // 1. Prolog only: The host record. Epilog Count and E bit are all 0.
    // 2. Prolog and some epilogs: The host record with accompanying epilog-only records
    // 3. Epilogs only: First unwind code is Phantom prolog (Starting with an end_c, indicating an empty prolog)
    // 4. No prologs or epilogs: First unwind code is Phantom prolog  (Starting with an end_c, indicating an empty prolog)
    //

    // Decode the .xdata header fields (see the ARM64 exception handling format):
    // bits [26:22] = Epilog Count, bits [31:27] = Code Words.
    int EpilogCount = (int)(unwindHeader >> 22) & 0x1F;
    int CodeWords = unwindHeader >> 27;
    PTR_DWORD pUnwindCodes = (PTR_DWORD)(baseAddress + pFunctionEntry->UnwindData);
    // Skip header.
    pUnwindCodes++;

    // Skip extended header.  Both counts being zero means the real counts live in the
    // extension word that follows the header.
    if ((CodeWords == 0) && (EpilogCount == 0))
    {
        EpilogCount = (*pUnwindCodes) & 0xFFFF;
        pUnwindCodes++;
    }

    // Skip epilog scopes.
    BOOL Ebit = (unwindHeader >> 21) & 0x1;
    if (!Ebit && (EpilogCount != 0))
    {
        // EpilogCount is the number of exception scopes defined right after the unwindHeader
        pUnwindCodes += EpilogCount;
    }

    // 0xE5 is the end_c unwind code: a phantom (empty) prolog, which marks a non-host fragment.
    return ((*pUnwindCodes & 0xFF) == 0xE5);
#else
    PORTABILITY_ASSERT("IsFunctionFragnent - NYI on this platform");
#endif
}

// When we have fragmented unwind we usually want to refer to the
// unwind record that includes the prolog. We can find it by searching
// back in the sequence of unwind records.
PTR_RUNTIME_FUNCTION FindRootEntry(PTR_RUNTIME_FUNCTION pFunctionEntry, TADDR baseAddress)
{
    LIMITED_METHOD_DAC_CONTRACT;

    PTR_RUNTIME_FUNCTION pRootEntry = pFunctionEntry;

    if (pRootEntry != NULL)
    {
        // Walk backwards in the RUNTIME_FUNCTION array until we find a non-fragment.
        // We're guaranteed to find one, because we require that a fragment live in a function or funclet
        // that has a prolog, which will have non-fragment .xdata.
        for (;;)
        {
            if (!IsFunctionFragment(baseAddress, pRootEntry))
            {
                // This is not a fragment; we're done
                break;
            }

            --pRootEntry;
        }
    }

    return pRootEntry;
}

#endif // EXCEPTION_DATA_SUPPORTS_FUNCTION_FRAGMENTS

#ifndef DACCESS_COMPILE

//**********************************************************************************
//  IJitManager
//**********************************************************************************
IJitManager::IJitManager()
{
    LIMITED_METHOD_CONTRACT;

    m_runtimeSupport   = ExecutionManager::GetDefaultCodeManager();
}

#endif // #ifndef DACCESS_COMPILE

// When we unload an appdomain, we need to make sure that any threads that are crawling through
// our heap or rangelist are out. For cooperative-mode threads, we know that they will have
// been stopped when we suspend the EE so they won't be touching an element that is about to be deleted.
// However for pre-emptive mode threads, they could be stalled right on top of the element we want
// to delete, so we need to apply the reader lock to them and wait for them to drain.
// Decides whether the current thread must take the RangeSection reader lock before
// scanning (see the locking discussion above).  Cooperative-mode threads and the
// suspension thread can scan lock-free; most preemptive-mode threads need the lock.
ExecutionManager::ScanFlag ExecutionManager::GetScanFlags()
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        HOST_NOCALLS;
        SUPPORTS_DAC;
    } CONTRACTL_END;

#if !defined(DACCESS_COMPILE)
    Thread *pThread = GetThreadNULLOk();

    // Threads unknown to the runtime scan without the reader lock.
    if (!pThread)
        return ScanNoReaderLock;

    // If this thread is hijacked by a profiler and crawling its own stack,
    // we do need to take the lock
    if (pThread->GetProfilerFilterContext() != NULL)
        return ScanReaderLock;

    if (pThread->PreemptiveGCDisabled() || (pThread == ThreadSuspend::GetSuspensionThread()))
        return ScanNoReaderLock;

    return ScanReaderLock;
#else
    // DAC never takes the lock (see the DAC ReaderLockHolder above).
    return ScanNoReaderLock;
#endif
}

#ifdef DACCESS_COMPILE

// Reports this jit manager's memory (and its code manager's) to the DAC minidump enumeration.
void IJitManager::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
{
    DAC_ENUM_VTHIS();
    if (m_runtimeSupport.IsValid())
    {
        m_runtimeSupport->EnumMemoryRegions(flags);
    }
}

#endif // #ifdef DACCESS_COMPILE

#if defined(FEATURE_EH_FUNCLETS)

// Returns a pointer to the unwind data blob for 'pRuntimeFunction' and stores its
// size in *pSize.  The layout (and hence the size computation) is target-specific.
PTR_VOID GetUnwindDataBlob(TADDR moduleBase, PTR_RUNTIME_FUNCTION pRuntimeFunction, /* out */ SIZE_T * pSize)
{
    LIMITED_METHOD_CONTRACT;

#if defined(TARGET_AMD64)
    PTR_UNWIND_INFO pUnwindInfo(dac_cast<PTR_UNWIND_INFO>(moduleBase + RUNTIME_FUNCTION__GetUnwindInfoAddress(pRuntimeFunction)));

    *pSize = ALIGN_UP(offsetof(UNWIND_INFO, UnwindCode) +
        sizeof(UNWIND_CODE) * pUnwindInfo->CountOfUnwindCodes +
        sizeof(ULONG) /* personality routine is always present */,
            sizeof(DWORD));

    return pUnwindInfo;

#elif defined(TARGET_X86)
    PTR_UNWIND_INFO pUnwindInfo(dac_cast<PTR_UNWIND_INFO>(moduleBase + RUNTIME_FUNCTION__GetUnwindInfoAddress(pRuntimeFunction)));

    *pSize = sizeof(UNWIND_INFO);

    return pUnwindInfo;

#elif defined(TARGET_ARM) || defined(TARGET_ARM64)

    // if this function uses packed unwind data then at least one of the two least significant bits
    // will be non-zero.  if this is the case then there will be no xdata record to enumerate.
    _ASSERTE((pRuntimeFunction->UnwindData & 0x3) == 0);

    // compute the size of the unwind info
    PTR_DWORD xdata = dac_cast<PTR_DWORD>(pRuntimeFunction->UnwindData + moduleBase);

    int size = 4;   // the .xdata header word itself

#if defined(TARGET_ARM)
    // See https://docs.microsoft.com/en-us/cpp/build/arm-exception-handling
    int unwindWords = xdata[0] >> 28;
    int epilogScopes = (xdata[0] >> 23) & 0x1f;
#else
    // See https://docs.microsoft.com/en-us/cpp/build/arm64-exception-handling
    int unwindWords = xdata[0] >> 27;
    int epilogScopes = (xdata[0] >> 22) & 0x1f;
#endif

    // Both counts zero means the real counts are in an extension word following the header.
    if (unwindWords == 0 && epilogScopes == 0)
    {
        size += 4;
        unwindWords = (xdata[1] >> 16) & 0xff;
        epilogScopes = xdata[1] & 0xffff;
    }

    // Unless the E bit is set, each epilog scope occupies its own word.
    if (!(xdata[0] & (1 << 21)))
        size += 4 * epilogScopes;

    size += 4 * unwindWords;

    _ASSERTE(xdata[0] & (1 << 20)); // personality routine should be always present
    size += 4;

    *pSize = size;
    return xdata;
#else
    PORTABILITY_ASSERT("GetUnwindDataBlob");
    return NULL;
#endif
}

// GetFuncletStartAddress returns the starting address of the function or funclet indicated by the EECodeInfo address.
TADDR IJitManager::GetFuncletStartAddress(EECodeInfo * pCodeInfo)
{
    PTR_RUNTIME_FUNCTION pFunctionEntry = pCodeInfo->GetFunctionEntry();

#ifdef TARGET_AMD64
    _ASSERTE((pFunctionEntry->UnwindData & RUNTIME_FUNCTION_INDIRECT) == 0);
#endif

    TADDR baseAddress = pCodeInfo->GetModuleBase();

#if defined(EXCEPTION_DATA_SUPPORTS_FUNCTION_FRAGMENTS)
    // With fragmented unwind data, back up to the host record (the one with the prolog).
    pFunctionEntry = FindRootEntry(pFunctionEntry, baseAddress);
#endif // EXCEPTION_DATA_SUPPORTS_FUNCTION_FRAGMENTS

    TADDR funcletStartAddress = baseAddress + RUNTIME_FUNCTION__BeginAddress(pFunctionEntry);

    return funcletStartAddress;
}

// A code address is inside a funclet iff its (root) function-entry start differs
// from the method's start address.
BOOL IJitManager::IsFunclet(EECodeInfo * pCodeInfo)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
    }
    CONTRACTL_END;

    TADDR funcletStartAddress = GetFuncletStartAddress(pCodeInfo);
    TADDR methodStartAddress = pCodeInfo->GetStartAddress();

    return (funcletStartAddress != methodStartAddress);
}

// Returns TRUE if the code address lies in a *filter* funclet: the funclet's start
// offset is matched against the FilterOffset of the method's EH clauses.
BOOL IJitManager::IsFilterFunclet(EECodeInfo * pCodeInfo)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
    }
    CONTRACTL_END;

    if (!pCodeInfo->IsFunclet())
        return FALSE;

    TADDR funcletStartAddress = GetFuncletStartAddress(pCodeInfo);

    // This assumes no hot/cold splitting for funclets

    _ASSERTE(FitsInU4(pCodeInfo->GetCodeAddress() - funcletStartAddress));
    DWORD relOffsetWithinFunclet = static_cast<DWORD>(pCodeInfo->GetCodeAddress() - funcletStartAddress);

    _ASSERTE(pCodeInfo->GetRelOffset() >= relOffsetWithinFunclet);
    // Method-relative offset at which this funclet begins.
    DWORD funcletStartOffset = pCodeInfo->GetRelOffset() - relOffsetWithinFunclet;

    EH_CLAUSE_ENUMERATOR pEnumState;
    unsigned EHCount = InitializeEHEnumeration(pCodeInfo->GetMethodToken(), &pEnumState);
    _ASSERTE(EHCount > 0);

    EE_ILEXCEPTION_CLAUSE EHClause;
    for (ULONG i = 0; i < EHCount; i++)
    {
        GetNextEHClause(&pEnumState, &EHClause);

        // Duplicate clauses are always listed at the end, so when we hit a duplicate clause,
        // we have already visited all of the normal clauses.
        if (IsDuplicateClause(&EHClause))
        {
            break;
        }

        if (IsFilterHandler(&EHClause))
        {
            if (EHClause.FilterOffset == funcletStartOffset)
            {
                return true;
            }
        }
    }

    return false;
}

#else // FEATURE_EH_FUNCLETS

// Without funclets there is no separate unwind blob; report size 0 and return the
// raw UnwindData address.
PTR_VOID GetUnwindDataBlob(TADDR moduleBase, PTR_RUNTIME_FUNCTION pRuntimeFunction, /* out */ SIZE_T * pSize)
{
    *pSize = 0;
    return dac_cast<PTR_VOID>(pRuntimeFunction->UnwindData + moduleBase);
}

#endif // FEATURE_EH_FUNCLETS

#ifndef DACCESS_COMPILE

//**********************************************************************************
//  EEJitManager
//**********************************************************************************

EEJitManager::EEJitManager()
    :
    // CRST_DEBUGGER_THREAD - We take this lock on debugger thread during EnC add method, among other things
    // CRST_TAKEN_DURING_SHUTDOWN - We take this lock during shutdown if ETW is on (to do rundown)
    m_CodeHeapCritSec( CrstSingleUseLock,
                        CrstFlags(CRST_UNSAFE_ANYMODE|CRST_DEBUGGER_THREAD|CRST_TAKEN_DURING_SHUTDOWN)),
    m_CPUCompileFlags(),
    m_JitLoadCritSec( CrstSingleUseLock )
{
    CONTRACTL {
        THROWS;
        GC_NOTRIGGER;
    } CONTRACTL_END;

    m_pCodeHeap = NULL;
    m_jit = NULL;
    m_JITCompiler      = NULL;
#ifdef TARGET_AMD64
    m_pEmergencyJumpStubReserveList = NULL;
#endif
#if defined(TARGET_X86) || defined(TARGET_AMD64)
    m_JITCompilerOther = NULL;
#endif

#ifdef ALLOW_SXS_JIT
    m_alternateJit     = NULL;
    m_AltJITCompiler   = NULL;
    m_AltJITRequired   = false;
#endif

    m_cleanupList = NULL;
}

#if defined(TARGET_X86) || defined(TARGET_AMD64)

// Checks whether the OS has enabled AVX state saving (always TRUE on non-Windows here;
// on Windows, queries GetEnabledXStateFeatures from kernel32 for XSTATE_MASK_AVX).
bool DoesOSSupportAVX()
{
    LIMITED_METHOD_CONTRACT;

#ifndef TARGET_UNIX
    // On Windows we have an api(GetEnabledXStateFeatures) to check if AVX is supported
    typedef DWORD64 (WINAPI *PGETENABLEDXSTATEFEATURES)();
    PGETENABLEDXSTATEFEATURES pfnGetEnabledXStateFeatures = NULL;

    // Probe kernel32 for the API; if it is missing we conservatively report no AVX.
    HMODULE hMod = WszLoadLibraryEx(WINDOWS_KERNEL32_DLLNAME_W, NULL, LOAD_LIBRARY_SEARCH_SYSTEM32);
    if(hMod == NULL)
        return FALSE;

    pfnGetEnabledXStateFeatures = (PGETENABLEDXSTATEFEATURES)GetProcAddress(hMod, "GetEnabledXStateFeatures");
    if
(pfnGetEnabledXStateFeatures == NULL) { return FALSE; } DWORD64 FeatureMask = pfnGetEnabledXStateFeatures(); if ((FeatureMask & XSTATE_MASK_AVX) == 0) { return FALSE; } #endif // !TARGET_UNIX return TRUE; } #endif // defined(TARGET_X86) || defined(TARGET_AMD64) #ifdef TARGET_ARM64 extern "C" DWORD64 __stdcall GetDataCacheZeroIDReg(); #endif void EEJitManager::SetCpuInfo() { LIMITED_METHOD_CONTRACT; // // NOTE: This function needs to be kept in sync with compSetProcesor() in jit\compiler.cpp // CORJIT_FLAGS CPUCompileFlags; #if defined(TARGET_X86) CORINFO_CPU cpuInfo; GetSpecificCpuInfo(&cpuInfo); switch (CPU_X86_FAMILY(cpuInfo.dwCPUType)) { case CPU_X86_PENTIUM_4: CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_TARGET_P4); break; default: break; } if (CPU_X86_USE_CMOV(cpuInfo.dwFeatures)) { CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_CMOV); CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_FCOMI); } #endif // TARGET_X86 #if defined(TARGET_X86) || defined(TARGET_AMD64) CPUCompileFlags.Set(InstructionSet_X86Base); // NOTE: The below checks are based on the information reported by // Intel® 64 and IA-32 Architectures Software Developer’s Manual. Volume 2 // and // AMD64 Architecture Programmer’s Manual. 
Volume 3 // For more information, please refer to the CPUID instruction in the respective manuals // We will set the following flags: // CORJIT_FLAG_USE_SSE2 is required // SSE - EDX bit 25 // SSE2 - EDX bit 26 // CORJIT_FLAG_USE_AES // CORJIT_FLAG_USE_SSE2 // AES - ECX bit 25 // CORJIT_FLAG_USE_PCLMULQDQ // CORJIT_FLAG_USE_SSE2 // PCLMULQDQ - ECX bit 1 // CORJIT_FLAG_USE_SSE3 if the following feature bits are set (input EAX of 1) // CORJIT_FLAG_USE_SSE2 // SSE3 - ECX bit 0 // CORJIT_FLAG_USE_SSSE3 if the following feature bits are set (input EAX of 1) // CORJIT_FLAG_USE_SSE3 // SSSE3 - ECX bit 9 // CORJIT_FLAG_USE_SSE41 if the following feature bits are set (input EAX of 1) // CORJIT_FLAG_USE_SSSE3 // SSE4.1 - ECX bit 19 // CORJIT_FLAG_USE_SSE42 if the following feature bits are set (input EAX of 1) // CORJIT_FLAG_USE_SSE41 // SSE4.2 - ECX bit 20 // CORJIT_FLAG_USE_POPCNT if the following feature bits are set (input EAX of 1) // CORJIT_FLAG_USE_SSE42 // POPCNT - ECX bit 23 // CORJIT_FLAG_USE_AVX if the following feature bits are set (input EAX of 1), and xmmYmmStateSupport returns 1: // CORJIT_FLAG_USE_SSE42 // OSXSAVE - ECX bit 27 // AVX - ECX bit 28 // XGETBV - XCR0[2:1] 11b // CORJIT_FLAG_USE_FMA if the following feature bits are set (input EAX of 1), and xmmYmmStateSupport returns 1: // CORJIT_FLAG_USE_AVX // FMA - ECX bit 12 // CORJIT_FLAG_USE_AVX2 if the following feature bit is set (input EAX of 0x07 and input ECX of 0): // CORJIT_FLAG_USE_AVX // AVX2 - EBX bit 5 // CORJIT_FLAG_USE_AVXVNNI if the following feature bit is set (input EAX of 0x07 and input ECX of 1): // CORJIT_FLAG_USE_AVX2 // AVXVNNI - EAX bit 4 // CORJIT_FLAG_USE_AVX_512 is not currently set, but defined so that it can be used in future without // CORJIT_FLAG_USE_BMI1 if the following feature bit is set (input EAX of 0x07 and input ECX of 0): // BMI1 - EBX bit 3 // CORJIT_FLAG_USE_BMI2 if the following feature bit is set (input EAX of 0x07 and input ECX of 0): // BMI2 - EBX bit 8 // 
CORJIT_FLAG_USE_LZCNT if the following feature bits are set (input EAX of 80000001H) // LZCNT - ECX bit 5 // synchronously updating VM and JIT. int cpuidInfo[4]; const int EAX = CPUID_EAX; const int EBX = CPUID_EBX; const int ECX = CPUID_ECX; const int EDX = CPUID_EDX; __cpuid(cpuidInfo, 0x00000000); uint32_t maxCpuId = static_cast<uint32_t>(cpuidInfo[EAX]); if (maxCpuId >= 1) { __cpuid(cpuidInfo, 0x00000001); if (((cpuidInfo[EDX] & (1 << 25)) != 0) && ((cpuidInfo[EDX] & (1 << 26)) != 0)) // SSE & SSE2 { CPUCompileFlags.Set(InstructionSet_SSE); CPUCompileFlags.Set(InstructionSet_SSE2); if ((cpuidInfo[ECX] & (1 << 25)) != 0) // AESNI { CPUCompileFlags.Set(InstructionSet_AES); } if ((cpuidInfo[ECX] & (1 << 1)) != 0) // PCLMULQDQ { CPUCompileFlags.Set(InstructionSet_PCLMULQDQ); } if ((cpuidInfo[ECX] & (1 << 0)) != 0) // SSE3 { CPUCompileFlags.Set(InstructionSet_SSE3); if ((cpuidInfo[ECX] & (1 << 9)) != 0) // SSSE3 { CPUCompileFlags.Set(InstructionSet_SSSE3); if ((cpuidInfo[ECX] & (1 << 19)) != 0) // SSE4.1 { CPUCompileFlags.Set(InstructionSet_SSE41); if ((cpuidInfo[ECX] & (1 << 20)) != 0) // SSE4.2 { CPUCompileFlags.Set(InstructionSet_SSE42); if ((cpuidInfo[ECX] & (1 << 23)) != 0) // POPCNT { CPUCompileFlags.Set(InstructionSet_POPCNT); } if (((cpuidInfo[ECX] & (1 << 27)) != 0) && ((cpuidInfo[ECX] & (1 << 28)) != 0)) // OSXSAVE & AVX { if(DoesOSSupportAVX() && (xmmYmmStateSupport() == 1)) // XGETBV == 11 { CPUCompileFlags.Set(InstructionSet_AVX); if ((cpuidInfo[ECX] & (1 << 12)) != 0) // FMA { CPUCompileFlags.Set(InstructionSet_FMA); } if (maxCpuId >= 0x07) { __cpuidex(cpuidInfo, 0x00000007, 0x00000000); if ((cpuidInfo[EBX] & (1 << 5)) != 0) // AVX2 { CPUCompileFlags.Set(InstructionSet_AVX2); __cpuidex(cpuidInfo, 0x00000007, 0x00000001); if ((cpuidInfo[EAX] & (1 << 4)) != 0) // AVX-VNNI { CPUCompileFlags.Set(InstructionSet_AVXVNNI); } } } } } } } } } static ConfigDWORD fFeatureSIMD; if (fFeatureSIMD.val(CLRConfig::EXTERNAL_FeatureSIMD) != 0) { 
CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_FEATURE_SIMD); } if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_SIMD16ByteOnly) != 0) { CPUCompileFlags.Clear(InstructionSet_AVX2); } } if (maxCpuId >= 0x07) { __cpuidex(cpuidInfo, 0x00000007, 0x00000000); if ((cpuidInfo[EBX] & (1 << 3)) != 0) // BMI1 { CPUCompileFlags.Set(InstructionSet_BMI1); } if ((cpuidInfo[EBX] & (1 << 8)) != 0) // BMI2 { CPUCompileFlags.Set(InstructionSet_BMI2); } } } __cpuid(cpuidInfo, 0x80000000); uint32_t maxCpuIdEx = static_cast<uint32_t>(cpuidInfo[EAX]); if (maxCpuIdEx >= 0x80000001) { __cpuid(cpuidInfo, 0x80000001); if ((cpuidInfo[ECX] & (1 << 5)) != 0) // LZCNT { CPUCompileFlags.Set(InstructionSet_LZCNT); } } if (!CPUCompileFlags.IsSet(InstructionSet_SSE)) { EEPOLICY_HANDLE_FATAL_ERROR_WITH_MESSAGE(COR_E_EXECUTIONENGINE, W("SSE is not supported on the processor.")); } if (!CPUCompileFlags.IsSet(InstructionSet_SSE2)) { EEPOLICY_HANDLE_FATAL_ERROR_WITH_MESSAGE(COR_E_EXECUTIONENGINE, W("SSE2 is not supported on the processor.")); } #endif // defined(TARGET_X86) || defined(TARGET_AMD64) #if defined(TARGET_ARM64) static ConfigDWORD fFeatureSIMD; if (fFeatureSIMD.val(CLRConfig::EXTERNAL_FeatureSIMD) != 0) { CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_FEATURE_SIMD); } #if defined(TARGET_UNIX) PAL_GetJitCpuCapabilityFlags(&CPUCompileFlags); // For HOST_ARM64, if OS has exposed mechanism to detect CPU capabilities, make sure it has AdvSimd capability. // For other cases i.e. if !HOST_ARM64 but TARGET_ARM64 or HOST_ARM64 but OS doesn't expose way to detect // CPU capabilities, we always enable AdvSimd flags by default. 
// if (!CPUCompileFlags.IsSet(InstructionSet_AdvSimd)) { EEPOLICY_HANDLE_FATAL_ERROR_WITH_MESSAGE(COR_E_EXECUTIONENGINE, W("AdvSimd is not supported on the processor.")); } #elif defined(HOST_64BIT) // FP and SIMD support are enabled by default CPUCompileFlags.Set(InstructionSet_ArmBase); CPUCompileFlags.Set(InstructionSet_AdvSimd); // PF_ARM_V8_CRYPTO_INSTRUCTIONS_AVAILABLE (30) if (IsProcessorFeaturePresent(PF_ARM_V8_CRYPTO_INSTRUCTIONS_AVAILABLE)) { CPUCompileFlags.Set(InstructionSet_Aes); CPUCompileFlags.Set(InstructionSet_Sha1); CPUCompileFlags.Set(InstructionSet_Sha256); } // PF_ARM_V8_CRC32_INSTRUCTIONS_AVAILABLE (31) if (IsProcessorFeaturePresent(PF_ARM_V8_CRC32_INSTRUCTIONS_AVAILABLE)) { CPUCompileFlags.Set(InstructionSet_Crc32); } #endif // HOST_64BIT if (GetDataCacheZeroIDReg() == 4) { // DCZID_EL0<4> (DZP) indicates whether use of DC ZVA instructions is permitted (0) or prohibited (1). // DCZID_EL0<3:0> (BS) specifies Log2 of the block size in words. // // We set the flag when the instruction is permitted and the block size is 64 bytes. CPUCompileFlags.Set(InstructionSet_Dczva); } #endif // TARGET_ARM64 // Now that we've queried the actual hardware support, we need to adjust what is actually supported based // on some externally available config switches that exist so users can test code for downlevel hardware. 
#if defined(TARGET_AMD64) || defined(TARGET_X86) if (!CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_EnableHWIntrinsic)) { CPUCompileFlags.Clear(InstructionSet_X86Base); } if (!CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_EnableAES)) { CPUCompileFlags.Clear(InstructionSet_AES); } if (!CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_EnableAVX)) { CPUCompileFlags.Clear(InstructionSet_AVX); } if (!CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_EnableAVX2)) { CPUCompileFlags.Clear(InstructionSet_AVX2); } if (!CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_EnableAVXVNNI)) { CPUCompileFlags.Clear(InstructionSet_AVXVNNI); } if (!CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_EnableBMI1)) { CPUCompileFlags.Clear(InstructionSet_BMI1); } if (!CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_EnableBMI2)) { CPUCompileFlags.Clear(InstructionSet_BMI2); } if (!CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_EnableFMA)) { CPUCompileFlags.Clear(InstructionSet_FMA); } if (!CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_EnableLZCNT)) { CPUCompileFlags.Clear(InstructionSet_LZCNT); } if (!CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_EnablePCLMULQDQ)) { CPUCompileFlags.Clear(InstructionSet_PCLMULQDQ); } if (!CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_EnablePOPCNT)) { CPUCompileFlags.Clear(InstructionSet_POPCNT); } if (!CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_EnableSSE)) { CPUCompileFlags.Clear(InstructionSet_SSE); } if (!CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_EnableSSE2)) { CPUCompileFlags.Clear(InstructionSet_SSE2); } // We need to additionally check that EXTERNAL_EnableSSE3_4 is set, as that // is a prexisting config flag that controls the SSE3+ ISAs if (!CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_EnableSSE3) || !CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_EnableSSE3_4)) { CPUCompileFlags.Clear(InstructionSet_SSE3); } if (!CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_EnableSSE41)) { CPUCompileFlags.Clear(InstructionSet_SSE41); } if 
(!CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_EnableSSE42)) { CPUCompileFlags.Clear(InstructionSet_SSE42); } if (!CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_EnableSSSE3)) { CPUCompileFlags.Clear(InstructionSet_SSSE3); } #elif defined(TARGET_ARM64) if (!CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_EnableHWIntrinsic)) { CPUCompileFlags.Clear(InstructionSet_ArmBase); } if (!CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_EnableArm64AdvSimd)) { CPUCompileFlags.Clear(InstructionSet_AdvSimd); } if (!CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_EnableArm64Aes)) { CPUCompileFlags.Clear(InstructionSet_Aes); } if (!CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_EnableArm64Atomics)) { CPUCompileFlags.Clear(InstructionSet_Atomics); } if (!CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_EnableArm64Crc32)) { CPUCompileFlags.Clear(InstructionSet_Crc32); } if (!CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_EnableArm64Dczva)) { CPUCompileFlags.Clear(InstructionSet_Dczva); } if (!CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_EnableArm64Dp)) { CPUCompileFlags.Clear(InstructionSet_Dp); } if (!CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_EnableArm64Rdm)) { CPUCompileFlags.Clear(InstructionSet_Rdm); } if (!CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_EnableArm64Sha1)) { CPUCompileFlags.Clear(InstructionSet_Sha1); } if (!CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_EnableArm64Sha256)) { CPUCompileFlags.Clear(InstructionSet_Sha256); } #endif // These calls are very important as it ensures the flags are consistent with any // removals specified above. This includes removing corresponding 64-bit ISAs // and any other implications such as SSE2 depending on SSE or AdvSimd on ArmBase CPUCompileFlags.Set64BitInstructionSetVariants(); CPUCompileFlags.EnsureValidInstructionSetSupport(); m_CPUCompileFlags = CPUCompileFlags; } // Define some data that we can use to get a better idea of what happened when we get a Watson dump that indicates the JIT failed to load. 
// This will be used and updated by the JIT loading and initialization functions, and the data written will get written into a Watson dump. enum JIT_LOAD_JIT_ID { JIT_LOAD_MAIN = 500, // The "main" JIT. Normally, this is named "clrjit.dll". Start at a number that is somewhat uncommon (i.e., not zero or 1) to help distinguish from garbage, in process dumps. // 501 is JIT_LOAD_LEGACY on some platforms; please do not reuse this value. JIT_LOAD_ALTJIT = 502 // An "altjit". By default, named something like "clrjit_<targetos>_<target_arch>_<host_arch>.dll". Used both internally, as well as externally for JIT CTP builds. }; enum JIT_LOAD_STATUS { JIT_LOAD_STATUS_STARTING = 1001, // The JIT load process is starting. Start at a number that is somewhat uncommon (i.e., not zero or 1) to help distinguish from garbage, in process dumps. JIT_LOAD_STATUS_DONE_LOAD, // LoadLibrary of the JIT dll succeeded. JIT_LOAD_STATUS_DONE_GET_JITSTARTUP, // GetProcAddress for "jitStartup" succeeded. JIT_LOAD_STATUS_DONE_CALL_JITSTARTUP, // Calling jitStartup() succeeded. JIT_LOAD_STATUS_DONE_GET_GETJIT, // GetProcAddress for "getJit" succeeded. JIT_LOAD_STATUS_DONE_CALL_GETJIT, // Calling getJit() succeeded. JIT_LOAD_STATUS_DONE_CALL_GETVERSIONIDENTIFIER, // Calling ICorJitCompiler::getVersionIdentifier() succeeded. JIT_LOAD_STATUS_DONE_VERSION_CHECK, // The JIT-EE version identifier check succeeded. JIT_LOAD_STATUS_DONE, // The JIT load is complete, and successful. }; struct JIT_LOAD_DATA { JIT_LOAD_JIT_ID jld_id; // Which JIT are we currently loading? JIT_LOAD_STATUS jld_status; // The current load status of a JIT load attempt. HRESULT jld_hr; // If the JIT load fails, the last jld_status will be JIT_LOAD_STATUS_STARTING. // In that case, this will contain the HRESULT returned by LoadLibrary. // Otherwise, this will be S_OK (which is zero). }; // Here's the global data for JIT load and initialization state. 
JIT_LOAD_DATA g_JitLoadData; // Validate that the name used to load the JIT is just a simple file name // and does not contain something that could be used in a non-qualified path. // For example, using the string "..\..\..\myjit.dll" we might attempt to // load a JIT from the root of the drive. // // The minimal set of characters that we must check for and exclude are: // '\\' - (backslash) // '/' - (forward slash) // ':' - (colon) // // Returns false if we find any of these characters in 'pwzJitName' // Returns true if we reach the null terminator without encountering // any of these characters. // static bool ValidateJitName(LPCWSTR pwzJitName) { LPCWSTR pCurChar = pwzJitName; wchar_t curChar; do { curChar = *pCurChar; if ((curChar == '\\') || (curChar == '/') || (curChar == ':')) { // Return false if we find any of these character in 'pwzJitName' return false; } pCurChar++; } while (curChar != 0); // Return true; we have reached the null terminator // return true; } CORINFO_OS getClrVmOs(); // LoadAndInitializeJIT: load the JIT dll into the process, and initialize it (call the UtilCode initialization function, // check the JIT-EE interface GUID, etc.) // // Parameters: // // pwzJitName - The filename of the JIT .dll file to load. E.g., "altjit.dll". // phJit - On return, *phJit is the Windows module handle of the loaded JIT dll. It will be NULL if the load failed. // ppICorJitCompiler - On return, *ppICorJitCompiler is the ICorJitCompiler* returned by the JIT's getJit() entrypoint. // It is NULL if the JIT returns a NULL interface pointer, or if the JIT-EE interface GUID is mismatched. // Note that if the given JIT is loaded, but the interface is mismatched, then *phJit will be legal and non-NULL // even though *ppICorJitCompiler is NULL. This allows the caller to unload the JIT dll, if necessary // (nobody does this today). // pJitLoadData - Pointer to a structure that we update as we load and initialize the JIT to indicate how far we've gotten. 
// This is used to help understand problems we see with JIT loading that come in via Watson dumps. Since we don't throw
// an exception immediately upon failure, we can lose information about what the failure was if we don't store this
// information in a way that persists into a process dump.
// targetOs - Target OS for JIT
//
static void LoadAndInitializeJIT(LPCWSTR pwzJitName, OUT HINSTANCE* phJit, OUT ICorJitCompiler** ppICorJitCompiler, IN OUT JIT_LOAD_DATA* pJitLoadData, CORINFO_OS targetOs)
{
    STANDARD_VM_CONTRACT;

    _ASSERTE(phJit != NULL);
    _ASSERTE(ppICorJitCompiler != NULL);
    _ASSERTE(pJitLoadData != NULL);

    // Record that a load attempt is starting; jld_status is advanced after each
    // successful step so a dump shows exactly where a failure happened.
    pJitLoadData->jld_status = JIT_LOAD_STATUS_STARTING;
    pJitLoadData->jld_hr = S_OK;

    *phJit = NULL;
    *ppICorJitCompiler = NULL;

    if (pwzJitName == nullptr)
    {
        pJitLoadData->jld_hr = E_FAIL;
        LOG((LF_JIT, LL_FATALERROR, "LoadAndInitializeJIT: pwzJitName is null"));
        return;
    }

    HRESULT hr = E_FAIL;

    // Only attempt the load when the name is a plain file name (no path characters).
    if (ValidateJitName(pwzJitName))
    {
        // Load JIT from next to CoreCLR binary
        PathString CoreClrFolderHolder;
        if (GetClrModulePathName(CoreClrFolderHolder) && !CoreClrFolderHolder.IsEmpty())
        {
            SString::Iterator iter = CoreClrFolderHolder.End();
            BOOL findSep = CoreClrFolderHolder.FindBack(iter, DIRECTORY_SEPARATOR_CHAR_W);
            if (findSep)
            {
                // Replace the trailing file name of the CoreCLR module path with the JIT name.
                SString sJitName(pwzJitName);
                CoreClrFolderHolder.Replace(iter + 1, CoreClrFolderHolder.End() - (iter + 1), sJitName);

                *phJit = CLRLoadLibrary(CoreClrFolderHolder.GetUnicode());
                if (*phJit != NULL)
                {
                    hr = S_OK;
                }
            }
        }
    }
    else
    {
        LOG((LF_JIT, LL_FATALERROR, "LoadAndInitializeJIT: invalid characters in %S\n", pwzJitName));
    }

    if (SUCCEEDED(hr))
    {
        pJitLoadData->jld_status = JIT_LOAD_STATUS_DONE_LOAD;

        // The JIT entrypoints may throw; any exception is swallowed below and
        // leaves jld_status at the last successfully completed step.
        EX_TRY
        {
            typedef void (* pjitStartup)(ICorJitHost*);
            pjitStartup jitStartupFn = (pjitStartup) GetProcAddress(*phJit, "jitStartup");

            if (jitStartupFn)
            {
                pJitLoadData->jld_status = JIT_LOAD_STATUS_DONE_GET_JITSTARTUP;

                (*jitStartupFn)(JitHost::getJitHost());

                pJitLoadData->jld_status = JIT_LOAD_STATUS_DONE_CALL_JITSTARTUP;
            }

            typedef ICorJitCompiler* (__stdcall* pGetJitFn)();
            pGetJitFn getJitFn = (pGetJitFn) GetProcAddress(*phJit, "getJit");

            if (getJitFn)
            {
                pJitLoadData->jld_status = JIT_LOAD_STATUS_DONE_GET_GETJIT;

                ICorJitCompiler* pICorJitCompiler = (*getJitFn)();

                if (pICorJitCompiler != NULL)
                {
                    pJitLoadData->jld_status = JIT_LOAD_STATUS_DONE_CALL_GETJIT;

                    // Compare the JIT-EE interface GUID the JIT was built against with
                    // ours; a mismatch means the JIT cannot be used with this VM.
                    GUID versionId;
                    memset(&versionId, 0, sizeof(GUID));
                    pICorJitCompiler->getVersionIdentifier(&versionId);

                    pJitLoadData->jld_status = JIT_LOAD_STATUS_DONE_CALL_GETVERSIONIDENTIFIER;

                    if (memcmp(&versionId, &JITEEVersionIdentifier, sizeof(GUID)) == 0)
                    {
                        pJitLoadData->jld_status = JIT_LOAD_STATUS_DONE_VERSION_CHECK;

                        // Specify to the JIT that it is working with the OS that we are compiled against
                        pICorJitCompiler->setTargetOS(targetOs);

                        // The JIT has loaded and passed the version identifier test, so publish the JIT interface to the caller.
                        *ppICorJitCompiler = pICorJitCompiler;

                        // The JIT is completely loaded and initialized now.
                        pJitLoadData->jld_status = JIT_LOAD_STATUS_DONE;
                    }
                    else
                    {
                        // Mismatched version ID. Fail the load.
                        LOG((LF_JIT, LL_FATALERROR, "LoadAndInitializeJIT: mismatched JIT version identifier in %S\n", pwzJitName));
                    }
                }
                else
                {
                    LOG((LF_JIT, LL_FATALERROR, "LoadAndInitializeJIT: failed to get ICorJitCompiler in %S\n", pwzJitName));
                }
            }
            else
            {
                LOG((LF_JIT, LL_FATALERROR, "LoadAndInitializeJIT: failed to find 'getJit' entrypoint in %S\n", pwzJitName));
            }
        }
        EX_CATCH
        {
            LOG((LF_JIT, LL_FATALERROR, "LoadAndInitializeJIT: caught an exception trying to initialize %S\n", pwzJitName));
        }
        EX_END_CATCH(SwallowAllExceptions)
    }
    else
    {
        pJitLoadData->jld_hr = hr;
        LOG((LF_JIT, LL_FATALERROR, "LoadAndInitializeJIT: failed to load %S, hr=0x%08x\n", pwzJitName, hr));
    }
}

#ifdef FEATURE_MERGE_JIT_AND_ENGINE
// When the JIT is statically linked into the VM, these are its entrypoints.
EXTERN_C void jitStartup(ICorJitHost* host);
EXTERN_C ICorJitCompiler* getJit();
#endif // FEATURE_MERGE_JIT_AND_ENGINE

// Load (and initialize) the JIT compiler(s) for this process. Returns TRUE when
// the required JIT(s) loaded successfully; safe to call from multiple threads.
BOOL EEJitManager::LoadJIT()
{
    STANDARD_VM_CONTRACT;

    // If the JIT is already loaded, don't take the lock.
    if (IsJitLoaded())
        return TRUE;

    // Use m_JitLoadCritSec to ensure that the JIT is loaded on one thread only
    CrstHolder chRead(&m_JitLoadCritSec);

    // Did someone load the JIT before we got the lock?
    if (IsJitLoaded())
        return TRUE;

    SetCpuInfo();

    ICorJitCompiler* newJitCompiler = NULL;

#ifdef FEATURE_MERGE_JIT_AND_ENGINE

    EX_TRY
    {
        jitStartup(JitHost::getJitHost());

        newJitCompiler = getJit();

        // We don't need to call getVersionIdentifier(), since the JIT is linked together with the VM.
    }
    EX_CATCH
    {
        // A failure leaves newJitCompiler NULL; the IsJitLoaded() check at the end reports it.
    }
    EX_END_CATCH(SwallowAllExceptions)

#else // !FEATURE_MERGE_JIT_AND_ENGINE

    m_JITCompiler = NULL;
#if defined(TARGET_X86) || defined(TARGET_AMD64)
    m_JITCompilerOther = NULL;
#endif

    g_JitLoadData.jld_id = JIT_LOAD_MAIN;
    LoadAndInitializeJIT(ExecutionManager::GetJitName(), &m_JITCompiler, &newJitCompiler, &g_JitLoadData, getClrVmOs());

#endif // !FEATURE_MERGE_JIT_AND_ENGINE

#ifdef ALLOW_SXS_JIT
    // Do not load altjit.dll unless COMPlus_AltJit is set.
    // Even if the main JIT fails to load, if the user asks for an altjit we try to load it.
    // This allows us to display load error messages for loading altjit.

    ICorJitCompiler* newAltJitCompiler = NULL;

    LPWSTR altJitConfig;
    IfFailThrow(CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_AltJit, &altJitConfig));

    m_AltJITCompiler = NULL;

    if (altJitConfig != NULL)
    {
        // Load the altjit into the system.
        // Note: altJitName must be declared as a const otherwise assigning the string
        // constructed by MAKEDLLNAME_W() to altJitName will cause a build break on Unix.
        LPCWSTR altJitName;
        IfFailThrow(CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_AltJitName, (LPWSTR*)&altJitName));

        if (altJitName == NULL)
        {
            // No explicit name configured: fall back to the default altjit dll name
            // for the current target OS/architecture combination.
#ifdef TARGET_WINDOWS
#ifdef TARGET_X86
            altJitName = MAKEDLLNAME_W(W("clrjit_win_x86_x86"));
#elif defined(TARGET_AMD64)
            altJitName = MAKEDLLNAME_W(W("clrjit_win_x64_x64"));
#endif
#else // TARGET_WINDOWS
#ifdef TARGET_X86
            altJitName = MAKEDLLNAME_W(W("clrjit_unix_x86_x86"));
#elif defined(TARGET_AMD64)
            altJitName = MAKEDLLNAME_W(W("clrjit_unix_x64_x64"));
#endif
#endif // TARGET_WINDOWS

#if defined(TARGET_ARM)
            altJitName = MAKEDLLNAME_W(W("clrjit_universal_arm_arm"));
#elif defined(TARGET_ARM64)
            altJitName = MAKEDLLNAME_W(W("clrjit_universal_arm64_arm64"));
#endif // TARGET_ARM
        }

        CORINFO_OS targetOs = getClrVmOs();
        LPWSTR altJitOsConfig;
        IfFailThrow(CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_AltJitOs, &altJitOsConfig));
        if (altJitOsConfig != NULL)
        {
            // We have some inconsistency all over the place with osx vs macos, let's handle both here
            if ((_wcsicmp(altJitOsConfig, W("macos")) == 0) || (_wcsicmp(altJitOsConfig, W("osx")) == 0))
            {
                targetOs = CORINFO_MACOS;
            }
            else if ((_wcsicmp(altJitOsConfig, W("linux")) == 0) || (_wcsicmp(altJitOsConfig, W("unix")) == 0))
            {
                targetOs = CORINFO_UNIX;
            }
            else if (_wcsicmp(altJitOsConfig, W("windows")) == 0)
            {
                targetOs = CORINFO_WINNT;
            }
            else
            {
                _ASSERTE(!"Unknown AltJitOS, it has to be either Windows, Linux or macOS");
            }
        }
        g_JitLoadData.jld_id = JIT_LOAD_ALTJIT;
        LoadAndInitializeJIT(altJitName, &m_AltJITCompiler, &newAltJitCompiler, &g_JitLoadData, targetOs);
    }
#endif // ALLOW_SXS_JIT

    // Publish the compilers.

#ifdef ALLOW_SXS_JIT
    m_AltJITRequired = (altJitConfig != NULL);
    m_alternateJit = newAltJitCompiler;
#endif // ALLOW_SXS_JIT

    m_jit = newJitCompiler;

    // Failing to load the main JIT is a failure.
    // If the user requested an altjit and we failed to load an altjit, that is also a failure.
    // In either failure case, we'll rip down the VM (so no need to clean up (unload) either JIT that did load successfully.
    return IsJitLoaded();
}

//**************************************************************************

// A CodeFragmentHeap hands out small executable fragments and keeps a
// singly-linked free list (m_pFreeBlocks) for reuse, guarded by m_CritSec.
CodeFragmentHeap::CodeFragmentHeap(LoaderAllocator * pAllocator, StubCodeBlockKind kind)
    : m_pAllocator(pAllocator), m_pFreeBlocks(NULL), m_kind(kind),
    // CRST_DEBUGGER_THREAD - We take this lock on debugger thread during EnC add meth
    m_CritSec(CrstCodeFragmentHeap, CrstFlags(CRST_UNSAFE_ANYMODE | CRST_DEBUGGER_THREAD))
{
    WRAPPER_NO_CONTRACT;
}

// Push the range [pMem, pMem + dwSize) onto the free-block list so it can be
// handed out again by RealAllocAlignedMem.
void CodeFragmentHeap::AddBlock(VOID * pMem, size_t dwSize)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
    }
    CONTRACTL_END;

    // The new "nothrow" below failure is handled in a non-fault way, so
    // make sure that callers with FORBID_FAULT can call this method without
    // firing the contract violation assert.
    PERMANENT_CONTRACT_VIOLATION(FaultViolation, ReasonContractInfrastructure);

    FreeBlock * pBlock = new (nothrow) FreeBlock;
    // In the OOM case we don't add the block to the list of free blocks
    // as we are in a FORBID_FAULT code path.
    if (pBlock != NULL)
    {
        // Link the new free block at the head of the list.
        pBlock->m_pNext  = m_pFreeBlocks;
        pBlock->m_pBlock = pMem;
        pBlock->m_dwSize = dwSize;
        m_pFreeBlocks = pBlock;
    }
}

// Unlink and delete the free-list node that *ppBlock points at (the block's
// memory itself is not freed here, only the bookkeeping node).
void CodeFragmentHeap::RemoveBlock(FreeBlock ** ppBlock)
{
    LIMITED_METHOD_CONTRACT;
    FreeBlock * pBlock = *ppBlock;
    *ppBlock = pBlock->m_pNext;
    delete pBlock;
}

CodeFragmentHeap::~CodeFragmentHeap()
{
    // Delete all free-list bookkeeping nodes.
    FreeBlock* pBlock = m_pFreeBlocks;
    while (pBlock != NULL)
    {
        FreeBlock *pNextBlock = pBlock->m_pNext;
        delete pBlock;
        pBlock = pNextBlock;
    }
}

// Allocate dwRequestedSize bytes aligned to dwAlignment. First searches the
// free list for the best (smallest) fit; otherwise carves a fresh fragment
// block out of the EEJitManager. Any usable remainder is returned to the free
// list (subject to the small-block heuristic below).
TaggedMemAllocPtr CodeFragmentHeap::RealAllocAlignedMem(size_t  dwRequestedSize
                    ,unsigned  dwAlignment
#ifdef _DEBUG
                    ,_In_ _In_z_ const char *szFile
                    ,int  lineNum
#endif
                    )
{
    CrstHolder ch(&m_CritSec);

    dwRequestedSize = ALIGN_UP(dwRequestedSize, sizeof(TADDR));

    // We will try to batch up allocation of small blocks into one large allocation
#define SMALL_BLOCK_THRESHOLD 0x100
    SIZE_T nFreeSmallBlocks = 0;

    // Best-fit scan: pick the smallest free block that can still satisfy the
    // request after rounding its start up to dwAlignment. While scanning,
    // count the small free blocks for the remainder heuristic below.
    FreeBlock ** ppBestFit = NULL;
    FreeBlock ** ppFreeBlock = &m_pFreeBlocks;
    while (*ppFreeBlock != NULL)
    {
        FreeBlock * pFreeBlock = *ppFreeBlock;
        if (((BYTE *)pFreeBlock->m_pBlock + pFreeBlock->m_dwSize) - (BYTE *)ALIGN_UP(pFreeBlock->m_pBlock, dwAlignment) >= (SSIZE_T)dwRequestedSize)
        {
            if (ppBestFit == NULL || pFreeBlock->m_dwSize < (*ppBestFit)->m_dwSize)
                ppBestFit = ppFreeBlock;
        }
        else
        {
            if (pFreeBlock->m_dwSize < SMALL_BLOCK_THRESHOLD)
                nFreeSmallBlocks++;
        }
        ppFreeBlock = &(*ppFreeBlock)->m_pNext;
    }

    VOID * pMem;
    SIZE_T dwSize;
    if (ppBestFit != NULL)
    {
        // Reuse the best-fitting free block.
        pMem = (*ppBestFit)->m_pBlock;
        dwSize = (*ppBestFit)->m_dwSize;

        RemoveBlock(ppBestFit);
    }
    else
    {
        // No free block fits: allocate a fresh fragment block, batching small
        // requests into a larger allocation.
        dwSize = dwRequestedSize;
        if (dwSize < SMALL_BLOCK_THRESHOLD)
            dwSize = 4 * SMALL_BLOCK_THRESHOLD;
        pMem = ExecutionManager::GetEEJitManager()->allocCodeFragmentBlock(dwSize, dwAlignment, m_pAllocator, m_kind);
    }

    // dwExtra is the padding consumed in front of the payload to reach alignment.
    SIZE_T dwExtra = (BYTE *)ALIGN_UP(pMem, dwAlignment) - (BYTE *)pMem;
    _ASSERTE(dwSize >= dwExtra + dwRequestedSize);
    SIZE_T dwRemaining = dwSize - (dwExtra + dwRequestedSize);

    // Avoid accumulation of too many small blocks. The more small free blocks
    // we have, the more picky we are going to be about adding new ones.
    if ((dwRemaining >= max(sizeof(FreeBlock), sizeof(StubPrecode)) + (SMALL_BLOCK_THRESHOLD / 0x10) * nFreeSmallBlocks) || (dwRemaining >= SMALL_BLOCK_THRESHOLD))
    {
        AddBlock((BYTE *)pMem + dwExtra + dwRequestedSize, dwRemaining);
        dwSize -= dwRemaining;
    }

    TaggedMemAllocPtr tmap;
    tmap.m_pMem             = pMem;
    tmap.m_dwRequestedSize  = dwSize;
    tmap.m_pHeap            = this;
    tmap.m_dwExtra          = dwExtra;
#ifdef _DEBUG
    tmap.m_szFile           = szFile;
    tmap.m_lineNum          = lineNum;
#endif

    return tmap;
}

// Return [pMem, pMem + dwSize) to the heap: zero it (via a writer mapping),
// coalesce it with any adjacent free blocks, and push it on the free list.
void CodeFragmentHeap::RealBackoutMem(void *pMem
                    , size_t dwSize
#ifdef _DEBUG
                    , _In_ _In_z_ const char *szFile
                    , int lineNum
                    , _In_ _In_z_ const char *szAllocFile
                    , int allocLineNum
#endif
                    )
{
    CrstHolder ch(&m_CritSec);

    {
        ExecutableWriterHolder<BYTE> memWriterHolder((BYTE*)pMem, dwSize);
        ZeroMemory(memWriterHolder.GetRW(), dwSize);
    }

    //
    // Try to coalesce blocks if possible
    //
    FreeBlock ** ppFreeBlock = &m_pFreeBlocks;
    while (*ppFreeBlock != NULL)
    {
        FreeBlock * pFreeBlock = *ppFreeBlock;

        if ((BYTE *)pFreeBlock == (BYTE *)pMem + dwSize)
        {
            // Free block immediately follows the returned range: absorb it.
            // pMem = pMem;
            dwSize += pFreeBlock->m_dwSize;
            RemoveBlock(ppFreeBlock);
            continue;
        }
        else if ((BYTE *)pFreeBlock + pFreeBlock->m_dwSize == (BYTE *)pMem)
        {
            // Free block immediately precedes the returned range: absorb it.
            pMem = pFreeBlock;
            dwSize += pFreeBlock->m_dwSize;
            RemoveBlock(ppFreeBlock);
            continue;
        }

        ppFreeBlock = &(*ppFreeBlock)->m_pNext;
    }

    AddBlock(pMem, dwSize);
}

//**************************************************************************

LoaderCodeHeap::LoaderCodeHeap()
    : m_LoaderHeap(NULL,  // RangeList *pRangeList
                   TRUE), // BOOL fMakeExecutable
    m_cbMinNextPad(0)
{
    WRAPPER_NO_CONTRACT;
}

// Throw the dedicated "out of memory within range" exception; optionally break
// into the debugger or fail fast first, based on configuration.
void ThrowOutOfMemoryWithinRange()
{
    CONTRACTL {
        THROWS;
        GC_NOTRIGGER;
    } CONTRACTL_END;

    // Allow breaking into debugger or terminating the process when this exception occurs
    switch (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_BreakOnOutOfMemoryWithinRange))
    {
    case 1:
        DebugBreak();
        break;
    case 2:
        EEPOLICY_HANDLE_FATAL_ERROR(COR_E_OUTOFMEMORY);
        break;
    default:
        break;
    }

    EX_THROW(EEMessageException, (kOutOfMemoryException, IDS_EE_OUT_OF_MEMORY_WITHIN_RANGE));
}

#ifdef TARGET_AMD64
// Take an entire emergency reserve block that lies within [loAddr, hiAddr) off
// the reserve list and hand it to the caller; *pReserveSize receives its size.
// Returns NULL if no reserve in the list satisfies the range. Caller must hold
// m_CodeHeapCritSec (see PRECONDITION).
BYTE * EEJitManager::AllocateFromEmergencyJumpStubReserve(const BYTE * loAddr, const BYTE * hiAddr, SIZE_T * pReserveSize)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        PRECONDITION(m_CodeHeapCritSec.OwnedByCurrentThread());
    } CONTRACTL_END;

    for (EmergencyJumpStubReserve ** ppPrev = &m_pEmergencyJumpStubReserveList; *ppPrev != NULL; ppPrev = &(*ppPrev)->m_pNext)
    {
        EmergencyJumpStubReserve * pList = *ppPrev;

        if (loAddr <= pList->m_ptr &&
            pList->m_ptr + pList->m_size < hiAddr)
        {
            // Unlink the matching reserve and return its whole block.
            *ppPrev = pList->m_pNext;

            BYTE * pBlock = pList->m_ptr;
            *pReserveSize = pList->m_size;

            delete pList;

            return pBlock;
        }
    }

    return NULL;
}

// Make sure at least reserveSize bytes of emergency jump stub reserve exist
// within +/-2GB of the image [pImageBase, pImageBase + imageSize), so that
// rel32 jump stubs for that image can always be placed.
VOID EEJitManager::EnsureJumpStubReserve(BYTE * pImageBase, SIZE_T imageSize, SIZE_T reserveSize)
{
    CONTRACTL {
        THROWS;
        GC_NOTRIGGER;
    } CONTRACTL_END;

    CrstHolder ch(&m_CodeHeapCritSec);

    // Compute the +/-2GB window reachable by a rel32 branch, clamping on overflow.
    BYTE * loAddr = pImageBase + imageSize + INT32_MIN;
    if (loAddr > pImageBase) loAddr = NULL; // overflow

    BYTE * hiAddr = pImageBase + INT32_MAX;
    if (hiAddr < pImageBase) hiAddr = (BYTE *)UINT64_MAX; // overflow

    // Count existing in-range reserves against the requested amount first.
    for (EmergencyJumpStubReserve * pList = m_pEmergencyJumpStubReserveList; pList != NULL; pList = pList->m_pNext)
    {
        if (loAddr <= pList->m_ptr &&
            pList->m_ptr + pList->m_size < hiAddr)
        {
            SIZE_T used = min(reserveSize, pList->m_free);
            pList->m_free -= used;

            reserveSize -= used;
            if (reserveSize == 0)
                return;
        }
    }

    // Try several different strategies - the most efficient one first
    int allocMode = 0;

    // Try to reserve at least 16MB at a time
    SIZE_T allocChunk = max(ALIGN_UP(reserveSize, VIRTUAL_ALLOC_RESERVE_GRANULARITY), 16*1024*1024);

    while (reserveSize > 0)
    {
        NewHolder<EmergencyJumpStubReserve> pNewReserve(new EmergencyJumpStubReserve());

        for (;;)
        {
            BYTE * loAddrCurrent = loAddr;
            BYTE * hiAddrCurrent = hiAddr;

            switch (allocMode)
            {
            case 0:
                // First, try to allocate towards the center of the allowed range. It is more likely to
                // satisfy subsequent reservations.
                loAddrCurrent = loAddr + (hiAddr - loAddr) / 8;
                hiAddrCurrent = hiAddr - (hiAddr - loAddr) / 8;
                break;
            case 1:
                // Try the whole allowed range
                break;
            case 2:
                // If the large allocation failed, retry with small chunk size
                allocChunk = VIRTUAL_ALLOC_RESERVE_GRANULARITY;
                break;
            default:
                return; // Unable to allocate the reserve - give up
            }

            pNewReserve->m_ptr = (BYTE*)ExecutableAllocator::Instance()->ReserveWithinRange(allocChunk, loAddrCurrent, hiAddrCurrent);

            if (pNewReserve->m_ptr != NULL)
                break;

            // Retry with the next allocation strategy
            allocMode++;
        }

        SIZE_T used = min(allocChunk, reserveSize);
        reserveSize -= used;

        pNewReserve->m_size = allocChunk;
        pNewReserve->m_free = allocChunk - used;

        // Add it to the list
        pNewReserve->m_pNext = m_pEmergencyJumpStubReserveList;
        m_pEmergencyJumpStubReserveList = pNewReserve.Extract();
    }
}
#endif // TARGET_AMD64

// Compute how many bytes of a new code heap should be held back for jump stubs,
// based on the configured percentage (with a floor large enough for one block).
static size_t GetDefaultReserveForJumpStubs(size_t codeHeapSize)
{
    LIMITED_METHOD_CONTRACT;

#if defined(TARGET_AMD64) || defined(TARGET_ARM64)
    //
    // Keep a small default reserve at the end of the codeheap for jump stubs. It should reduce
    // chance that we won't be able to allocate a jump stub because of lack of suitable address space.
// static ConfigDWORD configCodeHeapReserveForJumpStubs; int percentReserveForJumpStubs = configCodeHeapReserveForJumpStubs.val(CLRConfig::INTERNAL_CodeHeapReserveForJumpStubs); size_t reserveForJumpStubs = percentReserveForJumpStubs * (codeHeapSize / 100); size_t minReserveForJumpStubs = sizeof(CodeHeader) + sizeof(JumpStubBlockHeader) + (size_t) DEFAULT_JUMPSTUBS_PER_BLOCK * BACK_TO_BACK_JUMP_ALLOCATE_SIZE + CODE_SIZE_ALIGN + BYTES_PER_BUCKET; return max(reserveForJumpStubs, minReserveForJumpStubs); #else return 0; #endif } HeapList* LoaderCodeHeap::CreateCodeHeap(CodeHeapRequestInfo *pInfo, LoaderHeap *pJitMetaHeap) { CONTRACT(HeapList *) { THROWS; GC_NOTRIGGER; POSTCONDITION((RETVAL != NULL) || !pInfo->getThrowOnOutOfMemoryWithinRange()); } CONTRACT_END; size_t reserveSize = pInfo->getReserveSize(); size_t initialRequestSize = pInfo->getRequestSize(); const BYTE * loAddr = pInfo->m_loAddr; const BYTE * hiAddr = pInfo->m_hiAddr; // Make sure that what we are reserving will fix inside a DWORD if (reserveSize != (DWORD) reserveSize) { _ASSERTE(!"reserveSize does not fit in a DWORD"); EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE); } LOG((LF_JIT, LL_INFO100, "Request new LoaderCodeHeap::CreateCodeHeap(%08x, %08x, for loader allocator" FMT_ADDR "in" FMT_ADDR ".." 
FMT_ADDR ")\n", (DWORD) reserveSize, (DWORD) initialRequestSize, DBG_ADDR(pInfo->m_pAllocator), DBG_ADDR(loAddr), DBG_ADDR(hiAddr) )); NewHolder<LoaderCodeHeap> pCodeHeap(new LoaderCodeHeap()); BYTE * pBaseAddr = NULL; DWORD dwSizeAcquiredFromInitialBlock = 0; bool fAllocatedFromEmergencyJumpStubReserve = false; pBaseAddr = (BYTE *)pInfo->m_pAllocator->GetCodeHeapInitialBlock(loAddr, hiAddr, (DWORD)initialRequestSize, &dwSizeAcquiredFromInitialBlock); if (pBaseAddr != NULL) { pCodeHeap->m_LoaderHeap.SetReservedRegion(pBaseAddr, dwSizeAcquiredFromInitialBlock, FALSE); } else { if (loAddr != NULL || hiAddr != NULL) { #ifdef _DEBUG // Always exercise the fallback path in the caller when forced relocs are turned on if (!pInfo->getThrowOnOutOfMemoryWithinRange() && PEDecoder::GetForceRelocs()) RETURN NULL; #endif pBaseAddr = (BYTE*)ExecutableAllocator::Instance()->ReserveWithinRange(reserveSize, loAddr, hiAddr); if (!pBaseAddr) { // Conserve emergency jump stub reserve until when it is really needed if (!pInfo->getThrowOnOutOfMemoryWithinRange()) RETURN NULL; #ifdef TARGET_AMD64 pBaseAddr = ExecutionManager::GetEEJitManager()->AllocateFromEmergencyJumpStubReserve(loAddr, hiAddr, &reserveSize); if (!pBaseAddr) ThrowOutOfMemoryWithinRange(); fAllocatedFromEmergencyJumpStubReserve = true; #else ThrowOutOfMemoryWithinRange(); #endif // TARGET_AMD64 } } else { pBaseAddr = (BYTE*)ExecutableAllocator::Instance()->Reserve(reserveSize); if (!pBaseAddr) ThrowOutOfMemory(); } pCodeHeap->m_LoaderHeap.SetReservedRegion(pBaseAddr, reserveSize, TRUE); } // this first allocation is critical as it sets up correctly the loader heap info HeapList *pHp = new HeapList; #if defined(TARGET_AMD64) || defined(TARGET_ARM64) pHp->CLRPersonalityRoutine = (BYTE *)pCodeHeap->m_LoaderHeap.AllocMem(JUMP_ALLOCATE_SIZE); #else // Ensure that the heap has a reserved block of memory and so the GetReservedBytesFree() // and GetAllocPtr() calls below return nonzero values. 
pCodeHeap->m_LoaderHeap.ReservePages(1); #endif pHp->pHeap = pCodeHeap; size_t heapSize = pCodeHeap->m_LoaderHeap.GetReservedBytesFree(); size_t nibbleMapSize = HEAP2MAPSIZE(ROUND_UP_TO_PAGE(heapSize)); pHp->startAddress = (TADDR)pCodeHeap->m_LoaderHeap.GetAllocPtr(); pHp->endAddress = pHp->startAddress; pHp->maxCodeHeapSize = heapSize; pHp->reserveForJumpStubs = fAllocatedFromEmergencyJumpStubReserve ? pHp->maxCodeHeapSize : GetDefaultReserveForJumpStubs(pHp->maxCodeHeapSize); _ASSERTE(heapSize >= initialRequestSize); // We do not need to memset this memory, since ClrVirtualAlloc() guarantees that the memory is zero. // Furthermore, if we avoid writing to it, these pages don't come into our working set pHp->mapBase = ROUND_DOWN_TO_PAGE(pHp->startAddress); // round down to next lower page align pHp->pHdrMap = (DWORD*)(void*)pJitMetaHeap->AllocMem(S_SIZE_T(nibbleMapSize)); LOG((LF_JIT, LL_INFO100, "Created new CodeHeap(" FMT_ADDR ".." FMT_ADDR ")\n", DBG_ADDR(pHp->startAddress), DBG_ADDR(pHp->startAddress+pHp->maxCodeHeapSize) )); #ifdef TARGET_64BIT ExecutableWriterHolder<BYTE> personalityRoutineWriterHolder(pHp->CLRPersonalityRoutine, 12); emitJump(pHp->CLRPersonalityRoutine, personalityRoutineWriterHolder.GetRW(), (void *)ProcessCLRException); #endif // TARGET_64BIT pCodeHeap.SuppressRelease(); RETURN pHp; } void * LoaderCodeHeap::AllocMemForCode_NoThrow(size_t header, size_t size, DWORD alignment, size_t reserveForJumpStubs) { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; if (m_cbMinNextPad > (SSIZE_T)header) header = m_cbMinNextPad; void * p = m_LoaderHeap.AllocMemForCode_NoThrow(header, size, alignment, reserveForJumpStubs); if (p == NULL) return NULL; // If the next allocation would have started in the same nibble map entry, allocate extra space to prevent it from happening // Note that m_cbMinNextPad can be negative m_cbMinNextPad = ALIGN_UP((SIZE_T)p + 1, BYTES_PER_BUCKET) - ((SIZE_T)p + size); return p; } void CodeHeapRequestInfo::Init() { CONTRACTL 
{ NOTHROW; GC_NOTRIGGER; PRECONDITION((m_hiAddr == 0) || ((m_loAddr < m_hiAddr) && ((m_loAddr + m_requestSize) < m_hiAddr))); } CONTRACTL_END; if (m_pAllocator == NULL) m_pAllocator = m_pMD->GetLoaderAllocator(); m_isDynamicDomain = (m_pMD != NULL) && m_pMD->IsLCGMethod(); m_isCollectible = m_pAllocator->IsCollectible(); m_throwOnOutOfMemoryWithinRange = true; } #ifdef FEATURE_EH_FUNCLETS #ifdef HOST_64BIT extern "C" PT_RUNTIME_FUNCTION GetRuntimeFunctionCallback(IN ULONG64 ControlPc, IN PVOID Context) #else extern "C" PT_RUNTIME_FUNCTION GetRuntimeFunctionCallback(IN ULONG ControlPc, IN PVOID Context) #endif { WRAPPER_NO_CONTRACT; PT_RUNTIME_FUNCTION prf = NULL; // We must preserve this so that GCStress=4 eh processing doesnt kill last error. BEGIN_PRESERVE_LAST_ERROR; #ifdef ENABLE_CONTRACTS // Some 64-bit OOM tests use the hosting interface to re-enter the CLR via // RtlVirtualUnwind to track unique stacks at each failure point. RtlVirtualUnwind can // result in the EEJitManager taking a reader lock. This, in turn, results in a // CANNOT_TAKE_LOCK contract violation if a CANNOT_TAKE_LOCK function were on the stack // at the time. While it's theoretically possible for "real" hosts also to re-enter the // CLR via RtlVirtualUnwind, generally they don't, and we'd actually like to catch a real // host causing such a contract violation. Therefore, we'd like to suppress such contract // asserts when these OOM tests are running, but continue to enforce the contracts by // default. This function returns whether to suppress locking violations. 
CONDITIONAL_CONTRACT_VIOLATION( TakesLockViolation, g_pConfig->SuppressLockViolationsOnReentryFromOS()); #endif // ENABLE_CONTRACTS EECodeInfo codeInfo((PCODE)ControlPc); if (codeInfo.IsValid()) prf = codeInfo.GetFunctionEntry(); LOG((LF_EH, LL_INFO1000000, "GetRuntimeFunctionCallback(%p) returned %p\n", ControlPc, prf)); END_PRESERVE_LAST_ERROR; return prf; } #endif // FEATURE_EH_FUNCLETS HeapList* EEJitManager::NewCodeHeap(CodeHeapRequestInfo *pInfo, DomainCodeHeapList *pADHeapList) { CONTRACT(HeapList *) { THROWS; GC_NOTRIGGER; PRECONDITION(m_CodeHeapCritSec.OwnedByCurrentThread()); POSTCONDITION((RETVAL != NULL) || !pInfo->getThrowOnOutOfMemoryWithinRange()); } CONTRACT_END; size_t initialRequestSize = pInfo->getRequestSize(); size_t minReserveSize = VIRTUAL_ALLOC_RESERVE_GRANULARITY; // ( 64 KB) #ifdef HOST_64BIT if (pInfo->m_hiAddr == 0) { if (pADHeapList->m_CodeHeapList.Count() > CODE_HEAP_SIZE_INCREASE_THRESHOLD) { minReserveSize *= 4; // Increase the code heap size to 256 KB for workloads with a lot of code. 
} // For non-DynamicDomains that don't have a loAddr/hiAddr range // we bump up the reserve size for the 64-bit platforms if (!pInfo->IsDynamicDomain()) { minReserveSize *= 8; // CodeHeaps are larger on AMD64 (256 KB to 2048 KB) } } #endif size_t reserveSize = initialRequestSize; #if defined(TARGET_AMD64) || defined(TARGET_ARM64) reserveSize += JUMP_ALLOCATE_SIZE; #endif if (reserveSize < minReserveSize) reserveSize = minReserveSize; reserveSize = ALIGN_UP(reserveSize, VIRTUAL_ALLOC_RESERVE_GRANULARITY); pInfo->setReserveSize(reserveSize); HeapList *pHp = NULL; DWORD flags = RangeSection::RANGE_SECTION_CODEHEAP; if (pInfo->IsDynamicDomain()) { flags |= RangeSection::RANGE_SECTION_COLLECTIBLE; pHp = HostCodeHeap::CreateCodeHeap(pInfo, this); } else { LoaderHeap *pJitMetaHeap = pADHeapList->m_pAllocator->GetLowFrequencyHeap(); if (pInfo->IsCollectible()) flags |= RangeSection::RANGE_SECTION_COLLECTIBLE; pHp = LoaderCodeHeap::CreateCodeHeap(pInfo, pJitMetaHeap); } if (pHp == NULL) { _ASSERTE(!pInfo->getThrowOnOutOfMemoryWithinRange()); RETURN(NULL); } _ASSERTE (pHp != NULL); _ASSERTE (pHp->maxCodeHeapSize >= initialRequestSize); pHp->SetNext(GetCodeHeapList()); EX_TRY { TADDR pStartRange = pHp->GetModuleBase(); TADDR pEndRange = (TADDR) &((BYTE*)pHp->startAddress)[pHp->maxCodeHeapSize]; ExecutionManager::AddCodeRange(pStartRange, pEndRange, this, (RangeSection::RangeSectionFlags)flags, pHp); // // add a table to cover each range in the range list // InstallEEFunctionTable( (PVOID)pStartRange, // this is just an ID that gets passed to RtlDeleteFunctionTable; (PVOID)pStartRange, (ULONG)((ULONG64)pEndRange - (ULONG64)pStartRange), GetRuntimeFunctionCallback, this, DYNFNTABLE_JIT); } EX_CATCH { // If we failed to alloc memory in ExecutionManager::AddCodeRange() // then we will delete the LoaderHeap that we allocated delete pHp->pHeap; delete pHp; pHp = NULL; } EX_END_CATCH(SwallowAllExceptions) if (pHp == NULL) { ThrowOutOfMemory(); } m_pCodeHeap = pHp; HeapList 
**ppHeapList = pADHeapList->m_CodeHeapList.AppendThrowing(); *ppHeapList = pHp; RETURN(pHp); } void* EEJitManager::allocCodeRaw(CodeHeapRequestInfo *pInfo, size_t header, size_t blockSize, unsigned align, HeapList ** ppCodeHeap) { CONTRACT(void *) { THROWS; GC_NOTRIGGER; PRECONDITION(m_CodeHeapCritSec.OwnedByCurrentThread()); POSTCONDITION((RETVAL != NULL) || !pInfo->getThrowOnOutOfMemoryWithinRange()); } CONTRACT_END; pInfo->setRequestSize(header+blockSize+(align-1)+pInfo->getReserveForJumpStubs()); void * mem = NULL; HeapList * pCodeHeap = NULL; DomainCodeHeapList *pList = NULL; // Avoid going through the full list in the common case - try to use the most recently used codeheap if (pInfo->IsDynamicDomain()) { pCodeHeap = (HeapList *)pInfo->m_pAllocator->m_pLastUsedDynamicCodeHeap; pInfo->m_pAllocator->m_pLastUsedDynamicCodeHeap = NULL; } else { pCodeHeap = (HeapList *)pInfo->m_pAllocator->m_pLastUsedCodeHeap; pInfo->m_pAllocator->m_pLastUsedCodeHeap = NULL; } // If we will use a cached code heap, ensure that the code heap meets the constraints if (pCodeHeap && CanUseCodeHeap(pInfo, pCodeHeap)) { mem = (pCodeHeap->pHeap)->AllocMemForCode_NoThrow(header, blockSize, align, pInfo->getReserveForJumpStubs()); } if (mem == NULL) { pList = GetCodeHeapList(pInfo, pInfo->m_pAllocator); if (pList != NULL) { for (int i = 0; i < pList->m_CodeHeapList.Count(); i++) { pCodeHeap = pList->m_CodeHeapList[i]; // Validate that the code heap can be used for the current request if (CanUseCodeHeap(pInfo, pCodeHeap)) { mem = (pCodeHeap->pHeap)->AllocMemForCode_NoThrow(header, blockSize, align, pInfo->getReserveForJumpStubs()); if (mem != NULL) break; } } } if (mem == NULL) { // Let us create a new heap. 
if (pList == NULL) { // not found so need to create the first one pList = CreateCodeHeapList(pInfo); _ASSERTE(pList == GetCodeHeapList(pInfo, pInfo->m_pAllocator)); } _ASSERTE(pList); pCodeHeap = NewCodeHeap(pInfo, pList); if (pCodeHeap == NULL) { _ASSERTE(!pInfo->getThrowOnOutOfMemoryWithinRange()); RETURN(NULL); } mem = (pCodeHeap->pHeap)->AllocMemForCode_NoThrow(header, blockSize, align, pInfo->getReserveForJumpStubs()); if (mem == NULL) ThrowOutOfMemory(); _ASSERTE(mem); } } if (pInfo->IsDynamicDomain()) { pInfo->m_pAllocator->m_pLastUsedDynamicCodeHeap = pCodeHeap; } else { pInfo->m_pAllocator->m_pLastUsedCodeHeap = pCodeHeap; } // Record the pCodeHeap value into ppCodeHeap *ppCodeHeap = pCodeHeap; _ASSERTE((TADDR)mem >= pCodeHeap->startAddress); if (((TADDR) mem)+blockSize > (TADDR)pCodeHeap->endAddress) { // Update the CodeHeap endAddress pCodeHeap->endAddress = (TADDR)mem+blockSize; } RETURN(mem); } void EEJitManager::allocCode(MethodDesc* pMD, size_t blockSize, size_t reserveForJumpStubs, CorJitAllocMemFlag flag, CodeHeader** ppCodeHeader, CodeHeader** ppCodeHeaderRW, size_t* pAllocatedSize, HeapList** ppCodeHeap #ifdef USE_INDIRECT_CODEHEADER , BYTE** ppRealHeader #endif #ifdef FEATURE_EH_FUNCLETS , UINT nUnwindInfos #endif ) { CONTRACTL { THROWS; GC_NOTRIGGER; } CONTRACTL_END; // // Alignment // unsigned alignment = CODE_SIZE_ALIGN; if ((flag & CORJIT_ALLOCMEM_FLG_32BYTE_ALIGN) != 0) { alignment = max(alignment, 32); } else if ((flag & CORJIT_ALLOCMEM_FLG_16BYTE_ALIGN) != 0) { alignment = max(alignment, 16); } #if defined(TARGET_X86) // when not optimizing for code size, 8-byte align the method entry point, so that // the JIT can in turn 8-byte align the loop entry headers. 
else if ((g_pConfig->GenOptimizeType() != OPT_SIZE)) { alignment = max(alignment, 8); } #endif // // Compute header layout // SIZE_T totalSize = blockSize; CodeHeader * pCodeHdr = NULL; CodeHeader * pCodeHdrRW = NULL; CodeHeapRequestInfo requestInfo(pMD); #if defined(FEATURE_JIT_PITCHING) if (pMD && pMD->IsPitchable() && CLRConfig::GetConfigValue(CLRConfig::INTERNAL_JitPitchMethodSizeThreshold) < blockSize) { requestInfo.SetDynamicDomain(); } #endif requestInfo.setReserveForJumpStubs(reserveForJumpStubs); #if defined(USE_INDIRECT_CODEHEADER) SIZE_T realHeaderSize = offsetof(RealCodeHeader, unwindInfos[0]) + (sizeof(T_RUNTIME_FUNCTION) * nUnwindInfos); // if this is a LCG method then we will be allocating the RealCodeHeader // following the code so that the code block can be removed easily by // the LCG code heap. if (requestInfo.IsDynamicDomain()) { totalSize = ALIGN_UP(totalSize, sizeof(void*)) + realHeaderSize; static_assert_no_msg(CODE_SIZE_ALIGN >= sizeof(void*)); } #endif // USE_INDIRECT_CODEHEADER // Scope the lock { CrstHolder ch(&m_CodeHeapCritSec); *ppCodeHeap = NULL; TADDR pCode = (TADDR) allocCodeRaw(&requestInfo, sizeof(CodeHeader), totalSize, alignment, ppCodeHeap); _ASSERTE(*ppCodeHeap); if (pMD->IsLCGMethod()) { pMD->AsDynamicMethodDesc()->GetLCGMethodResolver()->m_recordCodePointer = (void*) pCode; } _ASSERTE(IS_ALIGNED(pCode, alignment)); pCodeHdr = ((CodeHeader *)pCode) - 1; *pAllocatedSize = sizeof(CodeHeader) + totalSize; if (ExecutableAllocator::IsWXORXEnabled()) { pCodeHdrRW = (CodeHeader *)new BYTE[*pAllocatedSize]; } else { pCodeHdrRW = pCodeHdr; } #ifdef USE_INDIRECT_CODEHEADER if (requestInfo.IsDynamicDomain()) { // Set the real code header to the writeable mapping so that we can set its members via the CodeHeader methods below pCodeHdrRW->SetRealCodeHeader((BYTE *)(pCodeHdrRW + 1) + ALIGN_UP(blockSize, sizeof(void*))); } else { // TODO: think about the CodeHeap carrying around a RealCodeHeader chunking mechanism // // allocate the real 
header in the low frequency heap BYTE* pRealHeader = (BYTE*)(void*)pMD->GetLoaderAllocator()->GetLowFrequencyHeap()->AllocMem(S_SIZE_T(realHeaderSize)); pCodeHdrRW->SetRealCodeHeader(pRealHeader); } #endif pCodeHdrRW->SetDebugInfo(NULL); pCodeHdrRW->SetEHInfo(NULL); pCodeHdrRW->SetGCInfo(NULL); pCodeHdrRW->SetMethodDesc(pMD); #ifdef FEATURE_EH_FUNCLETS pCodeHdrRW->SetNumberOfUnwindInfos(nUnwindInfos); #endif #ifdef USE_INDIRECT_CODEHEADER if (requestInfo.IsDynamicDomain()) { *ppRealHeader = (BYTE*)pCode + ALIGN_UP(blockSize, sizeof(void*)); } else { *ppRealHeader = NULL; } #endif // USE_INDIRECT_CODEHEADER } *ppCodeHeader = pCodeHdr; *ppCodeHeaderRW = pCodeHdrRW; } EEJitManager::DomainCodeHeapList *EEJitManager::GetCodeHeapList(CodeHeapRequestInfo *pInfo, LoaderAllocator *pAllocator, BOOL fDynamicOnly) { CONTRACTL { NOTHROW; GC_NOTRIGGER; PRECONDITION(m_CodeHeapCritSec.OwnedByCurrentThread()); } CONTRACTL_END; DomainCodeHeapList *pList = NULL; DomainCodeHeapList **ppList = NULL; int count = 0; // get the appropriate list of heaps // pMD is NULL for NGen modules during Module::LoadTokenTables if (fDynamicOnly || (pInfo != NULL && pInfo->IsDynamicDomain())) { ppList = m_DynamicDomainCodeHeaps.Table(); count = m_DynamicDomainCodeHeaps.Count(); } else { ppList = m_DomainCodeHeaps.Table(); count = m_DomainCodeHeaps.Count(); } // this is a virtual call - pull it out of the loop BOOL fCanUnload = pAllocator->CanUnload(); // look for a DomainCodeHeapList for (int i=0; i < count; i++) { if (ppList[i]->m_pAllocator == pAllocator || (!fCanUnload && !ppList[i]->m_pAllocator->CanUnload())) { pList = ppList[i]; break; } } return pList; } bool EEJitManager::CanUseCodeHeap(CodeHeapRequestInfo *pInfo, HeapList *pCodeHeap) { CONTRACTL { NOTHROW; GC_NOTRIGGER; PRECONDITION(m_CodeHeapCritSec.OwnedByCurrentThread()); } CONTRACTL_END; bool retVal = false; if ((pInfo->m_loAddr == 0) && (pInfo->m_hiAddr == 0)) { // We have no constraint so this non empty heap will be able to satisfy our 
request if (pInfo->IsDynamicDomain()) { _ASSERTE(pCodeHeap->reserveForJumpStubs == 0); retVal = true; } else { BYTE * lastAddr = (BYTE *) pCodeHeap->startAddress + pCodeHeap->maxCodeHeapSize; BYTE * loRequestAddr = (BYTE *) pCodeHeap->endAddress; BYTE * hiRequestAddr = loRequestAddr + pInfo->getRequestSize() + BYTES_PER_BUCKET; if (hiRequestAddr <= lastAddr - pCodeHeap->reserveForJumpStubs) { retVal = true; } } } else { // We also check to see if an allocation in this heap would satisfy // the [loAddr..hiAddr] requirement // Calculate the byte range that can ever be returned by // an allocation in this HeapList element // BYTE * firstAddr = (BYTE *) pCodeHeap->startAddress; BYTE * lastAddr = (BYTE *) pCodeHeap->startAddress + pCodeHeap->maxCodeHeapSize; _ASSERTE(pCodeHeap->startAddress <= pCodeHeap->endAddress); _ASSERTE(firstAddr <= lastAddr); if (pInfo->IsDynamicDomain()) { _ASSERTE(pCodeHeap->reserveForJumpStubs == 0); // We check to see if every allocation in this heap // will satisfy the [loAddr..hiAddr] requirement. // // Dynamic domains use a free list allocator, // thus we can receive any address in the range // when calling AllocMemory with a DynamicDomain // [firstaddr .. lastAddr] must be entirely within // [pInfo->m_loAddr .. pInfo->m_hiAddr] // if ((pInfo->m_loAddr <= firstAddr) && (lastAddr <= pInfo->m_hiAddr)) { // This heap will always satisfy our constraint retVal = true; } } else // non-DynamicDomain { // Calculate the byte range that would be allocated for the // next allocation request into [loRequestAddr..hiRequestAddr] // BYTE * loRequestAddr = (BYTE *) pCodeHeap->endAddress; BYTE * hiRequestAddr = loRequestAddr + pInfo->getRequestSize() + BYTES_PER_BUCKET; _ASSERTE(loRequestAddr <= hiRequestAddr); // loRequestAddr and hiRequestAddr must be entirely within // [pInfo->m_loAddr .. 
pInfo->m_hiAddr] // if ((pInfo->m_loAddr <= loRequestAddr) && (hiRequestAddr <= pInfo->m_hiAddr)) { // Additionally hiRequestAddr must also be less than or equal to lastAddr. // If throwOnOutOfMemoryWithinRange is not set, conserve reserveForJumpStubs until when it is really needed. if (hiRequestAddr <= lastAddr - (pInfo->getThrowOnOutOfMemoryWithinRange() ? 0 : pCodeHeap->reserveForJumpStubs)) { // This heap will be able to satisfy our constraint retVal = true; } } } } return retVal; } EEJitManager::DomainCodeHeapList * EEJitManager::CreateCodeHeapList(CodeHeapRequestInfo *pInfo) { CONTRACTL { THROWS; GC_NOTRIGGER; PRECONDITION(m_CodeHeapCritSec.OwnedByCurrentThread()); } CONTRACTL_END; NewHolder<DomainCodeHeapList> pNewList(new DomainCodeHeapList()); pNewList->m_pAllocator = pInfo->m_pAllocator; DomainCodeHeapList **ppList = NULL; if (pInfo->IsDynamicDomain()) ppList = m_DynamicDomainCodeHeaps.AppendThrowing(); else ppList = m_DomainCodeHeaps.AppendThrowing(); *ppList = pNewList; return pNewList.Extract(); } LoaderHeap *EEJitManager::GetJitMetaHeap(MethodDesc *pMD) { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; LoaderAllocator *pAllocator = pMD->GetLoaderAllocator(); _ASSERTE(pAllocator); return pAllocator->GetLowFrequencyHeap(); } BYTE* EEJitManager::allocGCInfo(CodeHeader* pCodeHeader, DWORD blockSize, size_t * pAllocationSize) { CONTRACTL { THROWS; GC_NOTRIGGER; } CONTRACTL_END; MethodDesc* pMD = pCodeHeader->GetMethodDesc(); // sadly for light code gen I need the check in here. We should change GetJitMetaHeap if (pMD->IsLCGMethod()) { CrstHolder ch(&m_CodeHeapCritSec); pCodeHeader->SetGCInfo((BYTE*)(void*)pMD->AsDynamicMethodDesc()->GetResolver()->GetJitMetaHeap()->New(blockSize)); } else { pCodeHeader->SetGCInfo((BYTE*) (void*)GetJitMetaHeap(pMD)->AllocMem(S_SIZE_T(blockSize))); } _ASSERTE(pCodeHeader->GetGCInfo()); // AllocMem throws if there's not enough memory * pAllocationSize = blockSize; // Store the allocation size so we can backout later. 
return(pCodeHeader->GetGCInfo()); } void* EEJitManager::allocEHInfoRaw(CodeHeader* pCodeHeader, DWORD blockSize, size_t * pAllocationSize) { CONTRACTL { THROWS; GC_NOTRIGGER; } CONTRACTL_END; MethodDesc* pMD = pCodeHeader->GetMethodDesc(); void * mem = NULL; // sadly for light code gen I need the check in here. We should change GetJitMetaHeap if (pMD->IsLCGMethod()) { CrstHolder ch(&m_CodeHeapCritSec); mem = (void*)pMD->AsDynamicMethodDesc()->GetResolver()->GetJitMetaHeap()->New(blockSize); } else { mem = (void*)GetJitMetaHeap(pMD)->AllocMem(S_SIZE_T(blockSize)); } _ASSERTE(mem); // AllocMem throws if there's not enough memory * pAllocationSize = blockSize; // Store the allocation size so we can backout later. return(mem); } EE_ILEXCEPTION* EEJitManager::allocEHInfo(CodeHeader* pCodeHeader, unsigned numClauses, size_t * pAllocationSize) { CONTRACTL { THROWS; GC_NOTRIGGER; } CONTRACTL_END; // Note - pCodeHeader->phdrJitEHInfo - sizeof(size_t) contains the number of EH clauses DWORD temp = EE_ILEXCEPTION::Size(numClauses); DWORD blockSize = 0; if (!ClrSafeInt<DWORD>::addition(temp, sizeof(size_t), blockSize)) COMPlusThrowOM(); BYTE *EHInfo = (BYTE*)allocEHInfoRaw(pCodeHeader, blockSize, pAllocationSize); pCodeHeader->SetEHInfo((EE_ILEXCEPTION*) (EHInfo + sizeof(size_t))); pCodeHeader->GetEHInfo()->Init(numClauses); *((size_t *)EHInfo) = numClauses; return(pCodeHeader->GetEHInfo()); } JumpStubBlockHeader * EEJitManager::allocJumpStubBlock(MethodDesc* pMD, DWORD numJumps, BYTE * loAddr, BYTE * hiAddr, LoaderAllocator *pLoaderAllocator, bool throwOnOutOfMemoryWithinRange) { CONTRACT(JumpStubBlockHeader *) { THROWS; GC_NOTRIGGER; PRECONDITION(loAddr < hiAddr); PRECONDITION(pLoaderAllocator != NULL); POSTCONDITION((RETVAL != NULL) || !throwOnOutOfMemoryWithinRange); } CONTRACT_END; _ASSERTE((sizeof(JumpStubBlockHeader) % CODE_SIZE_ALIGN) == 0); size_t blockSize = sizeof(JumpStubBlockHeader) + (size_t) numJumps * BACK_TO_BACK_JUMP_ALLOCATE_SIZE; HeapList *pCodeHeap = NULL; 
CodeHeapRequestInfo requestInfo(pMD, pLoaderAllocator, loAddr, hiAddr); requestInfo.setThrowOnOutOfMemoryWithinRange(throwOnOutOfMemoryWithinRange); TADDR mem; ExecutableWriterHolder<JumpStubBlockHeader> blockWriterHolder; // Scope the lock { CrstHolder ch(&m_CodeHeapCritSec); mem = (TADDR) allocCodeRaw(&requestInfo, sizeof(CodeHeader), blockSize, CODE_SIZE_ALIGN, &pCodeHeap); if (mem == NULL) { _ASSERTE(!throwOnOutOfMemoryWithinRange); RETURN(NULL); } // CodeHeader comes immediately before the block CodeHeader * pCodeHdr = (CodeHeader *) (mem - sizeof(CodeHeader)); ExecutableWriterHolder<CodeHeader> codeHdrWriterHolder(pCodeHdr, sizeof(CodeHeader)); codeHdrWriterHolder.GetRW()->SetStubCodeBlockKind(STUB_CODE_BLOCK_JUMPSTUB); NibbleMapSetUnlocked(pCodeHeap, mem, TRUE); blockWriterHolder = ExecutableWriterHolder<JumpStubBlockHeader>((JumpStubBlockHeader *)mem, sizeof(JumpStubBlockHeader)); _ASSERTE(IS_ALIGNED(blockWriterHolder.GetRW(), CODE_SIZE_ALIGN)); } blockWriterHolder.GetRW()->m_next = NULL; blockWriterHolder.GetRW()->m_used = 0; blockWriterHolder.GetRW()->m_allocated = numJumps; if (pMD && pMD->IsLCGMethod()) blockWriterHolder.GetRW()->SetHostCodeHeap(static_cast<HostCodeHeap*>(pCodeHeap->pHeap)); else blockWriterHolder.GetRW()->SetLoaderAllocator(pLoaderAllocator); LOG((LF_JIT, LL_INFO1000, "Allocated new JumpStubBlockHeader for %d stubs at" FMT_ADDR " in loader allocator " FMT_ADDR "\n", numJumps, DBG_ADDR(mem) , DBG_ADDR(pLoaderAllocator) )); RETURN((JumpStubBlockHeader*)mem); } void * EEJitManager::allocCodeFragmentBlock(size_t blockSize, unsigned alignment, LoaderAllocator *pLoaderAllocator, StubCodeBlockKind kind) { CONTRACT(void *) { THROWS; GC_NOTRIGGER; PRECONDITION(pLoaderAllocator != NULL); POSTCONDITION(CheckPointer(RETVAL)); } CONTRACT_END; HeapList *pCodeHeap = NULL; CodeHeapRequestInfo requestInfo(NULL, pLoaderAllocator, NULL, NULL); #ifdef TARGET_AMD64 // CodeFragments are pretty much always Precodes that may need to be patched with jump stubs 
at some point in future // We will assume the worst case that every FixupPrecode will need to be patched and reserve the jump stubs accordingly requestInfo.setReserveForJumpStubs((blockSize / 8) * JUMP_ALLOCATE_SIZE); #endif TADDR mem; // Scope the lock { CrstHolder ch(&m_CodeHeapCritSec); mem = (TADDR) allocCodeRaw(&requestInfo, sizeof(CodeHeader), blockSize, alignment, &pCodeHeap); // CodeHeader comes immediately before the block CodeHeader * pCodeHdr = (CodeHeader *) (mem - sizeof(CodeHeader)); ExecutableWriterHolder<CodeHeader> codeHdrWriterHolder(pCodeHdr, sizeof(CodeHeader)); codeHdrWriterHolder.GetRW()->SetStubCodeBlockKind(kind); NibbleMapSetUnlocked(pCodeHeap, mem, TRUE); // Record the jump stub reservation pCodeHeap->reserveForJumpStubs += requestInfo.getReserveForJumpStubs(); } RETURN((void *)mem); } #endif // !DACCESS_COMPILE GCInfoToken EEJitManager::GetGCInfoToken(const METHODTOKEN& MethodToken) { CONTRACTL { NOTHROW; GC_NOTRIGGER; HOST_NOCALLS; SUPPORTS_DAC; } CONTRACTL_END; // The JIT-ed code always has the current version of GCInfo return{ GetCodeHeader(MethodToken)->GetGCInfo(), GCINFO_VERSION }; } // creates an enumeration and returns the number of EH clauses unsigned EEJitManager::InitializeEHEnumeration(const METHODTOKEN& MethodToken, EH_CLAUSE_ENUMERATOR* pEnumState) { LIMITED_METHOD_CONTRACT; EE_ILEXCEPTION * EHInfo = GetCodeHeader(MethodToken)->GetEHInfo(); pEnumState->iCurrentPos = 0; // since the EH info is not compressed, the clause number is used to do the enumeration pEnumState->pExceptionClauseArray = NULL; if (!EHInfo) return 0; pEnumState->pExceptionClauseArray = dac_cast<TADDR>(EHInfo->EHClause(0)); return *(dac_cast<PTR_unsigned>(dac_cast<TADDR>(EHInfo) - sizeof(size_t))); } PTR_EXCEPTION_CLAUSE_TOKEN EEJitManager::GetNextEHClause(EH_CLAUSE_ENUMERATOR* pEnumState, EE_ILEXCEPTION_CLAUSE* pEHClauseOut) { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; unsigned iCurrentPos = pEnumState->iCurrentPos; pEnumState->iCurrentPos++; 
EE_ILEXCEPTION_CLAUSE* pClause = &(dac_cast<PTR_EE_ILEXCEPTION_CLAUSE>(pEnumState->pExceptionClauseArray)[iCurrentPos]); *pEHClauseOut = *pClause; return dac_cast<PTR_EXCEPTION_CLAUSE_TOKEN>(pClause); } #ifndef DACCESS_COMPILE TypeHandle EEJitManager::ResolveEHClause(EE_ILEXCEPTION_CLAUSE* pEHClause, CrawlFrame *pCf) { // We don't want to use a runtime contract here since this codepath is used during // the processing of a hard SO. Contracts use a significant amount of stack // which we can't afford for those cases. STATIC_CONTRACT_THROWS; STATIC_CONTRACT_GC_TRIGGERS; _ASSERTE(NULL != pCf); _ASSERTE(NULL != pEHClause); _ASSERTE(IsTypedHandler(pEHClause)); TypeHandle typeHnd = TypeHandle(); mdToken typeTok = mdTokenNil; // CachedTypeHandle's are filled in at JIT time, and not cached when accessed multiple times if (HasCachedTypeHandle(pEHClause)) { return TypeHandle::FromPtr(pEHClause->TypeHandle); } else { typeTok = pEHClause->ClassToken; } MethodDesc* pMD = pCf->GetFunction(); Module* pModule = pMD->GetModule(); PREFIX_ASSUME(pModule != NULL); SigTypeContext typeContext(pMD); VarKind k = hasNoVars; // In the vast majority of cases the code under the "if" below // will not be executed. // // First grab the representative instantiations. For code // shared by multiple generic instantiations these are the // canonical (representative) instantiation. if (TypeFromToken(typeTok) == mdtTypeSpec) { PCCOR_SIGNATURE pSig; ULONG cSig; IfFailThrow(pModule->GetMDImport()->GetTypeSpecFromToken(typeTok, &pSig, &cSig)); SigPointer psig(pSig, cSig); k = psig.IsPolyType(&typeContext); // Grab the active class and method instantiation. This exact instantiation is only // needed in the corner case of "generic" exception catching in shared // generic code. We don't need the exact instantiation if the token // doesn't contain E_T_VAR or E_T_MVAR. 
if ((k & hasSharableVarsMask) != 0) { Instantiation classInst; Instantiation methodInst; pCf->GetExactGenericInstantiations(&classInst, &methodInst); SigTypeContext::InitTypeContext(pMD,classInst, methodInst,&typeContext); } } return ClassLoader::LoadTypeDefOrRefOrSpecThrowing(pModule, typeTok, &typeContext, ClassLoader::ReturnNullIfNotFound); } void EEJitManager::RemoveJitData (CodeHeader * pCHdr, size_t GCinfo_len, size_t EHinfo_len) { CONTRACTL { NOTHROW; GC_TRIGGERS; } CONTRACTL_END; MethodDesc* pMD = pCHdr->GetMethodDesc(); if (pMD->IsLCGMethod()) { void * codeStart = (pCHdr + 1); { CrstHolder ch(&m_CodeHeapCritSec); LCGMethodResolver * pResolver = pMD->AsDynamicMethodDesc()->GetLCGMethodResolver(); // Clear the pointer only if it matches what we are about to free. // There can be cases where the JIT is reentered and we JITed the method multiple times. if (pResolver->m_recordCodePointer == codeStart) pResolver->m_recordCodePointer = NULL; } #if defined(TARGET_AMD64) // Remove the unwind information (if applicable) UnwindInfoTable::UnpublishUnwindInfoForMethod((TADDR)codeStart); #endif // defined(TARGET_AMD64) HostCodeHeap* pHeap = HostCodeHeap::GetCodeHeap((TADDR)codeStart); FreeCodeMemory(pHeap, codeStart); // We are leaking GCInfo and EHInfo. They will be freed once the dynamic method is destroyed. return; } { CrstHolder ch(&m_CodeHeapCritSec); HeapList *pHp = GetCodeHeapList(); while (pHp && ((pHp->startAddress > (TADDR)pCHdr) || (pHp->endAddress < (TADDR)pCHdr + sizeof(CodeHeader)))) { pHp = pHp->GetNext(); } _ASSERTE(pHp && pHp->pHdrMap); // Better to just return than AV? 
if (pHp == NULL) return; NibbleMapSetUnlocked(pHp, (TADDR)(pCHdr + 1), FALSE); } // Backout the GCInfo if (GCinfo_len > 0) { GetJitMetaHeap(pMD)->BackoutMem(pCHdr->GetGCInfo(), GCinfo_len); } // Backout the EHInfo BYTE *EHInfo = (BYTE *)pCHdr->GetEHInfo(); if (EHInfo) { EHInfo -= sizeof(size_t); _ASSERTE(EHinfo_len>0); GetJitMetaHeap(pMD)->BackoutMem(EHInfo, EHinfo_len); } // <TODO> // TODO: Although we have backout the GCInfo and EHInfo, we haven't actually backout the // code buffer itself. As a result, we might leak the CodeHeap if jitting fails after // the code buffer is allocated. // // However, it appears non-trival to fix this. // Here are some of the reasons: // (1) AllocCode calls in AllocCodeRaw to alloc code buffer in the CodeHeap. The exact size // of the code buffer is not known until the alignment is calculated deep on the stack. // (2) AllocCodeRaw is called in 3 different places. We might need to remember the // information for these places. // (3) AllocCodeRaw might create a new CodeHeap. We should remember exactly which // CodeHeap is used to allocate the code buffer. // // Fortunately, this is not a severe leak since the CodeHeap will be reclaimed on appdomain unload. // // </TODO> return; } // appdomain is being unloaded, so delete any data associated with it. We have to do this in two stages. // On the first stage, we remove the elements from the list. On the second stage, which occurs after a GC // we know that only threads who were in preemptive mode prior to the GC could possibly still be looking // at an element that is about to be deleted. All such threads are guarded with a reader count, so if the // count is 0, we can safely delete, otherwise we must add to the cleanup list to be deleted later. We know // there can only be one unload at a time, so we can use a single var to hold the unlinked, but not deleted, // elements. 
//-----------------------------------------------------------------------------
// Deletes all code heaps (regular and dynamic) owned by the given loader
// allocator.  Runs under m_CodeHeapCritSec; dynamic heaps are also pulled off
// the pending-cleanup list before being destroyed.
//-----------------------------------------------------------------------------
void EEJitManager::Unload(LoaderAllocator *pAllocator)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    } CONTRACTL_END;

    CrstHolder ch(&m_CodeHeapCritSec);

    DomainCodeHeapList **ppList = m_DomainCodeHeaps.Table();
    int count = m_DomainCodeHeaps.Count();

    for (int i = 0; i < count; i++) {
        if (ppList[i]->m_pAllocator == pAllocator) {
            DomainCodeHeapList *pList = ppList[i];
            m_DomainCodeHeaps.DeleteByIndex(i);

            // pHeapList is allocated in pHeap, so only need to delete the LoaderHeap itself
            count = pList->m_CodeHeapList.Count();
            for (i = 0; i < count; i++) {
                HeapList *pHeapList = pList->m_CodeHeapList[i];
                DeleteCodeHeap(pHeapList);
            }

            // this is ok to do delete as anyone accessing the DomainCodeHeapList structure holds the critical section.
            delete pList;

            break;
        }
    }

    // Same walk for the dynamic (LCG/HostCodeHeap) code heaps.
    ppList = m_DynamicDomainCodeHeaps.Table();
    count = m_DynamicDomainCodeHeaps.Count();
    for (int i = 0; i < count; i++) {
        if (ppList[i]->m_pAllocator == pAllocator) {
            DomainCodeHeapList *pList = ppList[i];
            m_DynamicDomainCodeHeaps.DeleteByIndex(i);

            // pHeapList is allocated in pHeap, so only need to delete the CodeHeap itself
            count = pList->m_CodeHeapList.Count();
            for (i = 0; i < count; i++) {
                HeapList *pHeapList = pList->m_CodeHeapList[i];
                // m_DynamicDomainCodeHeaps should only contain HostCodeHeap.
                RemoveFromCleanupList(static_cast<HostCodeHeap*>(pHeapList->pHeap));
                DeleteCodeHeap(pHeapList);
            }

            // this is ok to do delete as anyone accessing the DomainCodeHeapList structure holds the critical section.
            delete pList;

            break;
        }
    }

    ExecutableAllocator::ResetLazyPreferredRangeHint();
}

EEJitManager::DomainCodeHeapList::DomainCodeHeapList()
{
    LIMITED_METHOD_CONTRACT;
    m_pAllocator = NULL;
}

EEJitManager::DomainCodeHeapList::~DomainCodeHeapList()
{
    LIMITED_METHOD_CONTRACT;
}

//-----------------------------------------------------------------------------
// Unlinks pHeap from the dynamic code heap list of its loader allocator.
// If pHeap was the allocator's only dynamic heap, the whole DomainCodeHeapList
// entry is removed.  Caller must hold m_CodeHeapCritSec (see PRECONDITION).
//-----------------------------------------------------------------------------
void EEJitManager::RemoveCodeHeapFromDomainList(CodeHeap *pHeap, LoaderAllocator *pAllocator)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        PRECONDITION(m_CodeHeapCritSec.OwnedByCurrentThread());
    } CONTRACTL_END;

    // get the AppDomain heap list for pAllocator in m_DynamicDomainCodeHeaps
    DomainCodeHeapList *pList = GetCodeHeapList(NULL, pAllocator, TRUE);

    // go through the heaps and find and remove pHeap
    int count = pList->m_CodeHeapList.Count();
    for (int i = 0; i < count; i++) {
        HeapList *pHeapList = pList->m_CodeHeapList[i];
        if (pHeapList->pHeap == pHeap) {
            // found the heap to remove. If this is the only heap we remove the whole DomainCodeHeapList
            // otherwise we just remove this heap
            if (count == 1) {
                m_DynamicDomainCodeHeaps.Delete(pList);
                delete pList;
            }
            else
                pList->m_CodeHeapList.Delete(i);

            // if this heaplist is cached in the loader allocator, we must clear it
            if (pAllocator->m_pLastUsedDynamicCodeHeap == ((void *) pHeapList))
            {
                pAllocator->m_pLastUsedDynamicCodeHeap = NULL;
            }

            break;
        }
    }
}

//-----------------------------------------------------------------------------
// Frees a single LCG code allocation: clears its nibble-map entry and returns
// the memory to the owning HostCodeHeap, all under m_CodeHeapCritSec.
//-----------------------------------------------------------------------------
void EEJitManager::FreeCodeMemory(HostCodeHeap *pCodeHeap, void * codeStart)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    } CONTRACTL_END;

    CrstHolder ch(&m_CodeHeapCritSec);

    // FreeCodeMemory is only supported on LCG methods,
    // so pCodeHeap can only be a HostCodeHeap.

    // clean up the NibbleMap
    NibbleMapSetUnlocked(pCodeHeap->m_pHeapList, (TADDR)codeStart, FALSE);

    // The caller of this method doesn't call HostCodeHeap->FreeMemForCode
    // directly because the operation should be protected by m_CodeHeapCritSec.
    pCodeHeap->FreeMemForCode(codeStart);
}

// Static forwarder invoked during shutdown or by the GC thread; delegates to
// the EE jit manager's cleanup.
void ExecutionManager::CleanupCodeHeaps()
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    } CONTRACTL_END;

    _ASSERTE (g_fProcessDetach || (GCHeapUtilities::IsGCInProgress() && ::IsGCThread()));

    GetEEJitManager()->CleanupCodeHeaps();
}

//-----------------------------------------------------------------------------
// Walks the pending-cleanup list of HostCodeHeaps and destroys those whose
// allocation count has dropped to 0; the rest simply stay alive (the list is
// reset and entries are re-added when their count reaches 0 again).
//-----------------------------------------------------------------------------
void EEJitManager::CleanupCodeHeaps()
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    } CONTRACTL_END;

    _ASSERTE (g_fProcessDetach || (GCHeapUtilities::IsGCInProgress() && ::IsGCThread()));

    // Quick out, don't even take the lock if we have not cleanup to do.
    // This is important because ETW takes the CodeHeapLock when it is doing
    // rundown, and if there are many JIT compiled methods, this can take a while.
    // Because cleanup is called synchronously before a GC, this means GCs get
    // blocked while ETW is doing rundown.   By not taking the lock we avoid
    // this stall most of the time since cleanup is rare, and ETW rundown is rare
    // the likelihood of both is very very rare.
    if (m_cleanupList == NULL)
        return;

    CrstHolder ch(&m_CodeHeapCritSec);

    // Re-check under the lock: another thread may have drained the list.
    if (m_cleanupList == NULL)
        return;

    HostCodeHeap *pHeap = m_cleanupList;
    m_cleanupList = NULL;

    while (pHeap)
    {
        HostCodeHeap *pNextHeap = pHeap->m_pNextHeapToRelease;

        DWORD allocCount = pHeap->m_AllocationCount;
        if (allocCount == 0)
        {
            LOG((LF_BCL, LL_INFO100, "Level2 - Destryoing CodeHeap [0x%p, vt(0x%x)] - ref count 0\n", pHeap, *(size_t*)pHeap));
            RemoveCodeHeapFromDomainList(pHeap, pHeap->m_pAllocator);
            DeleteCodeHeap(pHeap->m_pHeapList);
        }
        else
        {
            LOG((LF_BCL, LL_INFO100, "Level2 - Restoring CodeHeap [0x%p, vt(0x%x)] - ref count %d\n", pHeap, *(size_t*)pHeap, allocCount));
        }
        pHeap = pNextHeap;
    }
}

//-----------------------------------------------------------------------------
// Unlinks pCodeHeap from the singly-linked pending-cleanup list, if present.
// Caller must hold m_CodeHeapCritSec.
//-----------------------------------------------------------------------------
void EEJitManager::RemoveFromCleanupList(HostCodeHeap *pCodeHeap)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        PRECONDITION(m_CodeHeapCritSec.OwnedByCurrentThread());
    } CONTRACTL_END;

    HostCodeHeap *pHeap = m_cleanupList;
    HostCodeHeap *pPrevHeap = NULL;
    while (pHeap)
    {
        if (pHeap == pCodeHeap)
        {
            if (pPrevHeap)
            {
                // remove current heap from list
                pPrevHeap->m_pNextHeapToRelease = pHeap->m_pNextHeapToRelease;
            }
            else
            {
                m_cleanupList = pHeap->m_pNextHeapToRelease;
            }
            break;
        }
        pPrevHeap = pHeap;
        pHeap = pHeap->m_pNextHeapToRelease;
    }
}

//-----------------------------------------------------------------------------
// Pushes pCodeHeap onto the pending-cleanup list (front insertion), unless it
// is already there.  Caller must hold m_CodeHeapCritSec.
//-----------------------------------------------------------------------------
void EEJitManager::AddToCleanupList(HostCodeHeap *pCodeHeap)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        PRECONDITION(m_CodeHeapCritSec.OwnedByCurrentThread());
    } CONTRACTL_END;

    // it may happen that the current heap count goes to 0 and later on, before it is destroyed, it gets reused
    // for another dynamic method.
    // It's then possible that the ref count reaches 0 multiple times. If so we simply don't add it again
    // Also on cleanup we check that the ref count is actually 0.
    HostCodeHeap *pHeap = m_cleanupList;
    while (pHeap)
    {
        if (pHeap == pCodeHeap)
        {
            LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p, vt(0x%x)] - Already in list\n", pCodeHeap, *(size_t*)pCodeHeap));
            break;
        }
        pHeap = pHeap->m_pNextHeapToRelease;
    }
    if (pHeap == NULL)
    {
        pCodeHeap->m_pNextHeapToRelease = m_cleanupList;
        m_cleanupList = pCodeHeap;
        LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p, vt(0x%x)] - ref count %d - Adding to cleanup list\n", pCodeHeap, *(size_t*)pCodeHeap, pCodeHeap->m_AllocationCount));
    }
}

//-----------------------------------------------------------------------------
// Removes pHeapList from the manager's heap list, deletes its function table
// and range registration, and destroys both the CodeHeap and the HeapList.
// Caller must hold m_CodeHeapCritSec.
//-----------------------------------------------------------------------------
void EEJitManager::DeleteCodeHeap(HeapList *pHeapList)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        PRECONDITION(m_CodeHeapCritSec.OwnedByCurrentThread());
    } CONTRACTL_END;

    HeapList *pHp = GetCodeHeapList();
    if (pHp == pHeapList)
        m_pCodeHeap = pHp->GetNext();
    else
    {
        HeapList *pHpNext = pHp->GetNext();

        while (pHpNext != pHeapList)
        {
            pHp = pHpNext;
            _ASSERTE(pHp != NULL);  // should always find the HeapList
            pHpNext = pHp->GetNext();
        }
        pHp->SetNext(pHeapList->GetNext());
    }

    DeleteEEFunctionTable((PVOID)pHeapList->GetModuleBase());

    ExecutionManager::DeleteRange((TADDR)pHeapList->GetModuleBase());

    LOG((LF_JIT, LL_INFO100, "DeleteCodeHeap start" FMT_ADDR "end" FMT_ADDR "\n",
         (const BYTE*)pHeapList->startAddress,
         (const BYTE*)pHeapList->endAddress ));

    CodeHeap* pHeap = pHeapList->pHeap;
    delete pHeap;
    delete pHeapList;
}

#endif // #ifndef DACCESS_COMPILE

//-----------------------------------------------------------------------------
// Recovers the CodeHeader for a debug-info request.  The CodeHeader sits
// immediately before the code start address (low two bits are masked off
// before the pointer arithmetic).
//-----------------------------------------------------------------------------
static CodeHeader * GetCodeHeaderFromDebugInfoRequest(const DebugInfoRequest & request)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        SUPPORTS_DAC;
    } CONTRACTL_END;

    TADDR address = (TADDR) request.GetStartAddress();
    _ASSERTE(address != NULL);

    CodeHeader * pHeader = dac_cast<PTR_CodeHeader>(address & ~3) - 1;
    _ASSERTE(pHeader != NULL);

    return pHeader;
}

//-----------------------------------------------------------------------------
// Get vars from Jit Store
//-----------------------------------------------------------------------------
// Decompresses the debug info attached to the requested method's CodeHeader
// into boundary (IL<->native offset) and native-variable tables, using the
// caller-supplied allocator.  Returns FALSE when no debug info was recorded.
BOOL EEJitManager::GetBoundariesAndVars(
        const DebugInfoRequest & request,
        IN FP_IDS_NEW fpNew, IN void * pNewData,
        OUT ULONG32 * pcMap,
        OUT ICorDebugInfo::OffsetMapping **ppMap,
        OUT ULONG32 * pcVars,
        OUT ICorDebugInfo::NativeVarInfo **ppVars)
{
    CONTRACTL {
        THROWS;       // on OOM.
        GC_NOTRIGGER; // getting vars shouldn't trigger
        SUPPORTS_DAC;
    } CONTRACTL_END;

    CodeHeader * pHdr = GetCodeHeaderFromDebugInfoRequest(request);
    _ASSERTE(pHdr != NULL);

    PTR_BYTE pDebugInfo = pHdr->GetDebugInfo();

    // No header created, which means no jit information is available.
    if (pDebugInfo == NULL)
        return FALSE;

    // OSR builds store an extra flag byte in the compressed debug info stream.
#ifdef FEATURE_ON_STACK_REPLACEMENT
    BOOL hasFlagByte = TRUE;
#else
    BOOL hasFlagByte = FALSE;
#endif

    // Uncompress. This allocates memory and may throw.
    CompressDebugInfo::RestoreBoundariesAndVars(
        fpNew, pNewData, // allocators
        pDebugInfo,      // input
        pcMap, ppMap,    // output
        pcVars, ppVars,  // output
        hasFlagByte
    );

    return TRUE;
}

#ifdef DACCESS_COMPILE

// DAC: reports the memory backing this CodeHeader (and its real header and
// compressed debug info, when present) so it is captured in dumps.
void CodeHeader::EnumMemoryRegions(CLRDataEnumMemoryFlags flags, IJitManager* pJitMan)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        SUPPORTS_DAC;
    } CONTRACTL_END;

    DAC_ENUM_DTHIS();

#ifdef USE_INDIRECT_CODEHEADER
    this->pRealCodeHeader.EnumMem();
#endif // USE_INDIRECT_CODEHEADER

#ifdef FEATURE_ON_STACK_REPLACEMENT
    BOOL hasFlagByte = TRUE;
#else
    BOOL hasFlagByte = FALSE;
#endif

    if (this->GetDebugInfo() != NULL)
    {
        CompressDebugInfo::EnumMemoryRegions(flags, this->GetDebugInfo(), hasFlagByte);
    }
}

//-----------------------------------------------------------------------------
// Enumerate for minidumps.
//-----------------------------------------------------------------------------
void EEJitManager::EnumMemoryRegionsForMethodDebugInfo(CLRDataEnumMemoryFlags flags, MethodDesc * pMD)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        SUPPORTS_DAC;
    } CONTRACTL_END;

    DebugInfoRequest request;
    PCODE addrCode = pMD->GetNativeCode();
    request.InitFromStartingAddr(pMD, addrCode);

    CodeHeader * pHeader = GetCodeHeaderFromDebugInfoRequest(request);

    pHeader->EnumMemoryRegions(flags, NULL);
}

#endif // DACCESS_COMPILE

// Translates a (method token, relative offset) pair back to an absolute code
// address within the method's jitted code.
PCODE EEJitManager::GetCodeAddressForRelOffset(const METHODTOKEN& MethodToken, DWORD relOffset)
{
    WRAPPER_NO_CONTRACT;

    CodeHeader * pHeader = GetCodeHeader(MethodToken);
    return pHeader->GetCodeStartAddress() + relOffset;
}

//-----------------------------------------------------------------------------
// Maps an arbitrary code address in a jit range section back to its method.
// Returns FALSE for addresses that are not jitted method code (including stub
// code blocks).  On success optionally fills pCodeInfo and *ppMethodDesc.
//-----------------------------------------------------------------------------
BOOL EEJitManager::JitCodeToMethodInfo(
        RangeSection * pRangeSection,
        PCODE currentPC,
        MethodDesc ** ppMethodDesc,
        EECodeInfo * pCodeInfo)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        SUPPORTS_DAC;
    } CONTRACTL_END;

    _ASSERTE(pRangeSection != NULL);

    TADDR start = dac_cast<PTR_EEJitManager>(pRangeSection->pjit)->FindMethodCode(pRangeSection, currentPC);
    if (start == NULL)
        return FALSE;

    // The CodeHeader immediately precedes the code start.
    CodeHeader * pCHdr = PTR_CodeHeader(start - sizeof(CodeHeader));
    if (pCHdr->IsStubCodeBlock())
        return FALSE;

    _ASSERTE(pCHdr->GetMethodDesc()->SanityCheck());

    if (pCodeInfo)
    {
        pCodeInfo->m_methodToken = METHODTOKEN(pRangeSection, dac_cast<TADDR>(pCHdr));

        // This can be counted on for Jitted code. For NGEN code in the case
        // where we have hot/cold splitting this isn't valid and we need to
        // take into account cold code.
        pCodeInfo->m_relOffset = (DWORD)(PCODEToPINSTR(currentPC) - pCHdr->GetCodeStartAddress());

#ifdef FEATURE_EH_FUNCLETS
        // Computed lazily by code:EEJitManager::LazyGetFunctionEntry
        pCodeInfo->m_pFunctionEntry = NULL;
#endif
    }

    if (ppMethodDesc)
    {
        *ppMethodDesc = pCHdr->GetMethodDesc();
    }
    return TRUE;
}

// Classifies the code at currentPC: a specific stub code block kind, managed
// method code, or no code at all.
StubCodeBlockKind EEJitManager::GetStubCodeBlockKind(RangeSection * pRangeSection, PCODE currentPC)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        SUPPORTS_DAC;
    } CONTRACTL_END;

    TADDR start = dac_cast<PTR_EEJitManager>(pRangeSection->pjit)->FindMethodCode(pRangeSection, currentPC);
    if (start == NULL)
        return STUB_CODE_BLOCK_NOCODE;
    CodeHeader * pCHdr = PTR_CodeHeader(start - sizeof(CodeHeader));
    return pCHdr->IsStubCodeBlock() ? pCHdr->GetStubCodeBlockKind() : STUB_CODE_BLOCK_MANAGED;
}

// Convenience overload: locates the range section for currentPC itself, then
// delegates to the range-section-based FindMethodCode.
// NOTE(review): on failure this returns STUB_CODE_BLOCK_NOCODE through a TADDR
// return type — callers evidently treat that value as "no code found".
TADDR EEJitManager::FindMethodCode(PCODE currentPC)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        SUPPORTS_DAC;
    } CONTRACTL_END;

    RangeSection * pRS = ExecutionManager::FindCodeRange(currentPC, ExecutionManager::GetScanFlags());
    if (pRS == NULL || (pRS->flags & RangeSection::RANGE_SECTION_CODEHEAP) == 0)
        return STUB_CODE_BLOCK_NOCODE;
    return dac_cast<PTR_EEJitManager>(pRS->pjit)->FindMethodCode(pRS, currentPC);
}

// Finds the header corresponding to the code at offset "delta".
// Returns NULL if there is no header for the given "delta"

// Walks the nibble map of the heap backing pRangeSection backwards from
// currentPC to find the start of the method (the address just past its
// CodeHeader).  Reads are lock-free: writers (NibbleMapSetUnlocked) update
// each map DWORD atomically, so VolatileLoadWithoutBarrier sees a consistent
// value.  Each nibble encodes "a method starts in this 32-byte bucket at
// offset nibble-1"; 0 means no method starts there.
TADDR EEJitManager::FindMethodCode(RangeSection * pRangeSection, PCODE currentPC)
{
    LIMITED_METHOD_DAC_CONTRACT;

    _ASSERTE(pRangeSection != NULL);

    HeapList *pHp = dac_cast<PTR_HeapList>(pRangeSection->pHeapListOrZapModule);

    if ((currentPC < pHp->startAddress) ||
        (currentPC > pHp->endAddress))
    {
        return NULL;
    }

    TADDR base = pHp->mapBase;
    TADDR delta = currentPC - base;
    PTR_DWORD pMap = pHp->pHdrMap;
    PTR_DWORD pMapStart = pMap;

    DWORD tmp;

    size_t startPos = ADDR2POS(delta);  // align to 32byte buckets
                                        // ( == index into the array of nibbles)
    DWORD offset = ADDR2OFFS(delta);    // this is the offset inside the bucket + 1

    _ASSERTE(offset == (offset & NIBBLE_MASK));

    pMap += (startPos >> LOG2_NIBBLES_PER_DWORD); // points to the proper DWORD of the map

    // get DWORD and shift down our nibble

    PREFIX_ASSUME(pMap != NULL);
    tmp = VolatileLoadWithoutBarrier<DWORD>(pMap) >> POS2SHIFTCOUNT(startPos);

    // A method starts in currentPC's own bucket, at or before currentPC.
    if ((tmp & NIBBLE_MASK) && ((tmp & NIBBLE_MASK) <= offset) )
    {
        return base + POSOFF2ADDR(startPos, tmp & NIBBLE_MASK);
    }

    // Is there a header in the remainder of the DWORD ?
    tmp = tmp >> NIBBLE_SIZE;

    if (tmp)
    {
        startPos--;
        while (!(tmp & NIBBLE_MASK))
        {
            tmp = tmp >> NIBBLE_SIZE;
            startPos--;
        }
        return base + POSOFF2ADDR(startPos, tmp & NIBBLE_MASK);
    }

    // We skipped the remainder of the DWORD,
    // so we must set startPos to the highest position of
    // previous DWORD, unless we are already on the first DWORD

    if (startPos < NIBBLES_PER_DWORD)
        return NULL;

    startPos = ((startPos >> LOG2_NIBBLES_PER_DWORD) << LOG2_NIBBLES_PER_DWORD) - 1;

    // Skip "headerless" DWORDS

    while (pMapStart < pMap && 0 == (tmp = VolatileLoadWithoutBarrier<DWORD>(--pMap)))
    {
        startPos -= NIBBLES_PER_DWORD;
    }

    // This helps to catch degenerate error cases. This relies on the fact that
    // startPos cannot ever be bigger than MAX_UINT
    if (((INT_PTR)startPos) < 0)
        return NULL;

    // Find the nibble with the header in the DWORD

    while (startPos && !(tmp & NIBBLE_MASK))
    {
        tmp = tmp >> NIBBLE_SIZE;
        startPos--;
    }

    if (startPos == 0 && tmp == 0)
        return NULL;

    return base + POSOFF2ADDR(startPos, tmp & NIBBLE_MASK);
}

#if !defined(DACCESS_COMPILE)

// Locked wrapper: takes m_CodeHeapCritSec and delegates to the unlocked form.
void EEJitManager::NibbleMapSet(HeapList * pHp, TADDR pCode, BOOL bSet)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    } CONTRACTL_END;

    CrstHolder ch(&m_CodeHeapCritSec);
    NibbleMapSetUnlocked(pHp, pCode, bSet);
}

// Records (bSet=TRUE) or clears (bSet=FALSE) the nibble-map entry for the
// method starting at pCode.  The single-DWORD store keeps the update atomic
// with respect to the lock-free reader in FindMethodCode.
void EEJitManager::NibbleMapSetUnlocked(HeapList * pHp, TADDR pCode, BOOL bSet)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    } CONTRACTL_END;

    // Currently all callers to this method ensure EEJitManager::m_CodeHeapCritSec
    // is held.
    _ASSERTE(m_CodeHeapCritSec.OwnedByCurrentThread());

    _ASSERTE(pCode >= pHp->mapBase);

    size_t delta = pCode - pHp->mapBase;

    size_t pos = ADDR2POS(delta);
    DWORD value = bSet ? ADDR2OFFS(delta) : 0;

    DWORD index = (DWORD) (pos >> LOG2_NIBBLES_PER_DWORD);
    DWORD mask = ~((DWORD) HIGHEST_NIBBLE_MASK >> ((pos & NIBBLES_PER_DWORD_MASK) << LOG2_NIBBLE_SIZE));

    value = value << POS2SHIFTCOUNT(pos);

    PTR_DWORD pMap = pHp->pHdrMap;

    // assert that we don't overwrite an existing offset
    // (it's a reset or it is empty)
    _ASSERTE(!value || !((*(pMap+index)) & ~mask));

    // It is important for this update to be atomic. Synchronization would be required with FindMethodCode otherwise.
    *(pMap+index) = ((*(pMap+index)) & mask) | value;
}

#endif // !DACCESS_COMPILE

#if defined(FEATURE_EH_FUNCLETS)

// Note: This returns the root unwind record (the one that describes the prolog)
// in cases where there is fragmented unwind.
PTR_RUNTIME_FUNCTION EEJitManager::LazyGetFunctionEntry(EECodeInfo * pCodeInfo) { CONTRACTL { NOTHROW; GC_NOTRIGGER; SUPPORTS_DAC; } CONTRACTL_END; if (!pCodeInfo->IsValid()) { return NULL; } CodeHeader * pHeader = GetCodeHeader(pCodeInfo->GetMethodToken()); DWORD address = RUNTIME_FUNCTION__BeginAddress(pHeader->GetUnwindInfo(0)) + pCodeInfo->GetRelOffset(); // We need the module base address to calculate the end address of a function from the functionEntry. // Thus, save it off right now. TADDR baseAddress = pCodeInfo->GetModuleBase(); // NOTE: We could binary search here, if it would be helpful (e.g., large number of funclets) for (UINT iUnwindInfo = 0; iUnwindInfo < pHeader->GetNumberOfUnwindInfos(); iUnwindInfo++) { PTR_RUNTIME_FUNCTION pFunctionEntry = pHeader->GetUnwindInfo(iUnwindInfo); if (RUNTIME_FUNCTION__BeginAddress(pFunctionEntry) <= address && address < RUNTIME_FUNCTION__EndAddress(pFunctionEntry, baseAddress)) { #if defined(EXCEPTION_DATA_SUPPORTS_FUNCTION_FRAGMENTS) && defined(TARGET_ARM64) // If we might have fragmented unwind, and we're on ARM64, make sure // to returning the root record, as the trailing records don't have // prolog unwind codes. 
pFunctionEntry = FindRootEntry(pFunctionEntry, baseAddress); #endif return pFunctionEntry; } } return NULL; } DWORD EEJitManager::GetFuncletStartOffsets(const METHODTOKEN& MethodToken, DWORD* pStartFuncletOffsets, DWORD dwLength) { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; CodeHeader * pCH = GetCodeHeader(MethodToken); TADDR moduleBase = JitTokenToModuleBase(MethodToken); _ASSERTE(pCH->GetNumberOfUnwindInfos() >= 1); DWORD parentBeginRva = RUNTIME_FUNCTION__BeginAddress(pCH->GetUnwindInfo(0)); DWORD nFunclets = 0; for (COUNT_T iUnwindInfo = 1; iUnwindInfo < pCH->GetNumberOfUnwindInfos(); iUnwindInfo++) { PTR_RUNTIME_FUNCTION pFunctionEntry = pCH->GetUnwindInfo(iUnwindInfo); #if defined(EXCEPTION_DATA_SUPPORTS_FUNCTION_FRAGMENTS) if (IsFunctionFragment(moduleBase, pFunctionEntry)) { // This is a fragment (not the funclet beginning); skip it continue; } #endif // EXCEPTION_DATA_SUPPORTS_FUNCTION_FRAGMENTS DWORD funcletBeginRva = RUNTIME_FUNCTION__BeginAddress(pFunctionEntry); DWORD relParentOffsetToFunclet = funcletBeginRva - parentBeginRva; if (nFunclets < dwLength) pStartFuncletOffsets[nFunclets] = relParentOffsetToFunclet; nFunclets++; } return nFunclets; } #if defined(DACCESS_COMPILE) // This function is basically like RtlLookupFunctionEntry(), except that it works with DAC // to read the function entries out of process. Also, it can only look up function entries // inside mscorwks.dll, since DAC doesn't know anything about other unmanaged dll's. void GetUnmanagedStackWalkInfo(IN ULONG64 ControlPc, OUT UINT_PTR* pModuleBase, OUT UINT_PTR* pFuncEntry) { WRAPPER_NO_CONTRACT; if (pModuleBase) { *pModuleBase = NULL; } if (pFuncEntry) { *pFuncEntry = NULL; } PEDecoder peDecoder(DacGlobalBase()); SIZE_T baseAddr = dac_cast<TADDR>(peDecoder.GetBase()); SIZE_T cbSize = (SIZE_T)peDecoder.GetVirtualSize(); // Check if the control PC is inside mscorwks. 
if ( (baseAddr <= ControlPc) && (ControlPc < (baseAddr + cbSize)) ) { if (pModuleBase) { *pModuleBase = baseAddr; } if (pFuncEntry) { // Check if there is a static function table. COUNT_T cbSize = 0; TADDR pExceptionDir = peDecoder.GetDirectoryEntryData(IMAGE_DIRECTORY_ENTRY_EXCEPTION, &cbSize); if (pExceptionDir != NULL) { // Do a binary search on the static function table of mscorwks.dll. HRESULT hr = E_FAIL; TADDR taFuncEntry; T_RUNTIME_FUNCTION functionEntry; DWORD dwLow = 0; DWORD dwHigh = cbSize / sizeof(T_RUNTIME_FUNCTION); DWORD dwMid = 0; while (dwLow <= dwHigh) { dwMid = (dwLow + dwHigh) >> 1; taFuncEntry = pExceptionDir + dwMid * sizeof(T_RUNTIME_FUNCTION); hr = DacReadAll(taFuncEntry, &functionEntry, sizeof(functionEntry), false); if (FAILED(hr)) { return; } if (ControlPc < baseAddr + functionEntry.BeginAddress) { dwHigh = dwMid - 1; } else if (ControlPc >= baseAddr + RUNTIME_FUNCTION__EndAddress(&functionEntry, baseAddr)) { dwLow = dwMid + 1; } else { _ASSERTE(pFuncEntry); #ifdef _TARGET_AMD64_ // On amd64, match RtlLookupFunctionEntry behavior by resolving indirect function entries // back to the associated owning function entry. 
if ((functionEntry.UnwindData & RUNTIME_FUNCTION_INDIRECT) != 0) { DWORD dwRvaOfOwningFunctionEntry = (functionEntry.UnwindData & ~RUNTIME_FUNCTION_INDIRECT); taFuncEntry = peDecoder.GetRvaData(dwRvaOfOwningFunctionEntry); hr = DacReadAll(taFuncEntry, &functionEntry, sizeof(functionEntry), false); if (FAILED(hr)) { return; } _ASSERTE((functionEntry.UnwindData & RUNTIME_FUNCTION_INDIRECT) == 0); } #endif // _TARGET_AMD64_ *pFuncEntry = (UINT_PTR)(T_RUNTIME_FUNCTION*)PTR_RUNTIME_FUNCTION(taFuncEntry); break; } } if (dwLow > dwHigh) { _ASSERTE(*pFuncEntry == NULL); } } } } } #endif // DACCESS_COMPILE extern "C" void GetRuntimeStackWalkInfo(IN ULONG64 ControlPc, OUT UINT_PTR* pModuleBase, OUT UINT_PTR* pFuncEntry) { WRAPPER_NO_CONTRACT; BEGIN_PRESERVE_LAST_ERROR; BEGIN_ENTRYPOINT_VOIDRET; if (pModuleBase) *pModuleBase = NULL; if (pFuncEntry) *pFuncEntry = NULL; EECodeInfo codeInfo((PCODE)ControlPc); if (!codeInfo.IsValid()) { #if defined(DACCESS_COMPILE) GetUnmanagedStackWalkInfo(ControlPc, pModuleBase, pFuncEntry); #endif // DACCESS_COMPILE goto Exit; } if (pModuleBase) { *pModuleBase = (UINT_PTR)codeInfo.GetModuleBase(); } if (pFuncEntry) { *pFuncEntry = (UINT_PTR)(PT_RUNTIME_FUNCTION)codeInfo.GetFunctionEntry(); } Exit: END_ENTRYPOINT_VOIDRET; END_PRESERVE_LAST_ERROR; } #endif // FEATURE_EH_FUNCLETS #ifdef DACCESS_COMPILE void EEJitManager::EnumMemoryRegions(CLRDataEnumMemoryFlags flags) { IJitManager::EnumMemoryRegions(flags); // // Save all of the code heaps. 
// HeapList* heap; for (heap = m_pCodeHeap; heap; heap = heap->GetNext()) { DacEnumHostDPtrMem(heap); if (heap->pHeap.IsValid()) { heap->pHeap->EnumMemoryRegions(flags); } DacEnumMemoryRegion(heap->startAddress, (ULONG32) (heap->endAddress - heap->startAddress)); if (heap->pHdrMap.IsValid()) { ULONG32 nibbleMapSize = (ULONG32) HEAP2MAPSIZE(ROUND_UP_TO_PAGE(heap->maxCodeHeapSize)); DacEnumMemoryRegion(dac_cast<TADDR>(heap->pHdrMap), nibbleMapSize); } } } #endif // #ifdef DACCESS_COMPILE #ifndef DACCESS_COMPILE //******************************************************* // Execution Manager //******************************************************* // Init statics void ExecutionManager::Init() { CONTRACTL { THROWS; GC_NOTRIGGER; } CONTRACTL_END; m_JumpStubCrst.Init(CrstJumpStubCache, CrstFlags(CRST_UNSAFE_ANYMODE|CRST_DEBUGGER_THREAD)); m_RangeCrst.Init(CrstExecuteManRangeLock, CRST_UNSAFE_ANYMODE); m_pDefaultCodeMan = new EECodeManager(); m_pEEJitManager = new EEJitManager(); #ifdef FEATURE_READYTORUN m_pReadyToRunJitManager = new ReadyToRunJitManager(); #endif } #endif // #ifndef DACCESS_COMPILE //************************************************************************** RangeSection * ExecutionManager::FindCodeRange(PCODE currentPC, ScanFlag scanFlag) { CONTRACTL { NOTHROW; GC_NOTRIGGER; SUPPORTS_DAC; } CONTRACTL_END; if (currentPC == NULL) return NULL; if (scanFlag == ScanReaderLock) return FindCodeRangeWithLock(currentPC); return GetRangeSection(currentPC); } //************************************************************************** NOINLINE // Make sure that the slow path with lock won't affect the fast path RangeSection * ExecutionManager::FindCodeRangeWithLock(PCODE currentPC) { CONTRACTL { NOTHROW; GC_NOTRIGGER; SUPPORTS_DAC; } CONTRACTL_END; ReaderLockHolder rlh; return GetRangeSection(currentPC); } //************************************************************************** PCODE ExecutionManager::GetCodeStartAddress(PCODE currentPC) { WRAPPER_NO_CONTRACT; 
_ASSERTE(currentPC != NULL); EECodeInfo codeInfo(currentPC); if (!codeInfo.IsValid()) return NULL; return PINSTRToPCODE(codeInfo.GetStartAddress()); } //************************************************************************** NativeCodeVersion ExecutionManager::GetNativeCodeVersion(PCODE currentPC) { CONTRACTL { NOTHROW; GC_NOTRIGGER; FORBID_FAULT; } CONTRACTL_END; EECodeInfo codeInfo(currentPC); return codeInfo.IsValid() ? codeInfo.GetNativeCodeVersion() : NativeCodeVersion(); } //************************************************************************** MethodDesc * ExecutionManager::GetCodeMethodDesc(PCODE currentPC) { CONTRACTL { NOTHROW; GC_NOTRIGGER; FORBID_FAULT; } CONTRACTL_END EECodeInfo codeInfo(currentPC); if (!codeInfo.IsValid()) return NULL; return codeInfo.GetMethodDesc(); } //************************************************************************** BOOL ExecutionManager::IsManagedCode(PCODE currentPC) { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; if (currentPC == NULL) return FALSE; if (GetScanFlags() == ScanReaderLock) return IsManagedCodeWithLock(currentPC); return IsManagedCodeWorker(currentPC); } //************************************************************************** NOINLINE // Make sure that the slow path with lock won't affect the fast path BOOL ExecutionManager::IsManagedCodeWithLock(PCODE currentPC) { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; ReaderLockHolder rlh; return IsManagedCodeWorker(currentPC); } //************************************************************************** BOOL ExecutionManager::IsManagedCode(PCODE currentPC, HostCallPreference hostCallPreference /*=AllowHostCalls*/, BOOL *pfFailedReaderLock /*=NULL*/) { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; #ifdef DACCESS_COMPILE return IsManagedCode(currentPC); #else if (hostCallPreference == AllowHostCalls) { return IsManagedCode(currentPC); } ReaderLockHolder rlh(hostCallPreference); if (!rlh.Acquired()) { _ASSERTE(pfFailedReaderLock != 
NULL); *pfFailedReaderLock = TRUE; return FALSE; } return IsManagedCodeWorker(currentPC); #endif } //************************************************************************** // Assumes that the ExecutionManager reader/writer lock is taken or that // it is safe not to take it. BOOL ExecutionManager::IsManagedCodeWorker(PCODE currentPC) { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; // This may get called for arbitrary code addresses. Note that the lock is // taken over the call to JitCodeToMethodInfo too so that nobody pulls out // the range section from underneath us. RangeSection * pRS = GetRangeSection(currentPC); if (pRS == NULL) return FALSE; if (pRS->flags & RangeSection::RANGE_SECTION_CODEHEAP) { // Typically if we find a Jit Manager we are inside a managed method // but on we could also be in a stub, so we check for that // as well and we don't consider stub to be real managed code. TADDR start = dac_cast<PTR_EEJitManager>(pRS->pjit)->FindMethodCode(pRS, currentPC); if (start == NULL) return FALSE; CodeHeader * pCHdr = PTR_CodeHeader(start - sizeof(CodeHeader)); if (!pCHdr->IsStubCodeBlock()) return TRUE; } #ifdef FEATURE_READYTORUN else if (pRS->flags & RangeSection::RANGE_SECTION_READYTORUN) { if (dac_cast<PTR_ReadyToRunJitManager>(pRS->pjit)->JitCodeToMethodInfo(pRS, currentPC, NULL, NULL)) return TRUE; } #endif return FALSE; } //************************************************************************** // Assumes that it is safe not to take it the ExecutionManager reader/writer lock BOOL ExecutionManager::IsReadyToRunCode(PCODE currentPC) { CONTRACTL{ NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; // This may get called for arbitrary code addresses. Note that the lock is // taken over the call to JitCodeToMethodInfo too so that nobody pulls out // the range section from underneath us. 
#ifdef FEATURE_READYTORUN
    RangeSection * pRS = GetRangeSection(currentPC);
    if (pRS != NULL && (pRS->flags & RangeSection::RANGE_SECTION_READYTORUN))
    {
        if (dac_cast<PTR_ReadyToRunJitManager>(pRS->pjit)->JitCodeToMethodInfo(pRS, currentPC, NULL, NULL))
            return TRUE;
    }
#endif

    return FALSE;
}

#ifndef FEATURE_MERGE_JIT_AND_ENGINE
/*********************************************************************/
// This static method returns the name of the jit dll
//
LPCWSTR ExecutionManager::GetJitName()
{
    STANDARD_VM_CONTRACT;

    LPCWSTR  pwzJitName = NULL;

    // Try to obtain a name for the jit library from the env. variable
    IfFailThrow(CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_JitName, const_cast<LPWSTR *>(&pwzJitName)));

    if (NULL == pwzJitName)
    {
        pwzJitName = MAKEDLLNAME_W(W("clrjit"));
    }

    return pwzJitName;
}
#endif // !FEATURE_MERGE_JIT_AND_ENGINE

// Finds the RangeSection (sorted, singly-linked list of code ranges) that
// contains 'addr', or NULL. Uses a one-entry cache (pLastUsed, stored on the
// head node) as a fast path. Caller must hold the reader lock or otherwise
// guarantee the list is stable (HOST_NOCALLS / SUPPORTS_DAC).
RangeSection* ExecutionManager::GetRangeSection(TADDR addr)
{
    CONTRACTL {
        NOTHROW;
        HOST_NOCALLS;
        GC_NOTRIGGER;
        SUPPORTS_DAC;
    } CONTRACTL_END;

    RangeSection * pHead = m_CodeRangeList;

    if (pHead == NULL)
    {
        return NULL;
    }

    RangeSection *pCurr = pHead;
    RangeSection *pLast = NULL;

#ifndef DACCESS_COMPILE
    RangeSection *pLastUsedRS = (pCurr != NULL) ? pCurr->pLastUsed : NULL;
    if (pLastUsedRS != NULL)
    {
        // positive case: addr falls inside the cached section
        if ((addr >= pLastUsedRS->LowAddress) &&
            (addr <  pLastUsedRS->HighAddress)   )
        {
            return pLastUsedRS;
        }

        RangeSection * pNextAfterLastUsedRS = pLastUsedRS->pnext;

        // negative case: addr falls in the gap just below the cached section
        // (list is sorted descending, so "below cached" and "above next" means
        // no section can contain addr)
        if ((addr <  pLastUsedRS->LowAddress) &&
            (pNextAfterLastUsedRS == NULL || addr >= pNextAfterLastUsedRS->HighAddress))
        {
            return NULL;
        }
    }
#endif

    while (pCurr != NULL)
    {
        // See if addr is in [pCurr->LowAddress .. pCurr->HighAddress)
        if (pCurr->LowAddress <= addr)
        {
            // Since we are sorted, once pCurr->HighAddress is less than addr
            // then all subsequent ones will also be lower, so we are done.
            if (addr >= pCurr->HighAddress)
            {
                // we'll return NULL and put pLast into pLastUsed
                pCurr = NULL;
            }
            else
            {
                // addr must be in [pCurr->LowAddress .. pCurr->HighAddress)
                _ASSERTE((pCurr->LowAddress <= addr) && (addr < pCurr->HighAddress));

                // Found the matching RangeSection
                // we'll return pCurr and put it into pLastUsed
                pLast = pCurr;
            }
            break;
        }
        pLast = pCurr;
        pCurr = pCurr->pnext;
    }

#ifndef DACCESS_COMPILE
    // Cache pCurr as pLastUsed in the head node
    // Unless we are on an MP system with many cpus
    // where this sort of caching actually diminishes scaling during server GC
    // due to many processors writing to a common location
    if (g_SystemInfo.dwNumberOfProcessors < 4 || !GCHeapUtilities::IsServerHeap() || !GCHeapUtilities::IsGCInProgress())
        pHead->pLastUsed = pLast;
#endif

    return pCurr;
}

// Like GetRangeSection, but also returns (via ppPrev) the predecessor node so
// the caller (DeleteRange) can unlink the match. No pLastUsed caching here.
RangeSection* ExecutionManager::GetRangeSectionAndPrev(RangeSection *pHead, TADDR addr, RangeSection** ppPrev)
{
    WRAPPER_NO_CONTRACT;

    RangeSection *pCurr;
    RangeSection *pPrev;
    RangeSection *result = NULL;

    for (pPrev = NULL, pCurr = pHead;
         pCurr != NULL;
         pPrev = pCurr, pCurr = pCurr->pnext)
    {
        // See if addr is in [pCurr->LowAddress .. pCurr->HighAddress)
        if (pCurr->LowAddress > addr)
            continue;

        if (addr >= pCurr->HighAddress)
            break;

        // addr must be in [pCurr->LowAddress .. pCurr->HighAddress)
        _ASSERTE((pCurr->LowAddress <= addr) && (addr < pCurr->HighAddress));

        // Found the matching RangeSection
        result = pCurr;

        // Write back pPrev to ppPrev if it is non-null
        if (ppPrev != NULL)
            *ppPrev = pPrev;

        break;
    }

    // If we failed to find a match write NULL to ppPrev if it is non-null
    if ((ppPrev != NULL) && (result == NULL))
    {
        *ppPrev = NULL;
    }

    return result;
}

// Returns the Module for an NGen'ed data address, or NULL when the address is
// in a JIT code heap or a ReadyToRun range (those are not "zap" modules).
/* static */
PTR_Module ExecutionManager::FindZapModule(TADDR currentData)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
        STATIC_CONTRACT_HOST_CALLS;
        SUPPORTS_DAC;
    }
    CONTRACTL_END;

    ReaderLockHolder rlh;

    RangeSection * pRS = GetRangeSection(currentData);
    if (pRS == NULL)
        return NULL;

    if (pRS->flags & RangeSection::RANGE_SECTION_CODEHEAP)
        return NULL;

#ifdef FEATURE_READYTORUN
    if (pRS->flags & RangeSection::RANGE_SECTION_READYTORUN)
        return NULL;
#endif

    return dac_cast<PTR_Module>(pRS->pHeapListOrZapModule);
}

// Returns the Module for an address inside a ReadyToRun image, or NULL
// otherwise (including when FEATURE_READYTORUN is not built in).
/* static */
PTR_Module ExecutionManager::FindReadyToRunModule(TADDR currentData)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
        STATIC_CONTRACT_HOST_CALLS;
        SUPPORTS_DAC;
    }
    CONTRACTL_END;

#ifdef FEATURE_READYTORUN
    ReaderLockHolder rlh;

    RangeSection * pRS = GetRangeSection(currentData);
    if (pRS == NULL)
        return NULL;

    if (pRS->flags & RangeSection::RANGE_SECTION_CODEHEAP)
        return NULL;

    if (pRS->flags & RangeSection::RANGE_SECTION_READYTORUN)
        return dac_cast<PTR_Module>(pRS->pHeapListOrZapModule);; // NOTE(review): stray second ';' in original, kept as-is (harmless empty statement)

    return NULL;
#else
    return NULL;
#endif
}

// Returns the Module whose GCRefMap covers currentData. Unlike FindZapModule,
// ReadyToRun ranges are intentionally NOT filtered out here.
/* static */
PTR_Module ExecutionManager::FindModuleForGCRefMap(TADDR currentData)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        SUPPORTS_DAC;
    }
    CONTRACTL_END;

    RangeSection * pRS = FindCodeRange(currentData, ExecutionManager::GetScanFlags());
    if (pRS == NULL)
        return NULL;

    if (pRS->flags & RangeSection::RANGE_SECTION_CODEHEAP)
        return NULL;

#ifdef FEATURE_READYTORUN
    // RANGE_SECTION_READYTORUN is intentionally not filtered out here
#endif

    return dac_cast<PTR_Module>(pRS->pHeapListOrZapModule);
}

#ifndef DACCESS_COMPILE

/* NGenMem depends on this entrypoint */
// Registers a new executable code range [pStartRange, pEndRange) with the
// manager so later PC lookups can find its IJitManager and heap.
NOINLINE
void ExecutionManager::AddCodeRange(TADDR          pStartRange,
                                    TADDR          pEndRange,
                                    IJitManager *  pJit,
                                    RangeSection::RangeSectionFlags flags,
                                    void *         pHp)
{
    CONTRACTL {
        THROWS;
        GC_NOTRIGGER;
        PRECONDITION(CheckPointer(pJit));
        PRECONDITION(CheckPointer(pHp));
    } CONTRACTL_END;

    AddRangeHelper(pStartRange,
                   pEndRange,
                   pJit,
                   flags,
                   dac_cast<TADDR>(pHp));
}

// Allocates a RangeSection node for [pStartRange, pEndRange) and links it into
// m_CodeRangeList, which is kept sorted by address in DESCENDING order (newer,
// higher ranges tend to land at the head). Takes m_RangeCrst for the insert.
void ExecutionManager::AddRangeHelper(TADDR          pStartRange,
                                      TADDR          pEndRange,
                                      IJitManager *  pJit,
                                      RangeSection::RangeSectionFlags flags,
                                      TADDR          pHeapListOrZapModule)
{
    CONTRACTL {
        THROWS;
        GC_NOTRIGGER;
        HOST_CALLS;
        PRECONDITION(pStartRange < pEndRange);
        PRECONDITION(pHeapListOrZapModule != NULL);
    } CONTRACTL_END;

    RangeSection *pnewrange = new RangeSection;

    _ASSERTE(pEndRange > pStartRange);

    pnewrange->LowAddress  = pStartRange;
    pnewrange->HighAddress = pEndRange;
    pnewrange->pjit        = pJit;
    pnewrange->pnext       = NULL;
    pnewrange->flags       = flags;
    pnewrange->pLastUsed   = NULL;
    pnewrange->pHeapListOrZapModule = pHeapListOrZapModule;
#if defined(TARGET_AMD64)
    pnewrange->pUnwindInfoTable = NULL;
#endif // defined(TARGET_AMD64)
    {
        CrstHolder ch(&m_RangeCrst); // Acquire the Crst before linking in a new RangeList

        RangeSection * current  = m_CodeRangeList;
        RangeSection * previous = NULL;

        if (current != NULL)
        {
            while (true)
            {
                // Sort addresses top down so that more recently created ranges
                // will populate the top of the list
                if (pnewrange->LowAddress > current->LowAddress)
                {
                    // Asserts if ranges are overlapping
                    _ASSERTE(pnewrange->LowAddress >= current->HighAddress);
                    pnewrange->pnext = current;

                    if (previous == NULL) // insert new head
                    {
                        m_CodeRangeList = pnewrange;
                    }
                    else
                    { // insert in the middle
                        previous->pnext = pnewrange;
                    }
                    break;
                }

                RangeSection * next = current->pnext;
                if (next == NULL) // insert at end of list
                {
                    current->pnext = pnewrange;
                    break;
                }

                // Continue walking the RangeSection list
                previous = current;
                current = next;
            }
        }
        else
        {
            m_CodeRangeList = pnewrange;
        }
    }
}

// Deletes a single range starting at pStartRange
void ExecutionManager::DeleteRange(TADDR pStartRange)
{
    CONTRACTL {
        NOTHROW; // If this becomes throwing, then revisit the queuing of deletes below.
        GC_NOTRIGGER;
    } CONTRACTL_END;

    RangeSection *pCurr = NULL;
    {
        // Acquire the Crst before unlinking a RangeList.
        // NOTE: The Crst must be acquired BEFORE we grab the writer lock, as the
        // writer lock forces us into a forbid suspend thread region, and it's illegal
        // to enter a Crst after the forbid suspend thread region is entered
        CrstHolder ch(&m_RangeCrst);

        // Acquire the WriterLock and prevent any readers from walking the RangeList.
        // This also forces us to enter a forbid suspend thread region, to prevent
        // hijacking profilers from grabbing this thread and walking it (the walk may
        // require the reader lock, which would cause a deadlock).
        WriterLockHolder wlh;

        RangeSection *pPrev = NULL;

        pCurr = GetRangeSectionAndPrev(m_CodeRangeList, pStartRange, &pPrev);

        // pCurr points at the Range that needs to be unlinked from the RangeList
        if (pCurr != NULL)
        {

            // If pPrev is NULL then the head of this list is to be deleted
            if (pPrev == NULL)
            {
                m_CodeRangeList = pCurr->pnext;
            }
            else
            {
                _ASSERT(pPrev->pnext == pCurr);

                pPrev->pnext = pCurr->pnext;
            }

            // Clear the cache pLastUsed in the head node (if any)
            RangeSection * head = m_CodeRangeList;
            if (head != NULL)
            {
                head->pLastUsed = NULL;
            }

            //
            // Cannot delete pCurr here because we own the WriterLock and if this is
            // a hosted scenario then the hosting api callback cannot occur in a forbid
            // suspend region, which the writer lock is.
            //
        }
    }

    //
    // Now delete the node (outside the locks — see comment above)
    //
    if (pCurr != NULL)
    {
#if defined(TARGET_AMD64)
        if (pCurr->pUnwindInfoTable != 0)
            delete pCurr->pUnwindInfoTable;
#endif // defined(TARGET_AMD64)
        delete pCurr;
    }
}

#endif // #ifndef DACCESS_COMPILE

#ifdef DACCESS_COMPILE

// DAC-only: walks the RangeSection list and reports each node (plus its jit
// manager and, for non-codeheap nodes, its Module) into a minidump.
void ExecutionManager::EnumRangeList(RangeSection* list,
                                     CLRDataEnumMemoryFlags flags)
{
    while (list != NULL)
    {
        // If we can't read the target memory, stop immediately so we don't work
        // with broken data.
        if (!DacEnumMemoryRegion(dac_cast<TADDR>(list), sizeof(*list)))
            break;

        if (list->pjit.IsValid())
        {
            list->pjit->EnumMemoryRegions(flags);
        }

        if (!(list->flags & RangeSection::RANGE_SECTION_CODEHEAP))
        {
            PTR_Module pModule = dac_cast<PTR_Module>(list->pHeapListOrZapModule);

            if (pModule.IsValid())
            {
                pModule->EnumMemoryRegions(flags, true);
            }
        }

        list = list->pnext;
#if defined (_DEBUG)
        // Test hook: when testing on debug builds, we want an easy way to test that the while
        // correctly terminates in the face of ridiculous stuff from the target.
        if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_DumpGeneration_IntentionallyCorruptDataFromTarget) == 1)
        {
            // Force us to struggle on with something bad.
            if (list == NULL)
            {
                list = (RangeSection *)&flags;
            }
        }
#endif // (_DEBUG)

    }
}

// DAC-only: reports the ExecutionManager's global state and the whole
// RangeSection list into a minidump.
void ExecutionManager::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
{
    STATIC_CONTRACT_HOST_CALLS;

    ReaderLockHolder rlh;

    //
    // Report the global data portions.
    //

    m_CodeRangeList.EnumMem();
    m_pDefaultCodeMan.EnumMem();

    //
    // Walk structures and report.
    //

    if (m_CodeRangeList.IsValid())
    {
        EnumRangeList(m_CodeRangeList, flags);
    }
}
#endif // #ifdef DACCESS_COMPILE

#if !defined(DACCESS_COMPILE)

// Tears down per-LoaderAllocator code-manager state when an allocator
// (e.g. a collectible assembly's) is unloaded: flushes the instruction
// cache, invalidates the stackwalk cache, frees the jump-stub cache,
// and tells the EEJitManager to release the allocator's code.
void ExecutionManager::Unload(LoaderAllocator *pLoaderAllocator)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    } CONTRACTL_END;

    // a size of 0 is a signal to Nirvana to flush the entire cache
    FlushInstructionCache(GetCurrentProcess(),0,0);

    /* StackwalkCacheEntry::EIP is an address into code. Since we are
    unloading the code, we need to invalidate the cache. Otherwise,
    its possible that another appdomain might generate code at the very
    same address, and we might incorrectly think that the old
    StackwalkCacheEntry corresponds to it. So flush the cache.
    */
    StackwalkCache::Invalidate(pLoaderAllocator);

    JumpStubCache * pJumpStubCache = (JumpStubCache *) pLoaderAllocator->m_pJumpStubCache;
    if (pJumpStubCache != NULL)
    {
        delete pJumpStubCache;
        pLoaderAllocator->m_pJumpStubCache = NULL;
    }

    GetEEJitManager()->Unload(pLoaderAllocator);
}

// This method is used by the JIT and the runtime for PreStubs. It will return
// the address of a short jump thunk that will jump to the 'target' address.
// It is only needed when the target architecture has a perferred call instruction
// that doesn't actually span the full address space. This is true for x64 where
// the preferred call instruction is a 32-bit pc-rel call instruction.
// (This is also true on ARM64, but it not true for x86)
//
// For these architectures, in JITed code and in the prestub, we encode direct calls
// using the preferred call instruction and we also try to insure that the Jitted
// code is within the 32-bit pc-rel range of clr.dll to allow direct JIT helper calls.
//
// When the call target is too far away to encode using the preferred call instruction.
// We will create a short code thunk that uncoditionally jumps to the target address.
// We call this jump thunk a "jumpStub" in the CLR code.
// We have the requirement that the "jumpStub" that we create on demand be usable by
// the preferred call instruction, this requires that on x64 the location in memory
// where we create the "jumpStub" be within the 32-bit pc-rel range of the call that
// needs it.
//
// The arguments to this method:
//  pMD    - the MethodDesc for the currenty managed method in Jitted code
//           or for the target method for a PreStub
//           It is required if calling from or to a dynamic method (LCG method)
//  target - The call target address (this is the address that was too far to encode)
//  loAddr
//  hiAddr - The range of the address that we must place the jumpStub in, so that it
//           can be used to encode the preferred call instruction.
//  pLoaderAllocator
//         - The Loader allocator to use for allocations, this can be null.
//           When it is null, then the pMD must be valid and is used to obtain
//           the allocator.
//
// This method will either locate and return an existing jumpStub thunk that can be
// reused for this request, because it meets all of the requirements necessary.
// Or it will allocate memory in the required region and create a new jumpStub that
// meets all of the requirements necessary.
//
// Note that for dynamic methods (LCG methods) we cannot share the jumpStubs between
// different methods. This is because we allow for the unloading (reclaiming) of
// individual dynamic methods. And we associate the jumpStub memory allocated with
// the dynamic method that requested the jumpStub.
//
PCODE ExecutionManager::jumpStub(MethodDesc* pMD, PCODE target,
                                 BYTE * loAddr,   BYTE * hiAddr,
                                 LoaderAllocator *pLoaderAllocator,
                                 bool throwOnOutOfMemoryWithinRange)
{
    CONTRACT(PCODE) {
        THROWS;
        GC_NOTRIGGER;
        MODE_ANY;
        PRECONDITION(pLoaderAllocator != NULL || pMD != NULL);
        PRECONDITION(loAddr < hiAddr);
        POSTCONDITION((RETVAL != NULL) || !throwOnOutOfMemoryWithinRange);
    } CONTRACT_END;

    PCODE jumpStub = NULL;

    if (pLoaderAllocator == NULL)
    {
        pLoaderAllocator = pMD->GetLoaderAllocator();
    }
    _ASSERTE(pLoaderAllocator != NULL);

    bool isLCG = pMD && pMD->IsLCGMethod();
    LCGMethodResolver * pResolver = nullptr;
    // LCG methods keep a private per-resolver cache; everything else shares
    // the LoaderAllocator-wide cache (see the sharing comment above).
    JumpStubCache * pJumpStubCache = (JumpStubCache *) pLoaderAllocator->m_pJumpStubCache;

    if (isLCG)
    {
        pResolver      = pMD->AsDynamicMethodDesc()->GetLCGMethodResolver();
        pJumpStubCache = pResolver->m_pJumpStubCache;
    }

    // All cache lookup/insert below happens under m_JumpStubCrst.
    CrstHolder ch(&m_JumpStubCrst);
    if (pJumpStubCache == NULL)
    {
        pJumpStubCache = new JumpStubCache();
        if (isLCG)
        {
            pResolver->m_pJumpStubCache = pJumpStubCache;
        }
        else
        {
            pLoaderAllocator->m_pJumpStubCache = pJumpStubCache;
        }
    }

    if (isLCG)
    {
        // Increment counter of LCG jump stub lookup attempts
        m_LCG_JumpStubLookup++;
    }
    else
    {
        // Increment counter of normal jump stub lookup attempts
        m_normal_JumpStubLookup++;
    }

    // search for a matching jumpstub in the jumpStubCache
    //
    for (JumpStubTable::KeyIterator i = pJumpStubCache->m_Table.Begin(target),
        end = pJumpStubCache->m_Table.End(target); i != end; i++)
    {
        jumpStub = i->m_jumpStub;

        _ASSERTE(jumpStub != NULL);

        // Is the matching entry within the requested [loAddr, hiAddr] range?
        if (((TADDR)loAddr <= jumpStub) && (jumpStub <= (TADDR)hiAddr))
        {
            RETURN(jumpStub);
        }
    }

    // If we get here we need to create a new jump stub
    // add or change the jump stub table to point at the new one
    jumpStub = getNextJumpStub(pMD, target, loAddr, hiAddr, pLoaderAllocator, throwOnOutOfMemoryWithinRange); // this statement can throw
    if (jumpStub == NULL)
    {
        _ASSERTE(!throwOnOutOfMemoryWithinRange);
        RETURN(NULL);
    }

    _ASSERTE(((TADDR)loAddr <= jumpStub) && (jumpStub <= (TADDR)hiAddr));

    LOG((LF_JIT, LL_INFO10000, "Add JumpStub to" FMT_ADDR "at" FMT_ADDR "\n",
         DBG_ADDR(target), DBG_ADDR(jumpStub) ));

    RETURN(jumpStub);
}

// Allocates (or finds room for) one new jump stub targeting 'target' within
// [loAddr, hiAddr], emits it, and records it in the jump-stub cache.
// Must be called with m_JumpStubCrst held (see PRECONDITION).
PCODE ExecutionManager::getNextJumpStub(MethodDesc* pMD, PCODE target,
                                        BYTE * loAddr, BYTE * hiAddr,
                                        LoaderAllocator *pLoaderAllocator,
                                        bool throwOnOutOfMemoryWithinRange)
{
    CONTRACT(PCODE) {
        THROWS;
        GC_NOTRIGGER;
        PRECONDITION(pLoaderAllocator != NULL);
        PRECONDITION(m_JumpStubCrst.OwnedByCurrentThread());
        POSTCONDITION((RETVAL != NULL) || !throwOnOutOfMemoryWithinRange);
    } CONTRACT_END;

    BYTE *           jumpStub   = NULL;
    BYTE *           jumpStubRW = NULL;
    bool             isLCG      = pMD && pMD->IsLCGMethod();
    // For LCG we request a small block of 4 jumpstubs, because we can not share them
    // with any other methods and very frequently our method only needs one jump stub.
    // Using 4 gives a request size of (32 + 4*12) or 80 bytes.
    // Also note that request sizes are rounded up to a multiples of 16.
    // The request size is calculated into 'blockSize' in allocJumpStubBlock.
    // For x64 the value of BACK_TO_BACK_JUMP_ALLOCATE_SIZE is 12 bytes
    // and the sizeof(JumpStubBlockHeader) is 32.
    //

    DWORD numJumpStubs = isLCG ?
4 : DEFAULT_JUMPSTUBS_PER_BLOCK;
    JumpStubCache * pJumpStubCache = (JumpStubCache *) pLoaderAllocator->m_pJumpStubCache;

    if (isLCG)
    {
        LCGMethodResolver * pResolver;
        pResolver      = pMD->AsDynamicMethodDesc()->GetLCGMethodResolver();
        pJumpStubCache = pResolver->m_pJumpStubCache;
    }

    JumpStubBlockHeader ** ppHead   = &(pJumpStubCache->m_pBlocks);
    JumpStubBlockHeader *  curBlock = *ppHead;
    ExecutableWriterHolder<JumpStubBlockHeader> curBlockWriterHolder;

    // allocate a new jumpstub from 'curBlock' if it is not fully allocated
    //
    while (curBlock)
    {
        _ASSERTE(pLoaderAllocator == (isLCG ? curBlock->GetHostCodeHeap()->GetAllocator() : curBlock->GetLoaderAllocator()));

        if (curBlock->m_used < curBlock->m_allocated)
        {
            jumpStub = (BYTE *) curBlock + sizeof(JumpStubBlockHeader) + ((size_t) curBlock->m_used * BACK_TO_BACK_JUMP_ALLOCATE_SIZE);

            // Candidate slot must also satisfy the caller's address range.
            if ((loAddr <= jumpStub) && (jumpStub <= hiAddr))
            {
                // We will update curBlock->m_used at "DONE"
                size_t blockSize = sizeof(JumpStubBlockHeader) + (size_t) numJumpStubs * BACK_TO_BACK_JUMP_ALLOCATE_SIZE;
                curBlockWriterHolder = ExecutableWriterHolder<JumpStubBlockHeader>(curBlock, blockSize);
                // Translate the executable (RX) slot address into the writable (RW) mapping.
                jumpStubRW = (BYTE *)((TADDR)jumpStub + (TADDR)curBlockWriterHolder.GetRW() - (TADDR)curBlock);
                goto DONE;
            }
        }
        curBlock = curBlock->m_next;
    }

    // If we get here then we need to allocate a new JumpStubBlock

    if (isLCG)
    {
#ifdef TARGET_AMD64
        // Note this these values are not requirements, instead we are
        // just confirming the values that are mentioned in the comments.
        _ASSERTE(BACK_TO_BACK_JUMP_ALLOCATE_SIZE == 12);
        _ASSERTE(sizeof(JumpStubBlockHeader) == 32);
#endif

        // Increment counter of LCG jump stub block allocations
        m_LCG_JumpStubBlockAllocCount++;
    }
    else
    {
        // Increment counter of normal jump stub block allocations
        m_normal_JumpStubBlockAllocCount++;
    }

    // allocJumpStubBlock will allocate from the LoaderCodeHeap for normal methods
    // and will allocate from a HostCodeHeap for LCG methods.
    //
    // note that this can throw an OOM exception

    curBlock = ExecutionManager::GetEEJitManager()->allocJumpStubBlock(pMD, numJumpStubs, loAddr, hiAddr, pLoaderAllocator, throwOnOutOfMemoryWithinRange);
    if (curBlock == NULL)
    {
        _ASSERTE(!throwOnOutOfMemoryWithinRange);
        RETURN(NULL);
    }

    curBlockWriterHolder = ExecutableWriterHolder<JumpStubBlockHeader>(curBlock, sizeof(JumpStubBlockHeader) + ((size_t) (curBlock->m_used + 1) * BACK_TO_BACK_JUMP_ALLOCATE_SIZE));

    jumpStubRW = (BYTE *) curBlockWriterHolder.GetRW() + sizeof(JumpStubBlockHeader) + ((size_t) curBlock->m_used * BACK_TO_BACK_JUMP_ALLOCATE_SIZE);
    jumpStub   = (BYTE *) curBlock + sizeof(JumpStubBlockHeader) + ((size_t) curBlock->m_used * BACK_TO_BACK_JUMP_ALLOCATE_SIZE);

    _ASSERTE((loAddr <= jumpStub) && (jumpStub <= hiAddr));

    // Link the new block at the head of the cache's block list.
    curBlockWriterHolder.GetRW()->m_next = *ppHead;
    *ppHead = curBlock;

DONE:

    _ASSERTE((curBlock->m_used < curBlock->m_allocated));

#ifdef TARGET_ARM64
    // 8-byte alignment is required on ARM64
    _ASSERTE(((UINT_PTR)jumpStub & 7) == 0);
#endif

    emitBackToBackJump(jumpStub, jumpStubRW, (void*) target);

#ifdef FEATURE_PERFMAP
    PerfMap::LogStubs(__FUNCTION__, "emitBackToBackJump", (PCODE)jumpStub, BACK_TO_BACK_JUMP_ALLOCATE_SIZE);
#endif

    // We always add the new jumpstub to the jumpStubCache
    //
    _ASSERTE(pJumpStubCache != NULL);

    JumpStubEntry entry;

    entry.m_target = target;
    entry.m_jumpStub = (PCODE)jumpStub;

    pJumpStubCache->m_Table.Add(entry);

    curBlockWriterHolder.GetRW()->m_used++;    // record that we have used up one more jumpStub in the block

    // Every time we create a new jumpStub thunk one of these counters is incremented
    if (isLCG)
    {
        // Increment counter of LCG unique jump stubs
        m_LCG_JumpStubUnique++;
    }
    else
    {
        // Increment counter of normal unique jump stubs
        m_normal_JumpStubUnique++;
    }

    // Is the 'curBlock' now completely full?
    if (curBlock->m_used == curBlock->m_allocated)
    {
        if (isLCG)
        {
            // Increment counter of LCG jump stub blocks that are full
            m_LCG_JumpStubBlockFullCount++;

            // Log this "LCG JumpStubBlock filled" along with the four counter values
            STRESS_LOG4(LF_JIT, LL_INFO1000, "LCG JumpStubBlock filled - (%u, %u, %u, %u)\n",
                        m_LCG_JumpStubLookup, m_LCG_JumpStubUnique,
                        m_LCG_JumpStubBlockAllocCount, m_LCG_JumpStubBlockFullCount);
        }
        else
        {
            // Increment counter of normal jump stub blocks that are full
            m_normal_JumpStubBlockFullCount++;

            // Log this "normal JumpStubBlock filled" along with the four counter values
            STRESS_LOG4(LF_JIT, LL_INFO1000, "Normal JumpStubBlock filled - (%u, %u, %u, %u)\n",
                        m_normal_JumpStubLookup, m_normal_JumpStubUnique,
                        m_normal_JumpStubBlockAllocCount, m_normal_JumpStubBlockFullCount);

            if ((m_LCG_JumpStubLookup > 0) && ((m_normal_JumpStubBlockFullCount % 5) == 1))
            {
                // Every 5 occurrence of the above we also
                // Log "LCG JumpStubBlock status" along with the four counter values
                STRESS_LOG4(LF_JIT, LL_INFO1000, "LCG JumpStubBlock status - (%u, %u, %u, %u)\n",
                            m_LCG_JumpStubLookup, m_LCG_JumpStubUnique,
                            m_LCG_JumpStubBlockAllocCount, m_LCG_JumpStubBlockFullCount);
            }
        }
    }

    RETURN((PCODE)jumpStub);
}
#endif // !DACCESS_COMPILE

// Collects the module-relative start offsets of all funclets inside the code
// region [pCodeStart, pCodeStart + size), writing up to dwLength entries into
// pStartFuncletOffsets and counting the total in *pnFunclets.
static void GetFuncletStartOffsetsHelper(PCODE pCodeStart, SIZE_T size, SIZE_T ofsAdj,
    PTR_RUNTIME_FUNCTION pFunctionEntry, TADDR moduleBase,
    DWORD * pnFunclets, DWORD* pStartFuncletOffsets, DWORD dwLength)
{
    _ASSERTE(FitsInU4((pCodeStart + size) - moduleBase));
    DWORD endAddress = (DWORD)((pCodeStart + size) - moduleBase);

    // Entries are sorted and terminated by sentinel value (DWORD)-1
    for (; RUNTIME_FUNCTION__BeginAddress(pFunctionEntry) < endAddress; pFunctionEntry++)
    {
#ifdef TARGET_AMD64
        _ASSERTE((pFunctionEntry->UnwindData & RUNTIME_FUNCTION_INDIRECT) == 0);
#endif

#if defined(EXCEPTION_DATA_SUPPORTS_FUNCTION_FRAGMENTS)
        if (IsFunctionFragment(moduleBase, pFunctionEntry))
        {
            // This is a fragment (not the funclet beginning); skip it
            continue;
        }
#endif // EXCEPTION_DATA_SUPPORTS_FUNCTION_FRAGMENTS

        if (*pnFunclets < dwLength)
        {
            TADDR funcletStartAddress = (moduleBase + RUNTIME_FUNCTION__BeginAddress(pFunctionEntry)) + ofsAdj;
            _ASSERTE(FitsInU4(funcletStartAddress - pCodeStart));
            pStartFuncletOffsets[*pnFunclets] = (DWORD)(funcletStartAddress - pCodeStart);
        }
        (*pnFunclets)++;
    }
}

#if defined(FEATURE_EH_FUNCLETS) && defined(DACCESS_COMPILE)

//
// To locate an entry in the function entry table (the program exceptions data directory), the debugger
// performs a binary search over the table.  This function reports the entries that are encountered in the
// binary search.
//
// Parameters:
//   pRtf: The target function table entry to be located
//   pNativeLayout: A pointer to the loaded native layout for the module containing pRtf
//
static void EnumRuntimeFunctionEntriesToFindEntry(PTR_RUNTIME_FUNCTION pRtf, PTR_PEImageLayout pNativeLayout)
{
    pRtf.EnumMem();

    if (pNativeLayout == NULL)
    {
        return;
    }

    IMAGE_DATA_DIRECTORY * pProgramExceptionsDirectory = pNativeLayout->GetDirectoryEntry(IMAGE_DIRECTORY_ENTRY_EXCEPTION);
    if (!pProgramExceptionsDirectory ||
        (pProgramExceptionsDirectory->Size == 0) ||
        (pProgramExceptionsDirectory->Size % sizeof(T_RUNTIME_FUNCTION) != 0))
    {
        // Program exceptions directory malformatted
        return;
    }

    PTR_BYTE moduleBase(pNativeLayout->GetBase());
    PTR_RUNTIME_FUNCTION firstFunctionEntry(moduleBase + pProgramExceptionsDirectory->VirtualAddress);

    if (pRtf < firstFunctionEntry ||
        ((dac_cast<TADDR>(pRtf) - dac_cast<TADDR>(firstFunctionEntry)) % sizeof(T_RUNTIME_FUNCTION) != 0))
    {
        // Program exceptions directory malformatted
        return;
    }

    UINT_PTR indexToLocate = pRtf - firstFunctionEntry;

    UINT_PTR low = 0;                                                               // index in the function entry table of low end of search range
    UINT_PTR high = (pProgramExceptionsDirectory->Size) / sizeof(T_RUNTIME_FUNCTION) - 1; // index of high end of search range
    UINT_PTR mid = (low + high) / 2;                                                // index of entry to be compared

    if (indexToLocate > high)
    {
        return;
    }

    // Replay the debugger's binary search, EnumMem-ing every probed entry so
    // all of them end up in the dump.
    while (indexToLocate != mid)
    {
        PTR_RUNTIME_FUNCTION functionEntry = firstFunctionEntry + mid;
        functionEntry.EnumMem();
        if (indexToLocate > mid)
        {
            low = mid + 1;
        }
        else
        {
            high = mid - 1;
        }
        mid = (low + high) / 2;
        _ASSERTE(low <= mid && mid <= high);
    }
}
#endif // FEATURE_EH_FUNCLETS

#if defined(FEATURE_READYTORUN)

// Return start of exception info for a method, or 0 if the method has no EH info
DWORD NativeExceptionInfoLookupTable::LookupExceptionInfoRVAForMethod(PTR_CORCOMPILE_EXCEPTION_LOOKUP_TABLE pExceptionLookupTable,
                                                              COUNT_T numLookupEntries,
                                                              DWORD methodStartRVA,
                                                              COUNT_T* pSize)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        SUPPORTS_DAC;
    } CONTRACTL_END;

    _ASSERTE(pExceptionLookupTable != NULL);

    COUNT_T start = 0;
    COUNT_T end = numLookupEntries - 2;

    // The last entry in the lookup table (end-1) points to a sentinel entry.
    // The sentinel entry helps to determine the number of EH clauses for the last table entry.
    _ASSERTE(pExceptionLookupTable->ExceptionLookupEntry(numLookupEntries-1)->MethodStartRVA == (DWORD)-1);

    // Binary search the lookup table
    // Using linear search is faster once we get down to small number of entries.
    while (end - start > 10)
    {
        COUNT_T middle = start + (end - start) / 2;

        _ASSERTE(start < middle && middle < end);

        DWORD rva = pExceptionLookupTable->ExceptionLookupEntry(middle)->MethodStartRVA;

        if (methodStartRVA < rva)
        {
            end = middle - 1;
        }
        else
        {
            start = middle;
        }
    }

    // Finish with a linear scan of the remaining small window.
    for (COUNT_T i = start; i <= end; ++i)
    {
        DWORD rva = pExceptionLookupTable->ExceptionLookupEntry(i)->MethodStartRVA;
        if (methodStartRVA  == rva)
        {
            CORCOMPILE_EXCEPTION_LOOKUP_TABLE_ENTRY *pEntry = pExceptionLookupTable->ExceptionLookupEntry(i);

            //Get the count of EH Clause entries
            CORCOMPILE_EXCEPTION_LOOKUP_TABLE_ENTRY * pNextEntry = pExceptionLookupTable->ExceptionLookupEntry(i + 1);
            *pSize = pNextEntry->ExceptionInfoRVA - pEntry->ExceptionInfoRVA;

            return pEntry->ExceptionInfoRVA;
        }
    }

    // Not found
    return 0;
}

// Finds the index of the RUNTIME_FUNCTION entry covering RelativePc within
// [Low, High], or -1 when no entry covers it.
int NativeUnwindInfoLookupTable::LookupUnwindInfoForMethod(DWORD RelativePc,
                                                          PTR_RUNTIME_FUNCTION pRuntimeFunctionTable,
                                                          int Low,
                                                          int High)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        SUPPORTS_DAC;
    } CONTRACTL_END;

#ifdef TARGET_ARM
    RelativePc |= THUMB_CODE;
#endif

    // Entries are sorted and terminated by sentinel value (DWORD)-1

    // Binary search the RUNTIME_FUNCTION table
    // Use linear search once we get down to a small number of elements
    // to avoid Binary search overhead.
    while (High - Low > 10)
    {
       int Middle = Low + (High - Low) / 2;

       PTR_RUNTIME_FUNCTION pFunctionEntry = pRuntimeFunctionTable + Middle;
       if (RelativePc < pFunctionEntry->BeginAddress)
       {
           High = Middle - 1;
       }
       else
       {
           Low = Middle;
       }
    }

    // Linear scan of the remaining small window.
    for (int i = Low; i <= High; ++i)
    {
        // This is safe because of entries are terminated by sentinel value (DWORD)-1
        PTR_RUNTIME_FUNCTION pNextFunctionEntry = pRuntimeFunctionTable + (i + 1);

        if (RelativePc < pNextFunctionEntry->BeginAddress)
        {
            PTR_RUNTIME_FUNCTION pFunctionEntry = pRuntimeFunctionTable + i;
            if (RelativePc >= pFunctionEntry->BeginAddress)
            {
                return i;
            }
            break;
        }
    }

    return -1;
}

//***************************************************************************************
//***************************************************************************************

#ifndef DACCESS_COMPILE

ReadyToRunJitManager::ReadyToRunJitManager()
{
    WRAPPER_NO_CONTRACT;
}

#endif // #ifndef DACCESS_COMPILE

// Returns the ReadyToRunInfo for the module that owns the method identified
// by MethodToken (the token's range section carries the owning Module).
ReadyToRunInfo * ReadyToRunJitManager::JitTokenToReadyToRunInfo(const METHODTOKEN& MethodToken)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        HOST_NOCALLS;
        SUPPORTS_DAC;
    } CONTRACTL_END;

    return dac_cast<PTR_Module>(MethodToken.m_pRangeSection->pHeapListOrZapModule)->GetReadyToRunInfo();
}

// Derives the GC-info encoding version from the R2R image's header version.
UINT32 ReadyToRunJitManager::JitTokenToGCInfoVersion(const METHODTOKEN& MethodToken)
{
    CONTRACTL{
        NOTHROW;
        GC_NOTRIGGER;
        HOST_NOCALLS;
        SUPPORTS_DAC;
    } CONTRACTL_END;

    READYTORUN_HEADER * header = JitTokenToReadyToRunInfo(MethodToken)->GetReadyToRunHeader();

    return GCInfoToken::ReadyToRunVersionToGcInfoVersion(header->MajorVersion);
}

// For R2R methods the METHODTOKEN's "code header" is really the method's
// RUNTIME_FUNCTION entry; this just reinterprets it.
PTR_RUNTIME_FUNCTION ReadyToRunJitManager::JitTokenToRuntimeFunction(const METHODTOKEN& MethodToken)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        HOST_NOCALLS;
        SUPPORTS_DAC;
    } CONTRACTL_END;

    return dac_cast<PTR_RUNTIME_FUNCTION>(MethodToken.m_pCodeHeader);
}

// Absolute start address of the method's native code (module base + RVA).
TADDR ReadyToRunJitManager::JitTokenToStartAddress(const METHODTOKEN& MethodToken)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        HOST_NOCALLS;
        SUPPORTS_DAC;
    } CONTRACTL_END;

    return JitTokenToModuleBase(MethodToken) +
        RUNTIME_FUNCTION__BeginAddress(dac_cast<PTR_RUNTIME_FUNCTION>(MethodToken.m_pCodeHeader));
}

// Locates the GC info for an R2R method: it is laid out immediately after the
// method's unwind data blob.
GCInfoToken ReadyToRunJitManager::GetGCInfoToken(const METHODTOKEN& MethodToken)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        HOST_NOCALLS;
        SUPPORTS_DAC;
    } CONTRACTL_END;

    PTR_RUNTIME_FUNCTION pRuntimeFunction = JitTokenToRuntimeFunction(MethodToken);
    TADDR baseAddress = JitTokenToModuleBase(MethodToken);

#ifndef DACCESS_COMPILE
    if (g_IBCLogger.InstrEnabled())
    {
        ReadyToRunInfo * pInfo = JitTokenToReadyToRunInfo(MethodToken);
        MethodDesc * pMD = pInfo->GetMethodDescForEntryPoint(JitTokenToStartAddress(MethodToken));
        g_IBCLogger.LogMethodGCInfoAccess(pMD);
    }
#endif

    SIZE_T nUnwindDataSize;
    PTR_VOID pUnwindData = GetUnwindDataBlob(baseAddress, pRuntimeFunction, &nUnwindDataSize);

    // GCInfo immediately follows unwind data
    PTR_BYTE gcInfo = dac_cast<PTR_BYTE>(pUnwindData) + nUnwindDataSize;
    UINT32 gcInfoVersion = JitTokenToGCInfoVersion(MethodToken);

    return{ gcInfo, gcInfoVersion };
}

// Primes pEnumState for EH-clause enumeration of an R2R method and returns the
// number of clauses (0 when the method has no EH info).
unsigned ReadyToRunJitManager::InitializeEHEnumeration(const METHODTOKEN& MethodToken, EH_CLAUSE_ENUMERATOR* pEnumState)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    } CONTRACTL_END;

    ReadyToRunInfo * pReadyToRunInfo = JitTokenToReadyToRunInfo(MethodToken);

    IMAGE_DATA_DIRECTORY * pExceptionInfoDir = pReadyToRunInfo->FindSection(ReadyToRunSectionType::ExceptionInfo);
    if (pExceptionInfoDir == NULL)
        return 0;

    PEImageLayout * pLayout = pReadyToRunInfo->GetImage();

    PTR_CORCOMPILE_EXCEPTION_LOOKUP_TABLE pExceptionLookupTable = dac_cast<PTR_CORCOMPILE_EXCEPTION_LOOKUP_TABLE>(pLayout->GetRvaData(pExceptionInfoDir->VirtualAddress));

    COUNT_T numLookupTableEntries = (COUNT_T)(pExceptionInfoDir->Size / sizeof(CORCOMPILE_EXCEPTION_LOOKUP_TABLE_ENTRY));
    // at least 2 entries (1 valid entry + 1 sentinel entry)
    _ASSERTE(numLookupTableEntries >= 2);

    DWORD methodStartRVA = (DWORD)(JitTokenToStartAddress(MethodToken) - JitTokenToModuleBase(MethodToken));

    COUNT_T ehInfoSize = 0;
    DWORD exceptionInfoRVA = NativeExceptionInfoLookupTable::LookupExceptionInfoRVAForMethod(pExceptionLookupTable,
                                                                  numLookupTableEntries,
                                                                  methodStartRVA, &ehInfoSize);
    if (exceptionInfoRVA == 0)
        return 0;

    pEnumState->iCurrentPos = 0;
    pEnumState->pExceptionClauseArray = JitTokenToModuleBase(MethodToken) + exceptionInfoRVA;

    return ehInfoSize / sizeof(CORCOMPILE_EXCEPTION_CLAUSE);
}

// Copies the next EH clause out of the array set up by InitializeEHEnumeration
// and advances the enumerator; returns an opaque token for the clause.
PTR_EXCEPTION_CLAUSE_TOKEN ReadyToRunJitManager::GetNextEHClause(EH_CLAUSE_ENUMERATOR* pEnumState,
                              EE_ILEXCEPTION_CLAUSE* pEHClauseOut)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    } CONTRACTL_END;

    unsigned iCurrentPos = pEnumState->iCurrentPos;
    pEnumState->iCurrentPos++;

    CORCOMPILE_EXCEPTION_CLAUSE* pClause = &(dac_cast<PTR_CORCOMPILE_EXCEPTION_CLAUSE>(pEnumState->pExceptionClauseArray)[iCurrentPos]);

    // copy to the input parameter, this is a nice abstraction for the future
    // if we want to compress the Clause encoding, we can do without affecting the call sites
    pEHClauseOut->TryStartPC = pClause->TryStartPC;
    pEHClauseOut->TryEndPC = pClause->TryEndPC;
    pEHClauseOut->HandlerStartPC = pClause->HandlerStartPC;
    pEHClauseOut->HandlerEndPC = pClause->HandlerEndPC;
    pEHClauseOut->Flags = pClause->Flags;
    pEHClauseOut->FilterOffset = pClause->FilterOffset;

    return dac_cast<PTR_EXCEPTION_CLAUSE_TOKEN>(pClause);
}

// Classifies a PC inside an R2R range: currently only distinguishes the
// delay-load method-call thunk section from everything else.
StubCodeBlockKind ReadyToRunJitManager::GetStubCodeBlockKind(RangeSection * pRangeSection, PCODE currentPC)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
        SUPPORTS_DAC;
    }
    CONTRACTL_END;

    DWORD rva = (DWORD)(currentPC - pRangeSection->LowAddress);

    PTR_ReadyToRunInfo pReadyToRunInfo = dac_cast<PTR_Module>(pRangeSection->pHeapListOrZapModule)->GetReadyToRunInfo();

    PTR_IMAGE_DATA_DIRECTORY pDelayLoadMethodCallThunksDir = pReadyToRunInfo->GetDelayMethodCallThunksSection();
    if (pDelayLoadMethodCallThunksDir != NULL)
    {
        if (pDelayLoadMethodCallThunksDir->VirtualAddress <= rva
                && rva < pDelayLoadMethodCallThunksDir->VirtualAddress + pDelayLoadMethodCallThunksDir->Size)
            return STUB_CODE_BLOCK_METHOD_CALL_THUNK;
    }

    return
STUB_CODE_BLOCK_UNKNOWN; } #ifndef DACCESS_COMPILE TypeHandle ReadyToRunJitManager::ResolveEHClause(EE_ILEXCEPTION_CLAUSE* pEHClause, CrawlFrame* pCf) { CONTRACTL { THROWS; GC_TRIGGERS; } CONTRACTL_END; _ASSERTE(NULL != pCf); _ASSERTE(NULL != pEHClause); _ASSERTE(IsTypedHandler(pEHClause)); MethodDesc *pMD = PTR_MethodDesc(pCf->GetFunction()); _ASSERTE(pMD != NULL); Module* pModule = pMD->GetModule(); PREFIX_ASSUME(pModule != NULL); SigTypeContext typeContext(pMD); VarKind k = hasNoVars; mdToken typeTok = pEHClause->ClassToken; // In the vast majority of cases the code un der the "if" below // will not be executed. // // First grab the representative instantiations. For code // shared by multiple generic instantiations these are the // canonical (representative) instantiation. if (TypeFromToken(typeTok) == mdtTypeSpec) { PCCOR_SIGNATURE pSig; ULONG cSig; IfFailThrow(pModule->GetMDImport()->GetTypeSpecFromToken(typeTok, &pSig, &cSig)); SigPointer psig(pSig, cSig); k = psig.IsPolyType(&typeContext); // Grab the active class and method instantiation. This exact instantiation is only // needed in the corner case of "generic" exception catching in shared // generic code. We don't need the exact instantiation if the token // doesn't contain E_T_VAR or E_T_MVAR. 
if ((k & hasSharableVarsMask) != 0) { Instantiation classInst; Instantiation methodInst; pCf->GetExactGenericInstantiations(&classInst,&methodInst); SigTypeContext::InitTypeContext(pMD,classInst, methodInst,&typeContext); } } return ClassLoader::LoadTypeDefOrRefOrSpecThrowing(pModule, typeTok, &typeContext, ClassLoader::ReturnNullIfNotFound); } #endif // #ifndef DACCESS_COMPILE //----------------------------------------------------------------------------- // Ngen info manager //----------------------------------------------------------------------------- BOOL ReadyToRunJitManager::GetBoundariesAndVars( const DebugInfoRequest & request, IN FP_IDS_NEW fpNew, IN void * pNewData, OUT ULONG32 * pcMap, OUT ICorDebugInfo::OffsetMapping **ppMap, OUT ULONG32 * pcVars, OUT ICorDebugInfo::NativeVarInfo **ppVars) { CONTRACTL { THROWS; // on OOM. GC_NOTRIGGER; // getting vars shouldn't trigger SUPPORTS_DAC; } CONTRACTL_END; EECodeInfo codeInfo(request.GetStartAddress()); if (!codeInfo.IsValid()) return FALSE; ReadyToRunInfo * pReadyToRunInfo = JitTokenToReadyToRunInfo(codeInfo.GetMethodToken()); PTR_RUNTIME_FUNCTION pRuntimeFunction = JitTokenToRuntimeFunction(codeInfo.GetMethodToken()); PTR_BYTE pDebugInfo = pReadyToRunInfo->GetDebugInfo(pRuntimeFunction); if (pDebugInfo == NULL) return FALSE; // Uncompress. This allocates memory and may throw. 
CompressDebugInfo::RestoreBoundariesAndVars( fpNew, pNewData, // allocators pDebugInfo, // input pcMap, ppMap, // output pcVars, ppVars, // output FALSE); // no patchpoint info return TRUE; } #ifdef DACCESS_COMPILE // // Need to write out debug info // void ReadyToRunJitManager::EnumMemoryRegionsForMethodDebugInfo(CLRDataEnumMemoryFlags flags, MethodDesc * pMD) { SUPPORTS_DAC; EECodeInfo codeInfo(pMD->GetNativeCode()); if (!codeInfo.IsValid()) return; ReadyToRunInfo * pReadyToRunInfo = JitTokenToReadyToRunInfo(codeInfo.GetMethodToken()); PTR_RUNTIME_FUNCTION pRuntimeFunction = JitTokenToRuntimeFunction(codeInfo.GetMethodToken()); PTR_BYTE pDebugInfo = pReadyToRunInfo->GetDebugInfo(pRuntimeFunction); if (pDebugInfo == NULL) return; CompressDebugInfo::EnumMemoryRegions(flags, pDebugInfo, FALSE); } #endif PCODE ReadyToRunJitManager::GetCodeAddressForRelOffset(const METHODTOKEN& MethodToken, DWORD relOffset) { WRAPPER_NO_CONTRACT; MethodRegionInfo methodRegionInfo; JitTokenToMethodRegionInfo(MethodToken, &methodRegionInfo); if (relOffset < methodRegionInfo.hotSize) return methodRegionInfo.hotStartAddress + relOffset; SIZE_T coldOffset = relOffset - methodRegionInfo.hotSize; _ASSERTE(coldOffset < methodRegionInfo.coldSize); return methodRegionInfo.coldStartAddress + coldOffset; } BOOL ReadyToRunJitManager::JitCodeToMethodInfo(RangeSection * pRangeSection, PCODE currentPC, MethodDesc** ppMethodDesc, OUT EECodeInfo * pCodeInfo) { CONTRACTL { NOTHROW; GC_NOTRIGGER; SUPPORTS_DAC; } CONTRACTL_END; // READYTORUN: FUTURE: Hot-cold spliting // If the address is in a thunk, return NULL. 
if (GetStubCodeBlockKind(pRangeSection, currentPC) != STUB_CODE_BLOCK_UNKNOWN) { return FALSE; } TADDR currentInstr = PCODEToPINSTR(currentPC); TADDR ImageBase = pRangeSection->LowAddress; DWORD RelativePc = (DWORD)(currentInstr - ImageBase); Module * pModule = dac_cast<PTR_Module>(pRangeSection->pHeapListOrZapModule); ReadyToRunInfo * pInfo = pModule->GetReadyToRunInfo(); COUNT_T nRuntimeFunctions = pInfo->m_nRuntimeFunctions; PTR_RUNTIME_FUNCTION pRuntimeFunctions = pInfo->m_pRuntimeFunctions; int MethodIndex = NativeUnwindInfoLookupTable::LookupUnwindInfoForMethod(RelativePc, pRuntimeFunctions, 0, nRuntimeFunctions - 1); if (MethodIndex < 0) return FALSE; if (ppMethodDesc == NULL && pCodeInfo == NULL) { // Bail early if caller doesn't care about the MethodDesc or EECodeInfo. // Avoiding the method desc lookups below also prevents deadlocks when this // is called from IsManagedCode. return TRUE; } #ifdef FEATURE_EH_FUNCLETS // Save the raw entry PTR_RUNTIME_FUNCTION RawFunctionEntry = pRuntimeFunctions + MethodIndex; MethodDesc *pMethodDesc; while ((pMethodDesc = pInfo->GetMethodDescForEntryPoint(ImageBase + RUNTIME_FUNCTION__BeginAddress(pRuntimeFunctions + MethodIndex))) == NULL) MethodIndex--; #endif PTR_RUNTIME_FUNCTION FunctionEntry = pRuntimeFunctions + MethodIndex; if (ppMethodDesc) { #ifdef FEATURE_EH_FUNCLETS *ppMethodDesc = pMethodDesc; #else *ppMethodDesc = pInfo->GetMethodDescForEntryPoint(ImageBase + RUNTIME_FUNCTION__BeginAddress(FunctionEntry)); #endif _ASSERTE(*ppMethodDesc != NULL); } if (pCodeInfo) { pCodeInfo->m_relOffset = (DWORD) (RelativePc - RUNTIME_FUNCTION__BeginAddress(FunctionEntry)); // We are using RUNTIME_FUNCTION as METHODTOKEN pCodeInfo->m_methodToken = METHODTOKEN(pRangeSection, dac_cast<TADDR>(FunctionEntry)); #ifdef FEATURE_EH_FUNCLETS AMD64_ONLY(_ASSERTE((RawFunctionEntry->UnwindData & RUNTIME_FUNCTION_INDIRECT) == 0)); pCodeInfo->m_pFunctionEntry = RawFunctionEntry; #endif } return TRUE; } #if defined(FEATURE_EH_FUNCLETS) 
PTR_RUNTIME_FUNCTION ReadyToRunJitManager::LazyGetFunctionEntry(EECodeInfo * pCodeInfo) { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; if (!pCodeInfo->IsValid()) { return NULL; } // code:ReadyToRunJitManager::JitCodeToMethodInfo computes PTR_RUNTIME_FUNCTION eagerly. This path is only // reachable via EECodeInfo::GetMainFunctionInfo, and so we can just return the main entry. _ASSERTE(pCodeInfo->GetRelOffset() == 0); return dac_cast<PTR_RUNTIME_FUNCTION>(pCodeInfo->GetMethodToken().m_pCodeHeader); } TADDR ReadyToRunJitManager::GetFuncletStartAddress(EECodeInfo * pCodeInfo) { LIMITED_METHOD_DAC_CONTRACT; // READYTORUN: FUTURE: Hot-cold spliting return IJitManager::GetFuncletStartAddress(pCodeInfo); } DWORD ReadyToRunJitManager::GetFuncletStartOffsets(const METHODTOKEN& MethodToken, DWORD* pStartFuncletOffsets, DWORD dwLength) { PTR_RUNTIME_FUNCTION pFirstFuncletFunctionEntry = dac_cast<PTR_RUNTIME_FUNCTION>(MethodToken.m_pCodeHeader) + 1; TADDR moduleBase = JitTokenToModuleBase(MethodToken); DWORD nFunclets = 0; MethodRegionInfo regionInfo; JitTokenToMethodRegionInfo(MethodToken, &regionInfo); // pFirstFuncletFunctionEntry will work for ARM when passed to GetFuncletStartOffsetsHelper() // even if it is a fragment of the main body and not a RUNTIME_FUNCTION for the beginning // of the first hot funclet, because GetFuncletStartOffsetsHelper() will skip all the function // fragments until the first funclet, if any, is found. GetFuncletStartOffsetsHelper(regionInfo.hotStartAddress, regionInfo.hotSize, 0, pFirstFuncletFunctionEntry, moduleBase, &nFunclets, pStartFuncletOffsets, dwLength); // READYTORUN: FUTURE: Hot/cold splitting return nFunclets; } BOOL ReadyToRunJitManager::IsFilterFunclet(EECodeInfo * pCodeInfo) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; if (!pCodeInfo->IsFunclet()) return FALSE; // Get address of the personality routine for the function being queried. 
SIZE_T size; PTR_VOID pUnwindData = GetUnwindDataBlob(pCodeInfo->GetModuleBase(), pCodeInfo->GetFunctionEntry(), &size); _ASSERTE(pUnwindData != NULL); // Personality routine is always the last element of the unwind data DWORD rvaPersonalityRoutine = *(dac_cast<PTR_DWORD>(dac_cast<TADDR>(pUnwindData) + size) - 1); // Get the personality routine for the first function in the module, which is guaranteed to be not a funclet. ReadyToRunInfo * pInfo = JitTokenToReadyToRunInfo(pCodeInfo->GetMethodToken()); if (pInfo->m_nRuntimeFunctions == 0) return FALSE; PTR_VOID pFirstUnwindData = GetUnwindDataBlob(pCodeInfo->GetModuleBase(), pInfo->m_pRuntimeFunctions, &size); _ASSERTE(pFirstUnwindData != NULL); DWORD rvaFirstPersonalityRoutine = *(dac_cast<PTR_DWORD>(dac_cast<TADDR>(pFirstUnwindData) + size) - 1); // Compare the two personality routines. If they are different, then the current function is a filter funclet. BOOL fRet = (rvaPersonalityRoutine != rvaFirstPersonalityRoutine); // Verify that the optimized implementation is in sync with the slow implementation _ASSERTE(fRet == IJitManager::IsFilterFunclet(pCodeInfo)); return fRet; } #endif // FEATURE_EH_FUNCLETS void ReadyToRunJitManager::JitTokenToMethodRegionInfo(const METHODTOKEN& MethodToken, MethodRegionInfo * methodRegionInfo) { CONTRACTL { NOTHROW; GC_NOTRIGGER; HOST_NOCALLS; SUPPORTS_DAC; PRECONDITION(methodRegionInfo != NULL); } CONTRACTL_END; // READYTORUN: FUTURE: Hot-cold spliting methodRegionInfo->hotStartAddress = JitTokenToStartAddress(MethodToken); methodRegionInfo->hotSize = GetCodeManager()->GetFunctionSize(GetGCInfoToken(MethodToken)); methodRegionInfo->coldStartAddress = 0; methodRegionInfo->coldSize = 0; } #ifdef DACCESS_COMPILE void ReadyToRunJitManager::EnumMemoryRegions(CLRDataEnumMemoryFlags flags) { IJitManager::EnumMemoryRegions(flags); } #if defined(FEATURE_EH_FUNCLETS) // // EnumMemoryRegionsForMethodUnwindInfo - enumerate the memory necessary to read the unwind info for the // specified 
method. // // Note that in theory, a dump generation library could save the unwind information itself without help // from us, since it's stored in the image in the standard function table layout for Win64. However, // dump-generation libraries assume that the image will be available at debug time, and if the image // isn't available then it is acceptable for stackwalking to break. For ngen images (which are created // on the client), it usually isn't possible to have the image available at debug time, and so for minidumps // we must explicitly ensure the unwind information is saved into the dump. // // Arguments: // flags - EnumMem flags // pMD - MethodDesc for the method in question // void ReadyToRunJitManager::EnumMemoryRegionsForMethodUnwindInfo(CLRDataEnumMemoryFlags flags, EECodeInfo * pCodeInfo) { // Get the RUNTIME_FUNCTION entry for this method PTR_RUNTIME_FUNCTION pRtf = pCodeInfo->GetFunctionEntry(); if (pRtf==NULL) { return; } // Enumerate the function entry and other entries needed to locate it in the program exceptions directory ReadyToRunInfo * pReadyToRunInfo = JitTokenToReadyToRunInfo(pCodeInfo->GetMethodToken()); EnumRuntimeFunctionEntriesToFindEntry(pRtf, pReadyToRunInfo->GetImage()); SIZE_T size; PTR_VOID pUnwindData = GetUnwindDataBlob(pCodeInfo->GetModuleBase(), pRtf, &size); if (pUnwindData != NULL) DacEnumMemoryRegion(PTR_TO_TADDR(pUnwindData), size); } #endif //FEATURE_EH_FUNCLETS #endif // #ifdef DACCESS_COMPILE #endif
1
dotnet/runtime
66,234
Remove compiler warning suppression
Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
AaronRobinsonMSFT
2022-03-05T03:45:49Z
2022-03-09T00:57:12Z
220e67755ae6323a01011b83070dff6f84b02519
853e494abd1c1e12d28a76829b4680af7f694afa
Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
./src/coreclr/vm/common.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // common.h - precompiled headers include for the COM+ Execution Engine // // #ifndef _common_h_ #define _common_h_ #if defined(_MSC_VER) && defined(HOST_X86) && !defined(FPO_ON) #pragma optimize("y", on) // Small critical routines, don't put in EBP frame #define FPO_ON 1 #define COMMON_TURNED_FPO_ON 1 #endif #define USE_COM_CONTEXT_DEF #if defined(_DEBUG) #define DEBUG_REGDISPLAY #endif #ifdef _MSC_VER // These don't seem useful, so turning them off is no big deal #pragma warning(disable:4201) // nameless struct/union #pragma warning(disable:4510) // can't generate default constructor //#pragma warning(disable:4511) // can't generate copy constructor #pragma warning(disable:4512) // can't generate assignment constructor #pragma warning(disable:4610) // user defined constructor required #pragma warning(disable:4211) // nonstandard extention used (char name[0] in structs) #pragma warning(disable:4268) // 'const' static/global data initialized with compiler generated default constructor fills the object with zeros #pragma warning(disable:4238) // nonstandard extension used : class rvalue used as lvalue #pragma warning(disable:4291) // no matching operator delete found #pragma warning(disable:4345) // behavior change: an object of POD type constructed with an initializer of the form () will be default-initialized // Depending on the code base, you may want to not disable these #pragma warning(disable:4245) // assigning signed / unsigned //#pragma warning(disable:4146) // unary minus applied to unsigned //#pragma warning(disable:4244) // loss of data int -> char .. 
#pragma warning(disable:4127) // conditional expression is constant #pragma warning(disable:4100) // unreferenced formal parameter #pragma warning(1:4189) // local variable initialized but not used #ifndef DEBUG #pragma warning(disable:4505) // unreferenced local function has been removed //#pragma warning(disable:4702) // unreachable code #pragma warning(disable:4313) // 'format specifier' in format string conflicts with argument %d of type 'type' #endif // !DEBUG // CONSIDER put these back in #pragma warning(disable:4063) // bad switch value for enum (only in Disasm.cpp) #pragma warning(disable:4710) // function not inlined #pragma warning(disable:4527) // user-defined destructor required #pragma warning(disable:4513) // destructor could not be generated // <TODO>TODO we really probably need this one put back in!!!</TODO> //#pragma warning(disable:4701) // local variable may be used without being initialized #endif // _MSC_VER #define _CRT_DEPENDENCY_ //this code depends on the crt file functions #include <stdint.h> #include <stddef.h> #include <winwrap.h> #include <windef.h> #include <winnt.h> #include <stdlib.h> #include <wchar.h> #include <objbase.h> #include <float.h> #include <math.h> #include <time.h> #include <limits.h> #include <assert.h> #include <olectl.h> #ifdef _MSC_VER //non inline intrinsics are faster #pragma function(memcpy,memcmp,strcmp,strcpy,strlen,strcat) #endif // _MSC_VER #include "volatile.h" #include <../../debug/inc/dbgtargetcontext.h> //----------------------------------------------------------------------------------------------------------- #include "stdmacros.h" #define POISONC ((UINT_PTR)((sizeof(int *) == 4)?0xCCCCCCCCL:I64(0xCCCCCCCCCCCCCCCC))) #include "switches.h" #include "holder.h" #include "classnames.h" #include "util.hpp" #include "corpriv.h" #include <daccess.h> typedef VPTR(class LoaderAllocator) PTR_LoaderAllocator; typedef VPTR(class AppDomain) PTR_AppDomain; typedef DPTR(class ArrayBase) PTR_ArrayBase; typedef 
DPTR(class Assembly) PTR_Assembly; typedef DPTR(class AssemblyBaseObject) PTR_AssemblyBaseObject; typedef DPTR(class AssemblyLoadContextBaseObject) PTR_AssemblyLoadContextBaseObject; typedef DPTR(class AssemblyBinder) PTR_AssemblyBinder; typedef DPTR(class AssemblyNameBaseObject) PTR_AssemblyNameBaseObject; typedef VPTR(class BaseDomain) PTR_BaseDomain; typedef DPTR(class ClassLoader) PTR_ClassLoader; typedef DPTR(class ComCallMethodDesc) PTR_ComCallMethodDesc; typedef DPTR(class ComPlusCallMethodDesc) PTR_ComPlusCallMethodDesc; typedef VPTR(class DebugInterface) PTR_DebugInterface; typedef DPTR(class Dictionary) PTR_Dictionary; typedef DPTR(class DomainAssembly) PTR_DomainAssembly; typedef DPTR(struct FailedAssembly) PTR_FailedAssembly; typedef VPTR(class EditAndContinueModule) PTR_EditAndContinueModule; typedef DPTR(class EEClass) PTR_EEClass; typedef DPTR(class DelegateEEClass) PTR_DelegateEEClass; typedef DPTR(struct DomainLocalModule) PTR_DomainLocalModule; typedef VPTR(class EECodeManager) PTR_EECodeManager; typedef DPTR(class EEConfig) PTR_EEConfig; typedef VPTR(class EEDbgInterfaceImpl) PTR_EEDbgInterfaceImpl; typedef VPTR(class DebugInfoManager) PTR_DebugInfoManager; typedef DPTR(class FieldDesc) PTR_FieldDesc; typedef VPTR(class Frame) PTR_Frame; typedef DPTR(class GCFrame) PTR_GCFrame; typedef VPTR(class ICodeManager) PTR_ICodeManager; typedef VPTR(class IJitManager) PTR_IJitManager; typedef VPTR(struct IUnknown) PTR_IUnknown; typedef DPTR(class InstMethodHashTable) PTR_InstMethodHashTable; typedef DPTR(class MetaSig) PTR_MetaSig; typedef DPTR(class MethodDesc) PTR_MethodDesc; typedef DPTR(class MethodDescChunk) PTR_MethodDescChunk; typedef DPTR(class MethodImpl) PTR_MethodImpl; typedef DPTR(class MethodTable) PTR_MethodTable; typedef DPTR(class CoreLibBinder) PTR_CoreLibBinder; typedef VPTR(class Module) PTR_Module; typedef DPTR(class NDirectMethodDesc) PTR_NDirectMethodDesc; typedef DPTR(class Thread) PTR_Thread; typedef DPTR(class Object) PTR_Object; 
typedef DPTR(PTR_Object) PTR_PTR_Object; typedef DPTR(class DelegateObject) PTR_DelegateObject; typedef DPTR(class ObjHeader) PTR_ObjHeader; typedef DPTR(class Precode) PTR_Precode; typedef VPTR(class ReflectionModule) PTR_ReflectionModule; typedef DPTR(class ReflectClassBaseObject) PTR_ReflectClassBaseObject; typedef DPTR(class ReflectMethodObject) PTR_ReflectMethodObject; typedef DPTR(class ReflectFieldObject) PTR_ReflectFieldObject; typedef DPTR(class ReflectModuleBaseObject) PTR_ReflectModuleBaseObject; typedef DPTR(class ReJitManager) PTR_ReJitManager; typedef DPTR(struct ReJitInfo) PTR_ReJitInfo; typedef DPTR(struct SharedReJitInfo) PTR_SharedReJitInfo; typedef DPTR(class StringObject) PTR_StringObject; typedef DPTR(class TypeHandle) PTR_TypeHandle; typedef VPTR(class VirtualCallStubManager) PTR_VirtualCallStubManager; typedef VPTR(class VirtualCallStubManagerManager) PTR_VirtualCallStubManagerManager; typedef VPTR(class IGCHeap) PTR_IGCHeap; // // _UNCHECKED_OBJECTREF is for code that can't deal with DEBUG OBJECTREFs // typedef PTR_Object _UNCHECKED_OBJECTREF; typedef DPTR(PTR_Object) PTR_UNCHECKED_OBJECTREF; #ifdef USE_CHECKED_OBJECTREFS class OBJECTREF; #else typedef PTR_Object OBJECTREF; #endif typedef DPTR(OBJECTREF) PTR_OBJECTREF; typedef DPTR(PTR_OBJECTREF) PTR_PTR_OBJECTREF; Thread* GetThread(); Thread* GetThreadNULLOk(); EXTERN_C Thread* STDCALL GetThreadHelper(); void SetThread(Thread*); // This is a mechanism by which macros can make the Thread pointer available to inner scopes // that is robust to code changes. If the outer Thread no longer is available for some reason // (e.g. code refactoring), this GET_THREAD() macro will fall back to calling GetThread(). const bool CURRENT_THREAD_AVAILABLE = false; Thread * const CURRENT_THREAD = NULL; #define GET_THREAD() (CURRENT_THREAD_AVAILABLE ? 
CURRENT_THREAD : GetThread()) #define MAKE_CURRENT_THREAD_AVAILABLE() \ Thread * __pThread = GET_THREAD(); \ MAKE_CURRENT_THREAD_AVAILABLE_EX(__pThread) #define MAKE_CURRENT_THREAD_AVAILABLE_EX(__pThread) \ Thread * CURRENT_THREAD = __pThread; \ const bool CURRENT_THREAD_AVAILABLE = true; \ (void)CURRENT_THREAD_AVAILABLE; /* silence "local variable initialized but not used" warning */ \ #ifndef DACCESS_COMPILE EXTERN_C AppDomain* STDCALL GetAppDomain(); #endif //!DACCESS_COMPILE inline void RetailBreak() { #ifdef TARGET_X86 __asm int 3 #else DebugBreak(); #endif } extern BOOL isMemoryReadable(const TADDR start, unsigned len); #ifndef memcpyUnsafe_f #define memcpyUnsafe_f // use this when you want to memcpy something that contains GC refs FORCEINLINE void* memcpyUnsafe(void *dest, const void *src, size_t len) { WRAPPER_NO_CONTRACT; return memcpy(dest, src, len); } #endif // !memcpyUnsafe_f // // By default logging, and debug GC are enabled under debug // // These can be enabled in non-debug by removing the #ifdef _DEBUG // allowing one to log/check_gc a free build. // #if defined(_DEBUG) && !defined(DACCESS_COMPILE) //If memcpy has been defined to PAL_memcpy, we undefine it so that this case //can be covered by the if !defined(memcpy) block below #ifdef HOST_UNIX #if IS_REDEFINED_IN_PAL(memcpy) #undef memcpy #endif //IS_REDEFINED_IN_PAL #endif //HOST_UNIX // You should be using CopyValueClass if you are doing an memcpy // in the CG heap. 
#if !defined(memcpy) FORCEINLINE void* memcpyNoGCRefs(void * dest, const void * src, size_t len) { WRAPPER_NO_CONTRACT; #ifdef HOST_UNIX return PAL_memcpy(dest, src, len); #else //HOST_UNIX return memcpy(dest, src, len); #endif //HOST_UNIX } extern "C" void * __cdecl GCSafeMemCpy(void *, const void *, size_t); #define memcpy(dest, src, len) GCSafeMemCpy(dest, src, len) #endif // !defined(memcpy) #else // !_DEBUG && !DACCESS_COMPILE FORCEINLINE void* memcpyNoGCRefs(void * dest, const void * src, size_t len) { WRAPPER_NO_CONTRACT; return memcpy(dest, src, len); } #endif // !_DEBUG && !DACCESS_COMPILE namespace Loader { typedef enum { Load, //should load DontLoad, //should not load SafeLookup //take no locks, no allocations } LoadFlag; } #if !defined(DACCESS_COMPILE) #if defined(TARGET_WINDOWS) && defined(TARGET_AMD64) EXTERN_C void STDCALL ClrRestoreNonvolatileContext(PCONTEXT ContextRecord); #elif !(defined(TARGET_WINDOWS) && defined(TARGET_X86)) // !(TARGET_WINDOWS && TARGET_AMD64) && !(TARGET_WINDOWS && TARGET_X86) inline void ClrRestoreNonvolatileContext(PCONTEXT ContextRecord) { // Falling back to RtlRestoreContext() for now, though it should be possible to have simpler variants for these cases RtlRestoreContext(ContextRecord, NULL); } #endif // TARGET_WINDOWS && TARGET_AMD64 #endif // !DACCESS_COMPILE // src/inc #include "utilcode.h" #include "log.h" #include "loaderheap.h" #include "stgpool.h" // src/vm #include "gcenv.interlocked.h" #include "gcenv.interlocked.inl" #include "util.hpp" #include "ibclogger.h" #include "eepolicy.h" #include "vars.hpp" #include "crst.h" #include "argslot.h" #include "stublink.h" #include "cgensys.h" #include "ceemain.h" #include "hash.h" #include "eecontract.h" #include "pedecoder.h" #include "sstring.h" #include "slist.h" #include "eeconfig.h" #include "spinlock.h" #ifdef FEATURE_COMINTEROP #include "stdinterfaces.h" #endif #include "typehandle.h" #include "methodtable.h" #include "typectxt.h" #include "eehash.h" #include 
"vars.hpp" #include "eventstore.hpp" #include "synch.h" #include "regdisp.h" #include "stackframe.h" #include "gms.h" #include "fcall.h" #include "syncblk.h" #include "gcdesc.h" #include "specialstatics.h" #include "object.h" // <NICE> We should not really need to put this so early... </NICE> #include "gchelpers.h" #include "peassembly.h" #include "clrex.h" #include "clsload.hpp" // <NICE> We should not really need to put this so early... </NICE> #include "siginfo.hpp" #include "binder.h" #include "jitinterface.h" // <NICE> We should not really need to put this so early... </NICE> #include "ceeload.h" #include "memberload.h" #include "genericdict.h" #include "class.h" #include "codeman.h" #include "threads.h" #include "clrex.inl" #include "loaderallocator.hpp" #include "appdomain.hpp" #include "appdomain.inl" #include "assembly.hpp" #include "peassembly.inl" #include "excep.h" #include "method.hpp" #include "field.h" #include "callingconvention.h" #include "frames.h" #include "qcall.h" #include "callhelpers.h" #include "stackwalk.h" #include "stackingallocator.h" #include "interoputil.h" #include "wrappers.h" #include "dynamicmethod.h" #include "gcstress.h" HRESULT EnsureRtlFunctions(); // Helper function returns the base of clr module. void* GetClrModuleBase(); #if defined(TARGET_X86) || defined(TARGET_AMD64) // // Strong memory model. No memory barrier necessary before writing object references into GC heap. // #define GCHeapMemoryBarrier() #else // // The weak memory model forces us to raise memory barriers before writing object references into GC heap. This is required // for both security and to make most managed code written against strong memory model work. Under normal circumstances, this memory // barrier is part of GC write barrier. However, there are a few places in the VM that set cards manually without going through // regular GC write barrier. These places need to this macro. 
This macro is usually used before memcpy-like operation followed // by SetCardsAfterBulkCopy. // #define GCHeapMemoryBarrier() MemoryBarrier() #endif // use this when you want to memcpy something that contains GC refs void memmoveGCRefs(void *dest, const void *src, size_t len); #if defined(_DEBUG) // This catches CANNOTTHROW macros that occur outside the scope of a CONTRACT. // Note that it's important for m_CannotThrowLineNums to be NULL. struct DummyGlobalContract { int *m_CannotThrowLineNums; //= NULL; LPVOID *m_CannotThrowRecords; //= NULL; }; extern DummyGlobalContract ___contract; #endif // defined(_DEBUG) // All files get to see all of these .inl files to make sure all files // get the benefit of inlining. #include "ceeload.inl" #include "typedesc.inl" #include "class.inl" #include "methodtable.inl" #include "typehandle.inl" #include "object.inl" #include "clsload.inl" #include "method.inl" #include "syncblk.inl" #include "threads.inl" #include "eehash.inl" #include "eventtrace.inl" #if defined(COMMON_TURNED_FPO_ON) #pragma optimize("", on) // Go back to command line default optimizations #undef COMMON_TURNED_FPO_ON #undef FPO_ON #endif #endif // !_common_h_
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // common.h - precompiled headers include for the COM+ Execution Engine // // #ifndef _common_h_ #define _common_h_ #if defined(_MSC_VER) && defined(HOST_X86) && !defined(FPO_ON) #pragma optimize("y", on) // Small critical routines, don't put in EBP frame #define FPO_ON 1 #define COMMON_TURNED_FPO_ON 1 #endif #define USE_COM_CONTEXT_DEF #if defined(_DEBUG) #define DEBUG_REGDISPLAY #endif #ifdef _MSC_VER // These don't seem useful, so turning them off is no big deal #pragma warning(disable:4201) // nameless struct/union #pragma warning(disable:4512) // can't generate assignment constructor #pragma warning(disable:4211) // nonstandard extention used (char name[0] in structs) #pragma warning(disable:4268) // 'const' static/global data initialized with compiler generated default constructor fills the object with zeros #pragma warning(disable:4238) // nonstandard extension used : class rvalue used as lvalue #pragma warning(disable:4291) // no matching operator delete found #pragma warning(disable:4345) // behavior change: an object of POD type constructed with an initializer of the form () will be default-initialized // Depending on the code base, you may want to not disable these #pragma warning(disable:4245) // assigning signed / unsigned #pragma warning(disable:4127) // conditional expression is constant #pragma warning(disable:4100) // unreferenced formal parameter #pragma warning(1:4189) // local variable initialized but not used #ifndef DEBUG #pragma warning(disable:4505) // unreferenced local function has been removed #pragma warning(disable:4313) // 'format specifier' in format string conflicts with argument %d of type 'type' #endif // !DEBUG // CONSIDER put these back in #pragma warning(disable:4063) // bad switch value for enum (only in Disasm.cpp) #pragma warning(disable:4710) // function not inlined #pragma 
warning(disable:4527) // user-defined destructor required #pragma warning(disable:4513) // destructor could not be generated #endif // _MSC_VER #define _CRT_DEPENDENCY_ //this code depends on the crt file functions #include <stdint.h> #include <stddef.h> #include <winwrap.h> #include <windef.h> #include <winnt.h> #include <stdlib.h> #include <wchar.h> #include <objbase.h> #include <float.h> #include <math.h> #include <time.h> #include <limits.h> #include <assert.h> #include <olectl.h> #ifdef _MSC_VER //non inline intrinsics are faster #pragma function(memcpy,memcmp,strcmp,strcpy,strlen,strcat) #endif // _MSC_VER #include "volatile.h" #include <../../debug/inc/dbgtargetcontext.h> //----------------------------------------------------------------------------------------------------------- #include "stdmacros.h" #define POISONC ((UINT_PTR)((sizeof(int *) == 4)?0xCCCCCCCCL:I64(0xCCCCCCCCCCCCCCCC))) #include "switches.h" #include "holder.h" #include "classnames.h" #include "util.hpp" #include "corpriv.h" #include <daccess.h> typedef VPTR(class LoaderAllocator) PTR_LoaderAllocator; typedef VPTR(class AppDomain) PTR_AppDomain; typedef DPTR(class ArrayBase) PTR_ArrayBase; typedef DPTR(class Assembly) PTR_Assembly; typedef DPTR(class AssemblyBaseObject) PTR_AssemblyBaseObject; typedef DPTR(class AssemblyLoadContextBaseObject) PTR_AssemblyLoadContextBaseObject; typedef DPTR(class AssemblyBinder) PTR_AssemblyBinder; typedef DPTR(class AssemblyNameBaseObject) PTR_AssemblyNameBaseObject; typedef VPTR(class BaseDomain) PTR_BaseDomain; typedef DPTR(class ClassLoader) PTR_ClassLoader; typedef DPTR(class ComCallMethodDesc) PTR_ComCallMethodDesc; typedef DPTR(class ComPlusCallMethodDesc) PTR_ComPlusCallMethodDesc; typedef VPTR(class DebugInterface) PTR_DebugInterface; typedef DPTR(class Dictionary) PTR_Dictionary; typedef DPTR(class DomainAssembly) PTR_DomainAssembly; typedef DPTR(struct FailedAssembly) PTR_FailedAssembly; typedef VPTR(class EditAndContinueModule) 
PTR_EditAndContinueModule; typedef DPTR(class EEClass) PTR_EEClass; typedef DPTR(class DelegateEEClass) PTR_DelegateEEClass; typedef DPTR(struct DomainLocalModule) PTR_DomainLocalModule; typedef VPTR(class EECodeManager) PTR_EECodeManager; typedef DPTR(class EEConfig) PTR_EEConfig; typedef VPTR(class EEDbgInterfaceImpl) PTR_EEDbgInterfaceImpl; typedef VPTR(class DebugInfoManager) PTR_DebugInfoManager; typedef DPTR(class FieldDesc) PTR_FieldDesc; typedef VPTR(class Frame) PTR_Frame; typedef DPTR(class GCFrame) PTR_GCFrame; typedef VPTR(class ICodeManager) PTR_ICodeManager; typedef VPTR(class IJitManager) PTR_IJitManager; typedef VPTR(struct IUnknown) PTR_IUnknown; typedef DPTR(class InstMethodHashTable) PTR_InstMethodHashTable; typedef DPTR(class MetaSig) PTR_MetaSig; typedef DPTR(class MethodDesc) PTR_MethodDesc; typedef DPTR(class MethodDescChunk) PTR_MethodDescChunk; typedef DPTR(class MethodImpl) PTR_MethodImpl; typedef DPTR(class MethodTable) PTR_MethodTable; typedef DPTR(class CoreLibBinder) PTR_CoreLibBinder; typedef VPTR(class Module) PTR_Module; typedef DPTR(class NDirectMethodDesc) PTR_NDirectMethodDesc; typedef DPTR(class Thread) PTR_Thread; typedef DPTR(class Object) PTR_Object; typedef DPTR(PTR_Object) PTR_PTR_Object; typedef DPTR(class DelegateObject) PTR_DelegateObject; typedef DPTR(class ObjHeader) PTR_ObjHeader; typedef DPTR(class Precode) PTR_Precode; typedef VPTR(class ReflectionModule) PTR_ReflectionModule; typedef DPTR(class ReflectClassBaseObject) PTR_ReflectClassBaseObject; typedef DPTR(class ReflectMethodObject) PTR_ReflectMethodObject; typedef DPTR(class ReflectFieldObject) PTR_ReflectFieldObject; typedef DPTR(class ReflectModuleBaseObject) PTR_ReflectModuleBaseObject; typedef DPTR(class ReJitManager) PTR_ReJitManager; typedef DPTR(struct ReJitInfo) PTR_ReJitInfo; typedef DPTR(struct SharedReJitInfo) PTR_SharedReJitInfo; typedef DPTR(class StringObject) PTR_StringObject; typedef DPTR(class TypeHandle) PTR_TypeHandle; typedef VPTR(class 
VirtualCallStubManager) PTR_VirtualCallStubManager; typedef VPTR(class VirtualCallStubManagerManager) PTR_VirtualCallStubManagerManager; typedef VPTR(class IGCHeap) PTR_IGCHeap; // // _UNCHECKED_OBJECTREF is for code that can't deal with DEBUG OBJECTREFs // typedef PTR_Object _UNCHECKED_OBJECTREF; typedef DPTR(PTR_Object) PTR_UNCHECKED_OBJECTREF; #ifdef USE_CHECKED_OBJECTREFS class OBJECTREF; #else typedef PTR_Object OBJECTREF; #endif typedef DPTR(OBJECTREF) PTR_OBJECTREF; typedef DPTR(PTR_OBJECTREF) PTR_PTR_OBJECTREF; Thread* GetThread(); Thread* GetThreadNULLOk(); EXTERN_C Thread* STDCALL GetThreadHelper(); void SetThread(Thread*); // This is a mechanism by which macros can make the Thread pointer available to inner scopes // that is robust to code changes. If the outer Thread no longer is available for some reason // (e.g. code refactoring), this GET_THREAD() macro will fall back to calling GetThread(). const bool CURRENT_THREAD_AVAILABLE = false; Thread * const CURRENT_THREAD = NULL; #define GET_THREAD() (CURRENT_THREAD_AVAILABLE ? 
CURRENT_THREAD : GetThread()) #define MAKE_CURRENT_THREAD_AVAILABLE() \ Thread * __pThread = GET_THREAD(); \ MAKE_CURRENT_THREAD_AVAILABLE_EX(__pThread) #define MAKE_CURRENT_THREAD_AVAILABLE_EX(__pThread) \ Thread * CURRENT_THREAD = __pThread; \ const bool CURRENT_THREAD_AVAILABLE = true; \ (void)CURRENT_THREAD_AVAILABLE; /* silence "local variable initialized but not used" warning */ \ #ifndef DACCESS_COMPILE EXTERN_C AppDomain* STDCALL GetAppDomain(); #endif //!DACCESS_COMPILE inline void RetailBreak() { #ifdef TARGET_X86 __asm int 3 #else DebugBreak(); #endif } extern BOOL isMemoryReadable(const TADDR start, unsigned len); #ifndef memcpyUnsafe_f #define memcpyUnsafe_f // use this when you want to memcpy something that contains GC refs FORCEINLINE void* memcpyUnsafe(void *dest, const void *src, size_t len) { WRAPPER_NO_CONTRACT; return memcpy(dest, src, len); } #endif // !memcpyUnsafe_f // // By default logging, and debug GC are enabled under debug // // These can be enabled in non-debug by removing the #ifdef _DEBUG // allowing one to log/check_gc a free build. // #if defined(_DEBUG) && !defined(DACCESS_COMPILE) //If memcpy has been defined to PAL_memcpy, we undefine it so that this case //can be covered by the if !defined(memcpy) block below #ifdef HOST_UNIX #if IS_REDEFINED_IN_PAL(memcpy) #undef memcpy #endif //IS_REDEFINED_IN_PAL #endif //HOST_UNIX // You should be using CopyValueClass if you are doing an memcpy // in the CG heap. 
#if !defined(memcpy) FORCEINLINE void* memcpyNoGCRefs(void * dest, const void * src, size_t len) { WRAPPER_NO_CONTRACT; #ifdef HOST_UNIX return PAL_memcpy(dest, src, len); #else //HOST_UNIX return memcpy(dest, src, len); #endif //HOST_UNIX } extern "C" void * __cdecl GCSafeMemCpy(void *, const void *, size_t); #define memcpy(dest, src, len) GCSafeMemCpy(dest, src, len) #endif // !defined(memcpy) #else // !_DEBUG && !DACCESS_COMPILE FORCEINLINE void* memcpyNoGCRefs(void * dest, const void * src, size_t len) { WRAPPER_NO_CONTRACT; return memcpy(dest, src, len); } #endif // !_DEBUG && !DACCESS_COMPILE namespace Loader { typedef enum { Load, //should load DontLoad, //should not load SafeLookup //take no locks, no allocations } LoadFlag; } #if !defined(DACCESS_COMPILE) #if defined(TARGET_WINDOWS) && defined(TARGET_AMD64) EXTERN_C void STDCALL ClrRestoreNonvolatileContext(PCONTEXT ContextRecord); #elif !(defined(TARGET_WINDOWS) && defined(TARGET_X86)) // !(TARGET_WINDOWS && TARGET_AMD64) && !(TARGET_WINDOWS && TARGET_X86) inline void ClrRestoreNonvolatileContext(PCONTEXT ContextRecord) { // Falling back to RtlRestoreContext() for now, though it should be possible to have simpler variants for these cases RtlRestoreContext(ContextRecord, NULL); } #endif // TARGET_WINDOWS && TARGET_AMD64 #endif // !DACCESS_COMPILE // src/inc #include "utilcode.h" #include "log.h" #include "loaderheap.h" #include "stgpool.h" // src/vm #include "gcenv.interlocked.h" #include "gcenv.interlocked.inl" #include "util.hpp" #include "ibclogger.h" #include "eepolicy.h" #include "vars.hpp" #include "crst.h" #include "argslot.h" #include "stublink.h" #include "cgensys.h" #include "ceemain.h" #include "hash.h" #include "eecontract.h" #include "pedecoder.h" #include "sstring.h" #include "slist.h" #include "eeconfig.h" #include "spinlock.h" #ifdef FEATURE_COMINTEROP #include "stdinterfaces.h" #endif #include "typehandle.h" #include "methodtable.h" #include "typectxt.h" #include "eehash.h" #include 
"vars.hpp" #include "eventstore.hpp" #include "synch.h" #include "regdisp.h" #include "stackframe.h" #include "gms.h" #include "fcall.h" #include "syncblk.h" #include "gcdesc.h" #include "specialstatics.h" #include "object.h" // <NICE> We should not really need to put this so early... </NICE> #include "gchelpers.h" #include "peassembly.h" #include "clrex.h" #include "clsload.hpp" // <NICE> We should not really need to put this so early... </NICE> #include "siginfo.hpp" #include "binder.h" #include "jitinterface.h" // <NICE> We should not really need to put this so early... </NICE> #include "ceeload.h" #include "memberload.h" #include "genericdict.h" #include "class.h" #include "codeman.h" #include "threads.h" #include "clrex.inl" #include "loaderallocator.hpp" #include "appdomain.hpp" #include "appdomain.inl" #include "assembly.hpp" #include "peassembly.inl" #include "excep.h" #include "method.hpp" #include "field.h" #include "callingconvention.h" #include "frames.h" #include "qcall.h" #include "callhelpers.h" #include "stackwalk.h" #include "stackingallocator.h" #include "interoputil.h" #include "wrappers.h" #include "dynamicmethod.h" #include "gcstress.h" HRESULT EnsureRtlFunctions(); // Helper function returns the base of clr module. void* GetClrModuleBase(); #if defined(TARGET_X86) || defined(TARGET_AMD64) // // Strong memory model. No memory barrier necessary before writing object references into GC heap. // #define GCHeapMemoryBarrier() #else // // The weak memory model forces us to raise memory barriers before writing object references into GC heap. This is required // for both security and to make most managed code written against strong memory model work. Under normal circumstances, this memory // barrier is part of GC write barrier. However, there are a few places in the VM that set cards manually without going through // regular GC write barrier. These places need to this macro. 
This macro is usually used before memcpy-like operation followed // by SetCardsAfterBulkCopy. // #define GCHeapMemoryBarrier() MemoryBarrier() #endif // use this when you want to memcpy something that contains GC refs void memmoveGCRefs(void *dest, const void *src, size_t len); #if defined(_DEBUG) // This catches CANNOTTHROW macros that occur outside the scope of a CONTRACT. // Note that it's important for m_CannotThrowLineNums to be NULL. struct DummyGlobalContract { int *m_CannotThrowLineNums; //= NULL; LPVOID *m_CannotThrowRecords; //= NULL; }; extern DummyGlobalContract ___contract; #endif // defined(_DEBUG) // All files get to see all of these .inl files to make sure all files // get the benefit of inlining. #include "ceeload.inl" #include "typedesc.inl" #include "class.inl" #include "methodtable.inl" #include "typehandle.inl" #include "object.inl" #include "clsload.inl" #include "method.inl" #include "syncblk.inl" #include "threads.inl" #include "eehash.inl" #include "eventtrace.inl" #if defined(COMMON_TURNED_FPO_ON) #pragma optimize("", on) // Go back to command line default optimizations #undef COMMON_TURNED_FPO_ON #undef FPO_ON #endif #endif // !_common_h_
1
dotnet/runtime
66,234
Remove compiler warning suppression
Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
AaronRobinsonMSFT
2022-03-05T03:45:49Z
2022-03-09T00:57:12Z
220e67755ae6323a01011b83070dff6f84b02519
853e494abd1c1e12d28a76829b4680af7f694afa
Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
./src/coreclr/vm/eehash.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // File: eehash.cpp // // #include "common.h" #include "excep.h" #include "eehash.h" #include "stringliteralmap.h" #include "clsload.hpp" #include "typectxt.h" #include "genericdict.h" // ============================================================================ // UTF8 string hash table helper. // ============================================================================ EEHashEntry_t * EEUtf8HashTableHelper::AllocateEntry(LPCUTF8 pKey, BOOL bDeepCopy, void *pHeap) { CONTRACTL { NOTHROW; GC_NOTRIGGER; INJECT_FAULT(return NULL;); } CONTRACTL_END EEHashEntry_t *pEntry; if (bDeepCopy) { DWORD StringLen = (DWORD)strlen(pKey); DWORD BufLen = 0; // Review conversion of size_t to DWORD. #ifdef _MSC_VER #pragma warning(push) #pragma warning(disable:4267) #endif if (!ClrSafeInt<DWORD>::addition(StringLen, SIZEOF_EEHASH_ENTRY + sizeof(LPUTF8) + 1, BufLen)) #ifdef _MSC_VER #pragma warning(pop) #endif return NULL; pEntry = (EEHashEntry_t *) new (nothrow) BYTE[BufLen]; if (!pEntry) return NULL; memcpy(pEntry->Key + sizeof(LPUTF8), pKey, StringLen + 1); *((LPUTF8*)pEntry->Key) = (LPUTF8)(pEntry->Key + sizeof(LPUTF8)); } else { pEntry = (EEHashEntry_t *) new (nothrow)BYTE[SIZEOF_EEHASH_ENTRY + sizeof(LPUTF8)]; if (pEntry) *((LPCUTF8*)pEntry->Key) = pKey; } return pEntry; } void EEUtf8HashTableHelper::DeleteEntry(EEHashEntry_t *pEntry, void *pHeap) { CONTRACTL { NOTHROW; GC_NOTRIGGER; FORBID_FAULT; } CONTRACTL_END delete [] (BYTE*)pEntry; } BOOL EEUtf8HashTableHelper::CompareKeys(EEHashEntry_t *pEntry, LPCUTF8 pKey) { LIMITED_METHOD_DAC_CONTRACT; LPCUTF8 pEntryKey = *((LPCUTF8*)pEntry->Key); return (strcmp(pEntryKey, pKey) == 0) ? 
TRUE : FALSE; } DWORD EEUtf8HashTableHelper::Hash(LPCUTF8 pKey) { LIMITED_METHOD_DAC_CONTRACT; DWORD dwHash = 0; while (*pKey != 0) { dwHash = (dwHash << 5) + (dwHash >> 5) + (*pKey); pKey++; } return dwHash; } LPCUTF8 EEUtf8HashTableHelper::GetKey(EEHashEntry_t *pEntry) { LIMITED_METHOD_CONTRACT; return *((LPCUTF8*)pEntry->Key); } #ifndef DACCESS_COMPILE // ============================================================================ // Unicode string hash table helper. // ============================================================================ EEHashEntry_t * EEUnicodeHashTableHelper::AllocateEntry(EEStringData *pKey, BOOL bDeepCopy, void *pHeap) { CONTRACTL { NOTHROW; GC_NOTRIGGER; INJECT_FAULT(return NULL;); } CONTRACTL_END EEHashEntry_t *pEntry; if (bDeepCopy) { pEntry = (EEHashEntry_t *) new (nothrow) BYTE[SIZEOF_EEHASH_ENTRY + sizeof(EEStringData) + ((pKey->GetCharCount() + 1) * sizeof(WCHAR))]; if (pEntry) { EEStringData *pEntryKey = (EEStringData *)(&pEntry->Key); pEntryKey->SetIsOnlyLowChars (pKey->GetIsOnlyLowChars()); pEntryKey->SetCharCount (pKey->GetCharCount()); pEntryKey->SetStringBuffer ((LPWSTR) ((LPBYTE)pEntry->Key + sizeof(EEStringData))); memcpy((LPWSTR)pEntryKey->GetStringBuffer(), pKey->GetStringBuffer(), pKey->GetCharCount() * sizeof(WCHAR)); } } else { pEntry = (EEHashEntry_t *) new (nothrow) BYTE[SIZEOF_EEHASH_ENTRY + sizeof(EEStringData)]; if (pEntry) { EEStringData *pEntryKey = (EEStringData *) pEntry->Key; pEntryKey->SetIsOnlyLowChars (pKey->GetIsOnlyLowChars()); pEntryKey->SetCharCount (pKey->GetCharCount()); pEntryKey->SetStringBuffer (pKey->GetStringBuffer()); } } return pEntry; } void EEUnicodeHashTableHelper::DeleteEntry(EEHashEntry_t *pEntry, void *pHeap) { LIMITED_METHOD_CONTRACT; delete [] (BYTE*)pEntry; } BOOL EEUnicodeHashTableHelper::CompareKeys(EEHashEntry_t *pEntry, EEStringData *pKey) { LIMITED_METHOD_CONTRACT; EEStringData *pEntryKey = (EEStringData*) pEntry->Key; // Same buffer, same string. 
if (pEntryKey->GetStringBuffer() == pKey->GetStringBuffer()) return TRUE; // Length not the same, never a match. if (pEntryKey->GetCharCount() != pKey->GetCharCount()) return FALSE; // Compare the entire thing. // We'll deliberately ignore the bOnlyLowChars field since this derived from the characters return !memcmp(pEntryKey->GetStringBuffer(), pKey->GetStringBuffer(), pEntryKey->GetCharCount() * sizeof(WCHAR)); } DWORD EEUnicodeHashTableHelper::Hash(EEStringData *pKey) { LIMITED_METHOD_CONTRACT; return (HashBytes((const BYTE *) pKey->GetStringBuffer(), pKey->GetCharCount()*sizeof(WCHAR))); } EEStringData *EEUnicodeHashTableHelper::GetKey(EEHashEntry_t *pEntry) { LIMITED_METHOD_CONTRACT; return (EEStringData*)pEntry->Key; } void EEUnicodeHashTableHelper::ReplaceKey(EEHashEntry_t *pEntry, EEStringData *pNewKey) { LIMITED_METHOD_CONTRACT; ((EEStringData*)pEntry->Key)->SetStringBuffer (pNewKey->GetStringBuffer()); ((EEStringData*)pEntry->Key)->SetCharCount (pNewKey->GetCharCount()); ((EEStringData*)pEntry->Key)->SetIsOnlyLowChars (pNewKey->GetIsOnlyLowChars()); } // ============================================================================ // Unicode stringliteral hash table helper. // ============================================================================ EEHashEntry_t * EEUnicodeStringLiteralHashTableHelper::AllocateEntry(EEStringData *pKey, BOOL bDeepCopy, void *pHeap) { CONTRACTL { NOTHROW; GC_NOTRIGGER; INJECT_FAULT(return NULL;); } CONTRACTL_END // We assert here because we expect that the heap is not null for EEUnicodeStringLiteralHash table. // If someone finds more uses of this kind of hashtable then remove this asserte. // Also note that in case of heap being null we go ahead and use new /delete which is EXPENSIVE // But for production code this might be ok if the memory is fragmented then thers a better chance // of getting smaller allocations than full pages. 
_ASSERTE (pHeap); if (pHeap) return (EEHashEntry_t *) ((MemoryPool*)pHeap)->AllocateElementNoThrow (); else return (EEHashEntry_t *) new (nothrow) BYTE[SIZEOF_EEHASH_ENTRY]; } void EEUnicodeStringLiteralHashTableHelper::DeleteEntry(EEHashEntry_t *pEntry, void *pHeap) { CONTRACTL { NOTHROW; GC_NOTRIGGER; FORBID_FAULT; } CONTRACTL_END // We assert here because we expect that the heap is not null for EEUnicodeStringLiteralHash table. // If someone finds more uses of this kind of hashtable then remove this asserte. // Also note that in case of heap being null we go ahead and use new /delete which is EXPENSIVE // But for production code this might be ok if the memory is fragmented then thers a better chance // of getting smaller allocations than full pages. _ASSERTE (pHeap); if (pHeap) ((MemoryPool*)pHeap)->FreeElement(pEntry); else delete [] (BYTE*)pEntry; } BOOL EEUnicodeStringLiteralHashTableHelper::CompareKeys(EEHashEntry_t *pEntry, EEStringData *pKey) { CONTRACTL { NOTHROW; GC_NOTRIGGER; FORBID_FAULT; } CONTRACTL_END GCX_COOP(); StringLiteralEntry *pHashData = (StringLiteralEntry *)pEntry->Data; EEStringData pEntryKey; pHashData->GetStringData(&pEntryKey); // Length not the same, never a match. if (pEntryKey.GetCharCount() != pKey->GetCharCount()) return FALSE; // Compare the entire thing. // We'll deliberately ignore the bOnlyLowChars field since this derived from the characters return (!memcmp(pEntryKey.GetStringBuffer(), pKey->GetStringBuffer(), pEntryKey.GetCharCount() * sizeof(WCHAR))); } DWORD EEUnicodeStringLiteralHashTableHelper::Hash(EEStringData *pKey) { LIMITED_METHOD_CONTRACT; return (HashBytes((const BYTE *) pKey->GetStringBuffer(), pKey->GetCharCount() * sizeof(WCHAR))); } // ============================================================================ // Instantiation hash table helper. 
// ============================================================================ EEHashEntry_t *EEInstantiationHashTableHelper::AllocateEntry(const SigTypeContext *pKey, BOOL bDeepCopy, AllocationHeap pHeap) { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END EEHashEntry_t *pEntry = (EEHashEntry_t *) new (nothrow) BYTE[SIZEOF_EEHASH_ENTRY + sizeof(SigTypeContext)]; if (!pEntry) return NULL; *((SigTypeContext*)pEntry->Key) = *pKey; return pEntry; } void EEInstantiationHashTableHelper::DeleteEntry(EEHashEntry_t *pEntry, AllocationHeap pHeap) { LIMITED_METHOD_CONTRACT; delete [] (BYTE*)pEntry; } BOOL EEInstantiationHashTableHelper::CompareKeys(EEHashEntry_t *pEntry, const SigTypeContext *pKey) { LIMITED_METHOD_CONTRACT; SigTypeContext *pThis = (SigTypeContext*)&pEntry->Key; return SigTypeContext::Equal(pThis, pKey); } DWORD EEInstantiationHashTableHelper::Hash(const SigTypeContext *pKey) { LIMITED_METHOD_CONTRACT; DWORD dwHash = 5381; DWORD i; for (i = 0; i < pKey->m_classInst.GetNumArgs(); i++) dwHash = ((dwHash << 5) + dwHash) ^ (unsigned int)(SIZE_T)pKey->m_classInst[i].AsPtr(); for (i = 0; i < pKey->m_methodInst.GetNumArgs(); i++) dwHash = ((dwHash << 5) + dwHash) ^ (unsigned int)(SIZE_T)pKey->m_methodInst[i].AsPtr(); return dwHash; } const SigTypeContext *EEInstantiationHashTableHelper::GetKey(EEHashEntry_t *pEntry) { LIMITED_METHOD_CONTRACT; return (const SigTypeContext*)&pEntry->Key; } // ============================================================================ // ComComponentInfo hash table helper. 
// ============================================================================ EEHashEntry_t *EEClassFactoryInfoHashTableHelper::AllocateEntry(ClassFactoryInfo *pKey, BOOL bDeepCopy, void *pHeap) { CONTRACTL { NOTHROW; GC_NOTRIGGER; INJECT_FAULT(return NULL;); } CONTRACTL_END EEHashEntry_t *pEntry; S_SIZE_T cbStringLen = S_SIZE_T(0); _ASSERTE(bDeepCopy && "Non deep copy is not supported by the EEComCompInfoHashTableHelper"); if (pKey->m_strServerName) cbStringLen = (S_SIZE_T(wcslen(pKey->m_strServerName)) + S_SIZE_T(1)) * S_SIZE_T(sizeof(WCHAR)); S_SIZE_T cbEntry = S_SIZE_T(SIZEOF_EEHASH_ENTRY + sizeof(ClassFactoryInfo)) + cbStringLen; if (cbEntry.IsOverflow()) return NULL; _ASSERTE(!cbStringLen.IsOverflow()); pEntry = (EEHashEntry_t *) new (nothrow) BYTE[cbEntry.Value()]; if (pEntry) { memcpy(pEntry->Key + sizeof(ClassFactoryInfo), pKey->m_strServerName, cbStringLen.Value()); ((ClassFactoryInfo*)pEntry->Key)->m_strServerName = pKey->m_strServerName ? (WCHAR*)(pEntry->Key + sizeof(ClassFactoryInfo)) : NULL; ((ClassFactoryInfo*)pEntry->Key)->m_clsid = pKey->m_clsid; } return pEntry; } void EEClassFactoryInfoHashTableHelper::DeleteEntry(EEHashEntry_t *pEntry, void *pHeap) { LIMITED_METHOD_CONTRACT; delete [] (BYTE*) pEntry; } BOOL EEClassFactoryInfoHashTableHelper::CompareKeys(EEHashEntry_t *pEntry, ClassFactoryInfo *pKey) { LIMITED_METHOD_CONTRACT; // First check the GUIDs. if (((ClassFactoryInfo*)pEntry->Key)->m_clsid != pKey->m_clsid) return FALSE; // Next do a trivial comparition on the server name pointer values. if (((ClassFactoryInfo*)pEntry->Key)->m_strServerName == pKey->m_strServerName) return TRUE; // If the pointers are not equal then if one is NULL then the server names are different. if (!((ClassFactoryInfo*)pEntry->Key)->m_strServerName || !pKey->m_strServerName) return FALSE; // Finally do a string comparition of the server names. 
return wcscmp(((ClassFactoryInfo*)pEntry->Key)->m_strServerName, pKey->m_strServerName) == 0; } DWORD EEClassFactoryInfoHashTableHelper::Hash(ClassFactoryInfo *pKey) { LIMITED_METHOD_CONTRACT; DWORD dwHash = 0; BYTE *pGuidData = (BYTE*)&pKey->m_clsid; for (unsigned int i = 0; i < sizeof(GUID); i++) { dwHash = (dwHash << 5) + (dwHash >> 5) + (*pGuidData); pGuidData++; } if (pKey->m_strServerName) { PCWSTR pSrvNameData = pKey->m_strServerName; while (*pSrvNameData != 0) { dwHash = (dwHash << 5) + (dwHash >> 5) + (*pSrvNameData); pSrvNameData++; } } return dwHash; } ClassFactoryInfo *EEClassFactoryInfoHashTableHelper::GetKey(EEHashEntry_t *pEntry) { LIMITED_METHOD_CONTRACT; return (ClassFactoryInfo*)pEntry->Key; } #endif // !DACCESS_COMPILE
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // File: eehash.cpp // // #include "common.h" #include "excep.h" #include "eehash.h" #include "stringliteralmap.h" #include "clsload.hpp" #include "typectxt.h" #include "genericdict.h" // ============================================================================ // UTF8 string hash table helper. // ============================================================================ EEHashEntry_t * EEUtf8HashTableHelper::AllocateEntry(LPCUTF8 pKey, BOOL bDeepCopy, void *pHeap) { CONTRACTL { NOTHROW; GC_NOTRIGGER; INJECT_FAULT(return NULL;); } CONTRACTL_END EEHashEntry_t *pEntry; if (bDeepCopy) { SIZE_T StringLen = strlen(pKey); SIZE_T BufLen = 0; if (!ClrSafeInt<SIZE_T>::addition(StringLen, SIZEOF_EEHASH_ENTRY + sizeof(LPUTF8) + 1, BufLen)) return NULL; pEntry = (EEHashEntry_t *) new (nothrow) BYTE[BufLen]; if (!pEntry) return NULL; memcpy(pEntry->Key + sizeof(LPUTF8), pKey, StringLen + 1); *((LPUTF8*)pEntry->Key) = (LPUTF8)(pEntry->Key + sizeof(LPUTF8)); } else { pEntry = (EEHashEntry_t *) new (nothrow)BYTE[SIZEOF_EEHASH_ENTRY + sizeof(LPUTF8)]; if (pEntry) *((LPCUTF8*)pEntry->Key) = pKey; } return pEntry; } void EEUtf8HashTableHelper::DeleteEntry(EEHashEntry_t *pEntry, void *pHeap) { CONTRACTL { NOTHROW; GC_NOTRIGGER; FORBID_FAULT; } CONTRACTL_END delete [] (BYTE*)pEntry; } BOOL EEUtf8HashTableHelper::CompareKeys(EEHashEntry_t *pEntry, LPCUTF8 pKey) { LIMITED_METHOD_DAC_CONTRACT; LPCUTF8 pEntryKey = *((LPCUTF8*)pEntry->Key); return (strcmp(pEntryKey, pKey) == 0) ? 
TRUE : FALSE; } DWORD EEUtf8HashTableHelper::Hash(LPCUTF8 pKey) { LIMITED_METHOD_DAC_CONTRACT; DWORD dwHash = 0; while (*pKey != 0) { dwHash = (dwHash << 5) + (dwHash >> 5) + (*pKey); pKey++; } return dwHash; } LPCUTF8 EEUtf8HashTableHelper::GetKey(EEHashEntry_t *pEntry) { LIMITED_METHOD_CONTRACT; return *((LPCUTF8*)pEntry->Key); } #ifndef DACCESS_COMPILE // ============================================================================ // Unicode string hash table helper. // ============================================================================ EEHashEntry_t * EEUnicodeHashTableHelper::AllocateEntry(EEStringData *pKey, BOOL bDeepCopy, void *pHeap) { CONTRACTL { NOTHROW; GC_NOTRIGGER; INJECT_FAULT(return NULL;); } CONTRACTL_END EEHashEntry_t *pEntry; if (bDeepCopy) { pEntry = (EEHashEntry_t *) new (nothrow) BYTE[SIZEOF_EEHASH_ENTRY + sizeof(EEStringData) + ((pKey->GetCharCount() + 1) * sizeof(WCHAR))]; if (pEntry) { EEStringData *pEntryKey = (EEStringData *)(&pEntry->Key); pEntryKey->SetIsOnlyLowChars (pKey->GetIsOnlyLowChars()); pEntryKey->SetCharCount (pKey->GetCharCount()); pEntryKey->SetStringBuffer ((LPWSTR) ((LPBYTE)pEntry->Key + sizeof(EEStringData))); memcpy((LPWSTR)pEntryKey->GetStringBuffer(), pKey->GetStringBuffer(), pKey->GetCharCount() * sizeof(WCHAR)); } } else { pEntry = (EEHashEntry_t *) new (nothrow) BYTE[SIZEOF_EEHASH_ENTRY + sizeof(EEStringData)]; if (pEntry) { EEStringData *pEntryKey = (EEStringData *) pEntry->Key; pEntryKey->SetIsOnlyLowChars (pKey->GetIsOnlyLowChars()); pEntryKey->SetCharCount (pKey->GetCharCount()); pEntryKey->SetStringBuffer (pKey->GetStringBuffer()); } } return pEntry; } void EEUnicodeHashTableHelper::DeleteEntry(EEHashEntry_t *pEntry, void *pHeap) { LIMITED_METHOD_CONTRACT; delete [] (BYTE*)pEntry; } BOOL EEUnicodeHashTableHelper::CompareKeys(EEHashEntry_t *pEntry, EEStringData *pKey) { LIMITED_METHOD_CONTRACT; EEStringData *pEntryKey = (EEStringData*) pEntry->Key; // Same buffer, same string. 
if (pEntryKey->GetStringBuffer() == pKey->GetStringBuffer()) return TRUE; // Length not the same, never a match. if (pEntryKey->GetCharCount() != pKey->GetCharCount()) return FALSE; // Compare the entire thing. // We'll deliberately ignore the bOnlyLowChars field since this derived from the characters return !memcmp(pEntryKey->GetStringBuffer(), pKey->GetStringBuffer(), pEntryKey->GetCharCount() * sizeof(WCHAR)); } DWORD EEUnicodeHashTableHelper::Hash(EEStringData *pKey) { LIMITED_METHOD_CONTRACT; return (HashBytes((const BYTE *) pKey->GetStringBuffer(), pKey->GetCharCount()*sizeof(WCHAR))); } EEStringData *EEUnicodeHashTableHelper::GetKey(EEHashEntry_t *pEntry) { LIMITED_METHOD_CONTRACT; return (EEStringData*)pEntry->Key; } void EEUnicodeHashTableHelper::ReplaceKey(EEHashEntry_t *pEntry, EEStringData *pNewKey) { LIMITED_METHOD_CONTRACT; ((EEStringData*)pEntry->Key)->SetStringBuffer (pNewKey->GetStringBuffer()); ((EEStringData*)pEntry->Key)->SetCharCount (pNewKey->GetCharCount()); ((EEStringData*)pEntry->Key)->SetIsOnlyLowChars (pNewKey->GetIsOnlyLowChars()); } // ============================================================================ // Unicode stringliteral hash table helper. // ============================================================================ EEHashEntry_t * EEUnicodeStringLiteralHashTableHelper::AllocateEntry(EEStringData *pKey, BOOL bDeepCopy, void *pHeap) { CONTRACTL { NOTHROW; GC_NOTRIGGER; INJECT_FAULT(return NULL;); } CONTRACTL_END // We assert here because we expect that the heap is not null for EEUnicodeStringLiteralHash table. // If someone finds more uses of this kind of hashtable then remove this asserte. // Also note that in case of heap being null we go ahead and use new /delete which is EXPENSIVE // But for production code this might be ok if the memory is fragmented then thers a better chance // of getting smaller allocations than full pages. 
_ASSERTE (pHeap); if (pHeap) return (EEHashEntry_t *) ((MemoryPool*)pHeap)->AllocateElementNoThrow (); else return (EEHashEntry_t *) new (nothrow) BYTE[SIZEOF_EEHASH_ENTRY]; } void EEUnicodeStringLiteralHashTableHelper::DeleteEntry(EEHashEntry_t *pEntry, void *pHeap) { CONTRACTL { NOTHROW; GC_NOTRIGGER; FORBID_FAULT; } CONTRACTL_END // We assert here because we expect that the heap is not null for EEUnicodeStringLiteralHash table. // If someone finds more uses of this kind of hashtable then remove this asserte. // Also note that in case of heap being null we go ahead and use new /delete which is EXPENSIVE // But for production code this might be ok if the memory is fragmented then thers a better chance // of getting smaller allocations than full pages. _ASSERTE (pHeap); if (pHeap) ((MemoryPool*)pHeap)->FreeElement(pEntry); else delete [] (BYTE*)pEntry; } BOOL EEUnicodeStringLiteralHashTableHelper::CompareKeys(EEHashEntry_t *pEntry, EEStringData *pKey) { CONTRACTL { NOTHROW; GC_NOTRIGGER; FORBID_FAULT; } CONTRACTL_END GCX_COOP(); StringLiteralEntry *pHashData = (StringLiteralEntry *)pEntry->Data; EEStringData pEntryKey; pHashData->GetStringData(&pEntryKey); // Length not the same, never a match. if (pEntryKey.GetCharCount() != pKey->GetCharCount()) return FALSE; // Compare the entire thing. // We'll deliberately ignore the bOnlyLowChars field since this derived from the characters return (!memcmp(pEntryKey.GetStringBuffer(), pKey->GetStringBuffer(), pEntryKey.GetCharCount() * sizeof(WCHAR))); } DWORD EEUnicodeStringLiteralHashTableHelper::Hash(EEStringData *pKey) { LIMITED_METHOD_CONTRACT; return (HashBytes((const BYTE *) pKey->GetStringBuffer(), pKey->GetCharCount() * sizeof(WCHAR))); } // ============================================================================ // Instantiation hash table helper. 
// ============================================================================ EEHashEntry_t *EEInstantiationHashTableHelper::AllocateEntry(const SigTypeContext *pKey, BOOL bDeepCopy, AllocationHeap pHeap) { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END EEHashEntry_t *pEntry = (EEHashEntry_t *) new (nothrow) BYTE[SIZEOF_EEHASH_ENTRY + sizeof(SigTypeContext)]; if (!pEntry) return NULL; *((SigTypeContext*)pEntry->Key) = *pKey; return pEntry; } void EEInstantiationHashTableHelper::DeleteEntry(EEHashEntry_t *pEntry, AllocationHeap pHeap) { LIMITED_METHOD_CONTRACT; delete [] (BYTE*)pEntry; } BOOL EEInstantiationHashTableHelper::CompareKeys(EEHashEntry_t *pEntry, const SigTypeContext *pKey) { LIMITED_METHOD_CONTRACT; SigTypeContext *pThis = (SigTypeContext*)&pEntry->Key; return SigTypeContext::Equal(pThis, pKey); } DWORD EEInstantiationHashTableHelper::Hash(const SigTypeContext *pKey) { LIMITED_METHOD_CONTRACT; DWORD dwHash = 5381; DWORD i; for (i = 0; i < pKey->m_classInst.GetNumArgs(); i++) dwHash = ((dwHash << 5) + dwHash) ^ (unsigned int)(SIZE_T)pKey->m_classInst[i].AsPtr(); for (i = 0; i < pKey->m_methodInst.GetNumArgs(); i++) dwHash = ((dwHash << 5) + dwHash) ^ (unsigned int)(SIZE_T)pKey->m_methodInst[i].AsPtr(); return dwHash; } const SigTypeContext *EEInstantiationHashTableHelper::GetKey(EEHashEntry_t *pEntry) { LIMITED_METHOD_CONTRACT; return (const SigTypeContext*)&pEntry->Key; } // ============================================================================ // ComComponentInfo hash table helper. 
// ============================================================================ EEHashEntry_t *EEClassFactoryInfoHashTableHelper::AllocateEntry(ClassFactoryInfo *pKey, BOOL bDeepCopy, void *pHeap) { CONTRACTL { NOTHROW; GC_NOTRIGGER; INJECT_FAULT(return NULL;); } CONTRACTL_END EEHashEntry_t *pEntry; S_SIZE_T cbStringLen = S_SIZE_T(0); _ASSERTE(bDeepCopy && "Non deep copy is not supported by the EEComCompInfoHashTableHelper"); if (pKey->m_strServerName) cbStringLen = (S_SIZE_T(wcslen(pKey->m_strServerName)) + S_SIZE_T(1)) * S_SIZE_T(sizeof(WCHAR)); S_SIZE_T cbEntry = S_SIZE_T(SIZEOF_EEHASH_ENTRY + sizeof(ClassFactoryInfo)) + cbStringLen; if (cbEntry.IsOverflow()) return NULL; _ASSERTE(!cbStringLen.IsOverflow()); pEntry = (EEHashEntry_t *) new (nothrow) BYTE[cbEntry.Value()]; if (pEntry) { memcpy(pEntry->Key + sizeof(ClassFactoryInfo), pKey->m_strServerName, cbStringLen.Value()); ((ClassFactoryInfo*)pEntry->Key)->m_strServerName = pKey->m_strServerName ? (WCHAR*)(pEntry->Key + sizeof(ClassFactoryInfo)) : NULL; ((ClassFactoryInfo*)pEntry->Key)->m_clsid = pKey->m_clsid; } return pEntry; } void EEClassFactoryInfoHashTableHelper::DeleteEntry(EEHashEntry_t *pEntry, void *pHeap) { LIMITED_METHOD_CONTRACT; delete [] (BYTE*) pEntry; } BOOL EEClassFactoryInfoHashTableHelper::CompareKeys(EEHashEntry_t *pEntry, ClassFactoryInfo *pKey) { LIMITED_METHOD_CONTRACT; // First check the GUIDs. if (((ClassFactoryInfo*)pEntry->Key)->m_clsid != pKey->m_clsid) return FALSE; // Next do a trivial comparition on the server name pointer values. if (((ClassFactoryInfo*)pEntry->Key)->m_strServerName == pKey->m_strServerName) return TRUE; // If the pointers are not equal then if one is NULL then the server names are different. if (!((ClassFactoryInfo*)pEntry->Key)->m_strServerName || !pKey->m_strServerName) return FALSE; // Finally do a string comparition of the server names. 
return wcscmp(((ClassFactoryInfo*)pEntry->Key)->m_strServerName, pKey->m_strServerName) == 0; } DWORD EEClassFactoryInfoHashTableHelper::Hash(ClassFactoryInfo *pKey) { LIMITED_METHOD_CONTRACT; DWORD dwHash = 0; BYTE *pGuidData = (BYTE*)&pKey->m_clsid; for (unsigned int i = 0; i < sizeof(GUID); i++) { dwHash = (dwHash << 5) + (dwHash >> 5) + (*pGuidData); pGuidData++; } if (pKey->m_strServerName) { PCWSTR pSrvNameData = pKey->m_strServerName; while (*pSrvNameData != 0) { dwHash = (dwHash << 5) + (dwHash >> 5) + (*pSrvNameData); pSrvNameData++; } } return dwHash; } ClassFactoryInfo *EEClassFactoryInfoHashTableHelper::GetKey(EEHashEntry_t *pEntry) { LIMITED_METHOD_CONTRACT; return (ClassFactoryInfo*)pEntry->Key; } #endif // !DACCESS_COMPILE
1
dotnet/runtime
66,234
Remove compiler warning suppression
Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
AaronRobinsonMSFT
2022-03-05T03:45:49Z
2022-03-09T00:57:12Z
220e67755ae6323a01011b83070dff6f84b02519
853e494abd1c1e12d28a76829b4680af7f694afa
Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
./src/coreclr/vm/fcall.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // FCall.H // // // FCall is a high-performance alternative to ECall. Unlike ECall, FCall // methods do not necessarily create a frame. Jitted code calls directly // to the FCall entry point. It is possible to do operations that need // to have a frame within an FCall, you need to manually set up the frame // before you do such operations. // It is illegal to cause a GC or EH to happen in an FCALL before setting // up a frame. To prevent accidentally violating this rule, FCALLs turn // on BEGINGCFORBID, which insures that these things can't happen in a // checked build without causing an ASSERTE. Once you set up a frame, // this state is turned off as long as the frame is active, and then is // turned on again when the frame is torn down. This mechanism should // be sufficient to insure that the rules are followed. // In general you set up a frame by using the following macros // HELPER_METHOD_FRAME_BEGIN_RET*() // Use If the FCALL has a return value // HELPER_METHOD_FRAME_BEGIN*() // Use If FCALL does not return a value // HELPER_METHOD_FRAME_END*() // These macros introduce a scope which is protected by an HelperMethodFrame. // In this scope you can do EH or GC. There are rules associated with // their use. In particular // 1) These macros can only be used in the body of a FCALL (that is // something using the FCIMPL* or HCIMPL* macros for their decaration. // 2) You may not perform a 'return' within this scope.. // Compile time errors occur if you try to violate either of these rules. // The frame that is set up does NOT protect any GC variables (in particular the // arguments of the FCALL. Thus you need to do an explicit GCPROTECT once the // frame is established if you need to protect an argument. There are flavors // of HELPER_METHOD_FRAME that protect a certain number of GC variables. 
For // example // HELPER_METHOD_FRAME_BEGIN_RET_2(arg1, arg2) // will protect the GC variables arg1, and arg2 as well as erecting the frame. // Another invariant that you must be aware of is the need to poll to see if // a GC is needed by some other thread. Unless the FCALL is VERY short, // every code path through the FCALL must do such a poll. The important // thing here is that a poll will cause a GC, and thus you can only do it // when all you GC variables are protected. To make things easier // HELPER_METHOD_FRAMES that protect things automatically do this poll. // If you don't need to protect anything HELPER_METHOD_FRAME_BEGIN_0 // will also do the poll. // Sometimes it is convenient to do the poll a the end of the frame, you // can use HELPER_METHOD_FRAME_BEGIN_NOPOLL and HELPER_METHOD_FRAME_END_POLL // to do the poll at the end. If somewhere in the middle is the best // place you can do that too with HELPER_METHOD_POLL() // You don't need to erect a helper method frame to do a poll. FC_GC_POLL // can do this (remember all your GC refs will be trashed). // Finally if your method is VERY small, you can get away without a poll, // you have to use FC_GC_POLL_NOT_NEEDED to mark this. // Use sparingly! // It is possible to set up the frame as the first operation in the FCALL and // tear it down as the last operation before returning. This works and is // reasonably efficient (as good as an ECall), however, if it is the case that // you can defer the setup of the frame to an unlikely code path (exception path) // that is much better. // If you defer setup of the frame, all codepaths leading to the frame setup // must be wrapped with PERMIT_HELPER_METHOD_FRAME_BEGIN/END. These block // certain compiler optimizations that interfere with the delayed frame setup. // These macros are automatically included in the HCIMPL, FCIMPL, and frame // setup macros. 
// <TODO>TODO: we should have a way of doing a trial allocation (an allocation that // will fail if it would cause a GC). That way even FCALLs that need to allocate // would not necessarily need to set up a frame. </TODO> // It is common to only need to set up a frame in order to throw an exception. // While this can be done by doing // HELPER_METHOD_FRAME_BEGIN() // Use if FCALL does not return a value // COMPlusThrow(execpt); // HELPER_METHOD_FRAME_END() // It is more efficient (in space) to use convenience macro FCTHROW that does // this for you (sets up a frame, and does the throw). // FCTHROW(except) // Since FCALLS have to conform to the EE calling conventions and not to C // calling conventions, FCALLS, need to be declared using special macros (FCIMPL*) // that implement the correct calling conventions. There are variants of these // macros depending on the number of args, and sometimes the types of the // arguments. //------------------------------------------------------------------------ // A very simple example: // // FCIMPL2(INT32, Div, INT32 x, INT32 y) // { // if (y == 0) // FCThrow(kDivideByZeroException); // return x/y; // } // FCIMPLEND // // // *** WATCH OUT FOR THESE GOTCHAS: *** // ------------------------------------ // - In your FCDECL & FCIMPL protos, don't declare a param as type OBJECTREF // or any of its deriveds. This will break on the checked build because // __fastcall doesn't enregister C++ objects (which OBJECTREF is). // Instead, you need to do something like; // // FCIMPL(.., .., Object* pObject0) // OBJECTREF pObject = ObjectToOBJECTREF(pObject0); // FCIMPL // // For similar reasons, use Object* rather than OBJECTREF as a return type. // Consider either using ObjectToOBJECTREF or calling VALIDATEOBJECTREF // to make sure your Object* is valid. // // - FCThrow() must be called directly from your FCall impl function: it // cannot be called from a subfunction. 
Calling from a subfunction breaks // the VC code parsing workaround that lets us recover the callee saved registers. // Fortunately, you'll get a compile error complaining about an // unknown variable "__me". // // - If your FCall returns VOID, you must use FCThrowVoid() rather than // FCThrow(). This is because FCThrow() has to generate an unexecuted // "return" statement for the code parser. // // - On x86, if first and/or second argument of your FCall cannot be passed // in either of the __fastcall registers (ECX/EDX), you must use "V" versions // of FCDECL and FCIMPL macros to enregister arguments correctly. Some of the // most common types that fit this requirement are 64-bit values (i.e. INT64 or // UINT64) and floating-point values (i.e. FLOAT or DOUBLE). For example, FCDECL3_IVI // must be used for FCalls that take 3 arguments and 2nd argument is INT64 and // FDECL2_VV must be used for FCalls that take 2 arguments where both are FLOAT. // // - You may use structs for protecting multiple OBJECTREF's simultaneously. // In these cases, you must use a variant of a helper method frame with PROTECT // in the name, to ensure all the OBJECTREF's in the struct get protected. // Also, initialize all the OBJECTREF's first. Like this: // // FCIMPL4(Object*, COMNlsInfo::nativeChangeCaseString, LocaleIDObject* localeUNSAFE, // INT_PTR pNativeTextInfo, StringObject* pStringUNSAFE, CLR_BOOL bIsToUpper) // { // [ignoring CONTRACT for now] // struct _gc // { // STRINGREF pResult; // STRINGREF pString; // LOCALEIDREF pLocale; // } gc; // gc.pResult = NULL; // gc.pString = ObjectToSTRINGREF(pStringUNSAFE); // gc.pLocale = (LOCALEIDREF)ObjectToOBJECTREF(localeUNSAFE); // // HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc) // // If you forgot the PROTECT part, the macro will only protect the first OBJECTREF, // introducing a subtle GC hole in your code. Fortunately, we now issue a // compile-time error if you forget. 
// How FCall works: // ---------------- // An FCall target uses __fastcall or some other calling convention to // match the IL calling convention exactly. Thus, a call to FCall is a direct // call to the target w/ no intervening stub or frame. // // The tricky part is when FCThrow is called. FCThrow must generate // a proper method frame before allocating and throwing the exception. // To do this, it must recover several things: // // - The location of the FCIMPL's return address (since that's // where the frame will be based.) // // - The on-entry values of the callee-saved regs; which must // be recorded in the frame so that GC can update them. // Depending on how VC compiles your FCIMPL, those values are still // in the original registers or saved on the stack. // // To figure out which, FCThrow() generates the code: // // while (NULL == __FCThrow(__me, ...)) {}; // return 0; // // The "return" statement will never execute; but its presence guarantees // that VC will follow the __FCThrow() call with a VC epilog // that restores the callee-saved registers using a pretty small // and predictable set of Intel opcodes. __FCThrow() parses this // epilog and simulates its execution to recover the callee saved // registers. // // The while loop is to prevent the compiler from doing tail call optimizations. // The helper frame interpretter needs the frame to be present. // // - The MethodDesc* that this FCall implements. This MethodDesc* // is part of the frame and ensures that the FCall will appear // in the exception's stack trace. To get this, FCDECL declares // a static local __me, initialized to point to the FC target itself. // This address is exactly what's stored in the ECall lookup tables; // so __FCThrow() simply does a reverse lookup on that table to recover // the MethodDesc*. 
// #ifndef __FCall_h__ #define __FCall_h__ #include "gms.h" #include "runtimeexceptionkind.h" #include "debugreturn.h" //============================================================================================== // These macros defeat compiler optimizations that might mix nonvolatile // register loads and stores with other code in the function body. This // creates problems for the frame setup code, which assumes that any // nonvolatiles that are saved at the point of the frame setup will be // re-loaded when the frame is popped. // // Currently this is only known to be an issue on AMD64. It's uncertain // whether it is an issue on x86. //============================================================================================== #if defined(TARGET_AMD64) && !defined(TARGET_UNIX) // // On AMD64 this is accomplished by including a setjmp anywhere in a function. // Doesn't matter whether it is reachable or not, and in fact in optimized // builds the setjmp is removed altogether. // #include <setjmp.h> // // Use of setjmp is temporary, we will eventually have compiler intrinsics to // disable the optimizations. Besides, we don't actually execute setjmp in // these macros (or anywhere else in the VM on AMD64). // #pragma warning(disable:4611) // interaction between '_setjmp' and C++ object destruction is non-portable #ifdef _DEBUG // // Linked list of unmanaged methods preceeding a HelperMethodFrame push. This // is linked onto the current Thread. Each list entry is stack-allocated so it // can be associated with an unmanaged frame. Each unmanaged frame needs to be // associated with at least one list entry. // struct HelperMethodFrameCallerList { HelperMethodFrameCallerList *pCaller; }; #endif // _DEBUG // // Resets the Thread state at a new managed -> fcall transition. 
// class FCallTransitionState { public: FCallTransitionState () NOT_DEBUG({ LIMITED_METHOD_CONTRACT; }); ~FCallTransitionState () NOT_DEBUG({ LIMITED_METHOD_CONTRACT; }); #ifdef _DEBUG private: Thread *m_pThread; HelperMethodFrameCallerList *m_pPreviousHelperMethodFrameCallerList; #endif // _DEBUG }; // // Pushes/pops state for each caller. // class PermitHelperMethodFrameState { public: PermitHelperMethodFrameState () NOT_DEBUG({ LIMITED_METHOD_CONTRACT; }); ~PermitHelperMethodFrameState () NOT_DEBUG({ LIMITED_METHOD_CONTRACT; }); static VOID CheckHelperMethodFramePermitted () NOT_DEBUG({ LIMITED_METHOD_CONTRACT; }); #ifdef _DEBUG private: Thread *m_pThread; HelperMethodFrameCallerList m_ListEntry; #endif // _DEBUG }; // // Resets the Thread state after the HelperMethodFrame is pushed. At this // point, the HelperMethodFrame is capable of unwinding to the managed code, // so we can reset the Thread state for any nested fcalls. // class CompletedFCallTransitionState { public: CompletedFCallTransitionState () NOT_DEBUG({ LIMITED_METHOD_CONTRACT; }); ~CompletedFCallTransitionState () NOT_DEBUG({ LIMITED_METHOD_CONTRACT; }); #ifdef _DEBUG private: HelperMethodFrameCallerList *m_pLastHelperMethodFrameCallerList; #endif // _DEBUG }; #define PERMIT_HELPER_METHOD_FRAME_BEGIN() \ if (1) \ { \ PermitHelperMethodFrameState ___PermitHelperMethodFrameState; #define PERMIT_HELPER_METHOD_FRAME_END() \ } \ else \ { \ jmp_buf ___jmpbuf; \ setjmp(___jmpbuf); \ __assume(0); \ } #define FCALL_TRANSITION_BEGIN() \ FCallTransitionState ___FCallTransitionState; \ PERMIT_HELPER_METHOD_FRAME_BEGIN(); #define FCALL_TRANSITION_END() \ PERMIT_HELPER_METHOD_FRAME_END(); #define CHECK_HELPER_METHOD_FRAME_PERMITTED() \ PermitHelperMethodFrameState::CheckHelperMethodFramePermitted(); \ CompletedFCallTransitionState ___CompletedFCallTransitionState; #else // unsupported processor #define PERMIT_HELPER_METHOD_FRAME_BEGIN() #define PERMIT_HELPER_METHOD_FRAME_END() #define FCALL_TRANSITION_BEGIN() 
#define FCALL_TRANSITION_END() #define CHECK_HELPER_METHOD_FRAME_PERMITTED() #endif // unsupported processor //============================================================================================== // This is where FCThrow ultimately ends up. Never call this directly. // Use the FCThrow() macros. __FCThrowArgument is the helper to throw ArgumentExceptions // with a resource taken from the managed resource manager. //============================================================================================== LPVOID __FCThrow(LPVOID me, enum RuntimeExceptionKind reKind, UINT resID, LPCWSTR arg1, LPCWSTR arg2, LPCWSTR arg3); LPVOID __FCThrowArgument(LPVOID me, enum RuntimeExceptionKind reKind, LPCWSTR argumentName, LPCWSTR resourceName); //============================================================================================== // FDECLn: A set of macros for generating header declarations for FC targets. // Use FIMPLn for the actual body. //============================================================================================== // Note: on the x86, these defs reverse all but the first two arguments // (IL stack calling convention is reversed from __fastcall.) // Calling convention for varargs #define F_CALL_VA_CONV __cdecl #ifdef TARGET_X86 // Choose the appropriate calling convention for FCALL helpers on the basis of the JIT calling convention #ifdef __GNUC__ #define F_CALL_CONV __attribute__((cdecl, regparm(3))) // GCC FCALL convention (simulated via cdecl, regparm(3)) is different from MSVC FCALL convention. GCC can use up // to 3 registers to store parameters. The registers used are EAX, EDX, ECX. Dummy parameters and reordering // of the actual parameters in the FCALL signature is used to make the calling convention to look like in MSVC. 
#define SWIZZLE_REGARG_ORDER #else // __GNUC__ #define F_CALL_CONV __fastcall #endif // !__GNUC__ #define SWIZZLE_STKARG_ORDER #else // TARGET_X86 // // non-x86 platforms don't have messed-up calling convention swizzling // #define F_CALL_CONV #endif // !TARGET_X86 #ifdef SWIZZLE_STKARG_ORDER #ifdef SWIZZLE_REGARG_ORDER #define FCDECL0(rettype, funcname) rettype F_CALL_CONV funcname() #define FCDECL1(rettype, funcname, a1) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a1) #define FCDECL1_V(rettype, funcname, a1) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, int /* ECX */, a1) #define FCDECL2(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1) #define FCDECL2VA(rettype, funcname, a1, a2) rettype F_CALL_VA_CONV funcname(a1, a2, ...) #define FCDECL2_VV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, int /* ECX */, a2, a1) #define FCDECL2_VI(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a2, a1) #define FCDECL2_IV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a1, a2) #define FCDECL3(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a3) #define FCDECL3_IIV(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a3) #define FCDECL3_VII(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, a3, a2, a1) #define FCDECL3_IVV(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a1, a3, a2) #define FCDECL3_IVI(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, a3, a1, a2) #define FCDECL3_VVI(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a3, a2, a1) #define FCDECL3_VVV(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, int /* ECX */, a3, a2, a1) #define FCDECL4(rettype, funcname, a1, a2, a3, 
a4) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a4, a3) #define FCDECL5(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a5, a4, a3) #define FCDECL6(rettype, funcname, a1, a2, a3, a4, a5, a6) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a6, a5, a4, a3) #define FCDECL7(rettype, funcname, a1, a2, a3, a4, a5, a6, a7) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a7, a6, a5, a4, a3) #define FCDECL8(rettype, funcname, a1, a2, a3, a4, a5, a6, a7, a8) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a8, a7, a6, a5, a4, a3) #define FCDECL9(rettype, funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a9, a8, a7, a6, a5, a4, a3) #define FCDECL10(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a10, a9, a8, a7, a6, a5, a4, a3) #define FCDECL11(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a11, a10, a9, a8, a7, a6, a5, a4, a3) #define FCDECL12(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3) #define FCDECL13(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a13, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3) #define FCDECL14(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a14, a13, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3) #define FCDECL5_IVI(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(int /* EAX */, a3, a1, a5, a4, a2) #define FCDECL5_VII(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(int /* EAX */, a3, a2, a5, a4, a1) #else // SWIZZLE_REGARG_ORDER #define FCDECL0(rettype, funcname) rettype F_CALL_CONV funcname() #define 
FCDECL1(rettype, funcname, a1) rettype F_CALL_CONV funcname(a1) #define FCDECL1_V(rettype, funcname, a1) rettype F_CALL_CONV funcname(a1) #define FCDECL2(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2) #define FCDECL2VA(rettype, funcname, a1, a2) rettype F_CALL_VA_CONV funcname(a1, a2, ...) #define FCDECL2_VV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a2, a1) #define FCDECL2_VI(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a2, a1) #define FCDECL2_IV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2) #define FCDECL3(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a2, a3) #define FCDECL3_IIV(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a2, a3) #define FCDECL3_VII(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a2, a3, a1) #define FCDECL3_IVV(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a3, a2) #define FCDECL3_IVI(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a3, a2) #define FCDECL3_VVI(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a2, a1, a3) #define FCDECL3_VVV(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a3, a2, a1) #define FCDECL4(rettype, funcname, a1, a2, a3, a4) rettype F_CALL_CONV funcname(a1, a2, a4, a3) #define FCDECL5(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(a1, a2, a5, a4, a3) #define FCDECL6(rettype, funcname, a1, a2, a3, a4, a5, a6) rettype F_CALL_CONV funcname(a1, a2, a6, a5, a4, a3) #define FCDECL7(rettype, funcname, a1, a2, a3, a4, a5, a6, a7) rettype F_CALL_CONV funcname(a1, a2, a7, a6, a5, a4, a3) #define FCDECL8(rettype, funcname, a1, a2, a3, a4, a5, a6, a7, a8) rettype F_CALL_CONV funcname(a1, a2, a8, a7, a6, a5, a4, a3) #define FCDECL9(rettype, funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9) rettype F_CALL_CONV funcname(a1, a2, a9, a8, a7, a6, a5, a4, a3) #define FCDECL10(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) rettype F_CALL_CONV 
funcname(a1, a2, a10, a9, a8, a7, a6, a5, a4, a3) #define FCDECL11(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11) rettype F_CALL_CONV funcname(a1, a2, a11, a10, a9, a8, a7, a6, a5, a4, a3) #define FCDECL12(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12) rettype F_CALL_CONV funcname(a1, a2, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3) #define FCDECL13(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13) rettype F_CALL_CONV funcname(a1, a2, a13, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3) #define FCDECL14(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14) rettype F_CALL_CONV funcname(a1, a2, a14, a13, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3) #define FCDECL5_IVI(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(a1, a3, a5, a4, a2) #define FCDECL5_VII(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(a2, a3, a5, a4, a1) #endif // !SWIZZLE_REGARG_ORDER #if 0 // // don't use something like this... 
directly calling an FCALL from within the runtime breaks stackwalking because // the FCALL reverse mapping only gets established in ECall::GetFCallImpl and that codepath is circumvented by // directly calling and FCALL // See below for usage of FC_CALL_INNER (used in SecurityStackWalk::Check presently) // #define FCCALL0(funcname) funcname() #define FCCALL1(funcname, a1) funcname(a1) #define FCCALL2(funcname, a1, a2) funcname(a1, a2) #define FCCALL3(funcname, a1, a2, a3) funcname(a1, a2, a3) #define FCCALL4(funcname, a1, a2, a3, a4) funcname(a1, a2, a4, a3) #define FCCALL5(funcname, a1, a2, a3, a4, a5) funcname(a1, a2, a5, a4, a3) #define FCCALL6(funcname, a1, a2, a3, a4, a5, a6) funcname(a1, a2, a6, a5, a4, a3) #define FCCALL7(funcname, a1, a2, a3, a4, a5, a6, a7) funcname(a1, a2, a7, a6, a5, a4, a3) #define FCCALL8(funcname, a1, a2, a3, a4, a5, a6, a7, a8) funcname(a1, a2, a8, a7, a6, a5, a4, a3) #define FCCALL9(funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9) funcname(a1, a2, a9, a8, a7, a6, a5, a4, a3) #define FCCALL10(funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) funcname(a1, a2, a10, a9, a8, a7, a6, a5, a4, a3) #define FCCALL11(funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11) funcname(a1, a2, a11, a10, a9, a8, a7, a6, a5, a4, a3) #define FCCALL12(funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12) funcname(a1, a2, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3) #endif // 0 #else // !SWIZZLE_STKARG_ORDER #define FCDECL0(rettype, funcname) rettype F_CALL_CONV funcname() #define FCDECL1(rettype, funcname, a1) rettype F_CALL_CONV funcname(a1) #define FCDECL1_V(rettype, funcname, a1) rettype F_CALL_CONV funcname(a1) #define FCDECL2(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2) #define FCDECL2VA(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2, ...) 
#define FCDECL2_VV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2) #define FCDECL2_VI(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2) #define FCDECL2_IV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2) #define FCDECL3(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a2, a3) #define FCDECL3_IIV(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a2, a3) #define FCDECL3_VII(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a2, a3) #define FCDECL3_IVV(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a2, a3) #define FCDECL3_IVI(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a2, a3) #define FCDECL3_VVI(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a2, a3) #define FCDECL3_VVV(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a2, a3) #define FCDECL4(rettype, funcname, a1, a2, a3, a4) rettype F_CALL_CONV funcname(a1, a2, a3, a4) #define FCDECL5(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(a1, a2, a3, a4, a5) #define FCDECL6(rettype, funcname, a1, a2, a3, a4, a5, a6) rettype F_CALL_CONV funcname(a1, a2, a3, a4, a5, a6) #define FCDECL7(rettype, funcname, a1, a2, a3, a4, a5, a6, a7) rettype F_CALL_CONV funcname(a1, a2, a3, a4, a5, a6, a7) #define FCDECL8(rettype, funcname, a1, a2, a3, a4, a5, a6, a7, a8) rettype F_CALL_CONV funcname(a1, a2, a3, a4, a5, a6, a7, a8) #define FCDECL9(rettype, funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9) rettype F_CALL_CONV funcname(a1, a2, a3, a4, a5, a6, a7, a8, a9) #define FCDECL10(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) rettype F_CALL_CONV funcname(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) #define FCDECL11(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11) rettype F_CALL_CONV funcname(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11) #define FCDECL12(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12) rettype F_CALL_CONV 
funcname(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12) #define FCDECL13(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13) rettype F_CALL_CONV funcname(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13) #define FCDECL14(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14) rettype F_CALL_CONV funcname(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14) #define FCDECL5_IVI(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(a1, a2, a3, a4, a5) #define FCDECL5_VII(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(a1, a2, a3, a4, a5) #endif // !SWIZZLE_STKARG_ORDER #define HELPER_FRAME_DECL(x) FrameWithCookie<HelperMethodFrame_##x##OBJ> __helperframe // use the capture state machinery if the architecture has one // // For a normal build we create a loop (see explaination on RestoreState below) // We don't want a loop here for PREFAST since that causes // warning 263: Using _alloca in a loop // And we can't use DEBUG_OK_TO_RETURN for PREFAST because the PREFAST version // requires that you already be in a DEBUG_ASSURE_NO_RETURN_BEGIN scope #define HelperMethodFrame_0OBJ HelperMethodFrame #define HELPER_FRAME_ARGS(attribs) __me, attribs #define FORLAZYMACHSTATE(x) x #if defined(_PREFAST_) #define FORLAZYMACHSTATE_BEGINLOOP(x) x #define FORLAZYMACHSTATE_ENDLOOP(x) #define FORLAZYMACHSTATE_DEBUG_OK_TO_RETURN_BEGIN #define FORLAZYMACHSTATE_DEBUG_OK_TO_RETURN_END #else #define FORLAZYMACHSTATE_BEGINLOOP(x) x do #define FORLAZYMACHSTATE_ENDLOOP(x) while(x) #define FORLAZYMACHSTATE_DEBUG_OK_TO_RETURN_BEGIN DEBUG_OK_TO_RETURN_BEGIN(LAZYMACHSTATE) #define FORLAZYMACHSTATE_DEBUG_OK_TO_RETURN_END DEBUG_OK_TO_RETURN_END(LAZYMACHSTATE) #endif // BEGIN: before gcpoll //FCallGCCanTriggerNoDtor __fcallGcCanTrigger; //__fcallGcCanTrigger.Enter(); // END: after gcpoll //__fcallGcCanTrigger.Leave(__FUNCTION__, __FILE__, __LINE__); // We have to put DEBUG_OK_TO_RETURN_BEGIN around the 
FORLAZYMACHSTATE // to allow the HELPER_FRAME to be installed inside an SO_INTOLERANT region // which does not allow a return. The return is used by FORLAZYMACHSTATE // to capture the state, but is not an actual return, so it is ok. #define HELPER_METHOD_FRAME_BEGIN_EX_BODY(ret, helperFrame, gcpoll, allowGC) \ FORLAZYMACHSTATE_BEGINLOOP(int alwaysZero = 0;) \ { \ INDEBUG(static BOOL __haveCheckedRestoreState = FALSE;) \ PERMIT_HELPER_METHOD_FRAME_BEGIN(); \ CHECK_HELPER_METHOD_FRAME_PERMITTED(); \ helperFrame; \ FORLAZYMACHSTATE_DEBUG_OK_TO_RETURN_BEGIN; \ FORLAZYMACHSTATE(CAPTURE_STATE(__helperframe.MachineState(), ret);) \ FORLAZYMACHSTATE_DEBUG_OK_TO_RETURN_END; \ INDEBUG(__helperframe.SetAddrOfHaveCheckedRestoreState(&__haveCheckedRestoreState)); \ DEBUG_ASSURE_NO_RETURN_BEGIN(HELPER_METHOD_FRAME); \ INCONTRACT(FCallGCCanTrigger::Enter()); #define HELPER_METHOD_FRAME_BEGIN_EX(ret, helperFrame, gcpoll, allowGC) \ HELPER_METHOD_FRAME_BEGIN_EX_BODY(ret, helperFrame, gcpoll, allowGC) \ /* <TODO>TODO TURN THIS ON!!! </TODO> */ \ /* gcpoll; */ \ INSTALL_MANAGED_EXCEPTION_DISPATCHER; \ __helperframe.Push(); \ MAKE_CURRENT_THREAD_AVAILABLE_EX(__helperframe.GetThread()); \ INSTALL_UNWIND_AND_CONTINUE_HANDLER_FOR_HMF(&__helperframe); #define HELPER_METHOD_FRAME_BEGIN_EX_NOTHROW(ret, helperFrame, gcpoll, allowGC, probeFailExpr) \ HELPER_METHOD_FRAME_BEGIN_EX_BODY(ret, helperFrame, gcpoll, allowGC) \ __helperframe.Push(); \ MAKE_CURRENT_THREAD_AVAILABLE_EX(__helperframe.GetThread()); \ /* <TODO>TODO TURN THIS ON!!! </TODO> */ \ /* gcpoll; */ // The while(__helperframe.RestoreState() needs a bit of explanation. // The issue is insuring that the same machine state (which registers saved) // exists when the machine state is probed (when the frame is created, and // when it is actually used (when the frame is popped. We do this by creating // a flow of control from use to def. 
Note that 'RestoreState' always returns false
// we never actually loop, but the compiler does not know that, and thus
// will be forced to make the keep the state of register spills the same at
// the two locations.

// Common tail for the END macros: restores the lazily captured machine state
// and closes the FORLAZYMACHSTATE pseudo-loop opened by the BEGIN macro.
#define HELPER_METHOD_FRAME_END_EX_BODY(gcpoll,allowGC) \
    /* <TODO>TODO TURN THIS ON!!! </TODO> */ \
    /* gcpoll; */ \
    DEBUG_ASSURE_NO_RETURN_END(HELPER_METHOD_FRAME); \
    INCONTRACT(FCallGCCanTrigger::Leave(__FUNCTION__, __FILE__, __LINE__)); \
    FORLAZYMACHSTATE(alwaysZero = \
    HelperMethodFrameRestoreState(INDEBUG_COMMA(&__helperframe) \
    __helperframe.MachineState());) \
    PERMIT_HELPER_METHOD_FRAME_END() \
    } FORLAZYMACHSTATE_ENDLOOP(alwaysZero);

// Standard END: uninstalls the handlers installed by BEGIN, pops the frame,
// then runs the common body above.
#define HELPER_METHOD_FRAME_END_EX(gcpoll,allowGC) \
    UNINSTALL_UNWIND_AND_CONTINUE_HANDLER; \
    __helperframe.Pop(); \
    UNINSTALL_MANAGED_EXCEPTION_DISPATCHER; \
    HELPER_METHOD_FRAME_END_EX_BODY(gcpoll,allowGC);

// END for the NOTHROW BEGIN flavor (no EH handlers were installed there).
#define HELPER_METHOD_FRAME_END_EX_NOTHROW(gcpoll,allowGC) \
    __helperframe.Pop(); \
    HELPER_METHOD_FRAME_END_EX_BODY(gcpoll,allowGC);

#define HELPER_METHOD_FRAME_BEGIN_ATTRIB(attribs) \
    HELPER_METHOD_FRAME_BEGIN_EX( \
        return, \
        HELPER_FRAME_DECL(0)(HELPER_FRAME_ARGS(attribs)), \
        HELPER_METHOD_POLL(),TRUE)

#define HELPER_METHOD_FRAME_BEGIN_0() \
    HELPER_METHOD_FRAME_BEGIN_ATTRIB(Frame::FRAME_ATTR_NONE)

#define HELPER_METHOD_FRAME_BEGIN_ATTRIB_NOPOLL(attribs) \
    HELPER_METHOD_FRAME_BEGIN_EX( \
        return, \
        HELPER_FRAME_DECL(0)(HELPER_FRAME_ARGS(attribs)), \
        {},FALSE)

#define HELPER_METHOD_FRAME_BEGIN_NOPOLL() HELPER_METHOD_FRAME_BEGIN_ATTRIB_NOPOLL(Frame::FRAME_ATTR_NONE)

// BEGIN variants that GC-protect one, two, or three OBJECTREF locals.  The
// static_asserts reject structs made of several OBJECTREFs, which must use a
// _PROTECT variant instead.
#define HELPER_METHOD_FRAME_BEGIN_ATTRIB_1(attribs, arg1) \
    static_assert(sizeof(arg1) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\
    HELPER_METHOD_FRAME_BEGIN_EX( \
        return, \
        HELPER_FRAME_DECL(1)(HELPER_FRAME_ARGS(attribs), \
        (OBJECTREF*) &arg1), \
        HELPER_METHOD_POLL(),TRUE)

#define HELPER_METHOD_FRAME_BEGIN_1(arg1) HELPER_METHOD_FRAME_BEGIN_ATTRIB_1(Frame::FRAME_ATTR_NONE, arg1)

#define HELPER_METHOD_FRAME_BEGIN_ATTRIB_2(attribs, arg1, arg2) \
    static_assert(sizeof(arg1) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\
    static_assert(sizeof(arg2) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\
    HELPER_METHOD_FRAME_BEGIN_EX( \
        return, \
        HELPER_FRAME_DECL(2)(HELPER_FRAME_ARGS(attribs), \
        (OBJECTREF*) &arg1, (OBJECTREF*) &arg2), \
        HELPER_METHOD_POLL(),TRUE)

#define HELPER_METHOD_FRAME_BEGIN_2(arg1, arg2) HELPER_METHOD_FRAME_BEGIN_ATTRIB_2(Frame::FRAME_ATTR_NONE, arg1, arg2)

#define HELPER_METHOD_FRAME_BEGIN_ATTRIB_3(attribs, arg1, arg2, arg3) \
    static_assert(sizeof(arg1) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\
    static_assert(sizeof(arg2) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\
    static_assert(sizeof(arg3) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\
    HELPER_METHOD_FRAME_BEGIN_EX( \
        return, \
        HELPER_FRAME_DECL(3)(HELPER_FRAME_ARGS(attribs), \
        (OBJECTREF*) &arg1, (OBJECTREF*) &arg2, (OBJECTREF*) &arg3), \
        HELPER_METHOD_POLL(),TRUE)

#define HELPER_METHOD_FRAME_BEGIN_3(arg1, arg2, arg3) HELPER_METHOD_FRAME_BEGIN_ATTRIB_3(Frame::FRAME_ATTR_NONE, arg1, arg2, arg3)

// BEGIN variant that protects a struct composed entirely of OBJECTREF fields.
#define HELPER_METHOD_FRAME_BEGIN_PROTECT(gc) \
    HELPER_METHOD_FRAME_BEGIN_EX( \
        return, \
        HELPER_FRAME_DECL(PROTECT)(HELPER_FRAME_ARGS(Frame::FRAME_ATTR_NONE), \
        (OBJECTREF*)&(gc), sizeof(gc)/sizeof(OBJECTREF)), \
        HELPER_METHOD_POLL(),TRUE)

// _RET variants for FCalls that return a value ("return 0" on the bail-out
// path) and _RET_VC variants for value-class returns (FC_RETURN_VC).
#define HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_NOPOLL(attribs) \
    HELPER_METHOD_FRAME_BEGIN_EX( \
        return 0, \
        HELPER_FRAME_DECL(0)(HELPER_FRAME_ARGS(attribs)), \
        {},FALSE)

#define HELPER_METHOD_FRAME_BEGIN_RET_VC_ATTRIB_NOPOLL(attribs) \
    HELPER_METHOD_FRAME_BEGIN_EX( \
        FC_RETURN_VC(), \
        HELPER_FRAME_DECL(0)(HELPER_FRAME_ARGS(attribs)), \
        {},FALSE)

#define HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB(attribs) \
    HELPER_METHOD_FRAME_BEGIN_EX( \
        return 0, \
        HELPER_FRAME_DECL(0)(HELPER_FRAME_ARGS(attribs)), \
        HELPER_METHOD_POLL(),TRUE)

#define HELPER_METHOD_FRAME_BEGIN_RET_0() \
    HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB(Frame::FRAME_ATTR_NONE)

#define HELPER_METHOD_FRAME_BEGIN_RET_VC_0() \
    HELPER_METHOD_FRAME_BEGIN_EX( \
        FC_RETURN_VC(), \
        HELPER_FRAME_DECL(0)(HELPER_FRAME_ARGS(Frame::FRAME_ATTR_NONE)), \
        HELPER_METHOD_POLL(),TRUE)

#define HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_1(attribs, arg1) \
    static_assert(sizeof(arg1) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\
    HELPER_METHOD_FRAME_BEGIN_EX( \
        return 0, \
        HELPER_FRAME_DECL(1)(HELPER_FRAME_ARGS(attribs), \
        (OBJECTREF*) &arg1), \
        HELPER_METHOD_POLL(),TRUE)

#define HELPER_METHOD_FRAME_BEGIN_RET_NOTHROW_1(probeFailExpr, arg1) \
    static_assert(sizeof(arg1) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\
    HELPER_METHOD_FRAME_BEGIN_EX_NOTHROW( \
        return 0, \
        HELPER_FRAME_DECL(1)(HELPER_FRAME_ARGS(Frame::FRAME_ATTR_NO_THREAD_ABORT), \
        (OBJECTREF*) &arg1), \
        HELPER_METHOD_POLL(), TRUE, probeFailExpr)

#define HELPER_METHOD_FRAME_BEGIN_RET_VC_ATTRIB_1(attribs, arg1) \
    static_assert(sizeof(arg1) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\
    HELPER_METHOD_FRAME_BEGIN_EX( \
        FC_RETURN_VC(), \
        HELPER_FRAME_DECL(1)(HELPER_FRAME_ARGS(attribs), \
        (OBJECTREF*) &arg1), \
        HELPER_METHOD_POLL(),TRUE)

#define HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_2(attribs, arg1, arg2) \
    static_assert(sizeof(arg1) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\
    static_assert(sizeof(arg2) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\
    HELPER_METHOD_FRAME_BEGIN_EX( \
        return 0, \
        HELPER_FRAME_DECL(2)(HELPER_FRAME_ARGS(attribs), \
        (OBJECTREF*) &arg1, (OBJECTREF*) &arg2), \
        HELPER_METHOD_POLL(),TRUE)

#define HELPER_METHOD_FRAME_BEGIN_RET_VC_ATTRIB_2(attribs, arg1, arg2) \
    static_assert(sizeof(arg1) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\
    static_assert(sizeof(arg2) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\
    HELPER_METHOD_FRAME_BEGIN_EX( \
        FC_RETURN_VC(), \
        HELPER_FRAME_DECL(2)(HELPER_FRAME_ARGS(attribs), \
        (OBJECTREF*) &arg1, (OBJECTREF*) &arg2), \
        HELPER_METHOD_POLL(),TRUE)

#define HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_PROTECT(attribs, gc) \
    HELPER_METHOD_FRAME_BEGIN_EX( \
        return 0, \
        HELPER_FRAME_DECL(PROTECT)(HELPER_FRAME_ARGS(attribs), \
        (OBJECTREF*)&(gc), sizeof(gc)/sizeof(OBJECTREF)), \
        HELPER_METHOD_POLL(),TRUE)

#define HELPER_METHOD_FRAME_BEGIN_RET_VC_NOPOLL() \
    HELPER_METHOD_FRAME_BEGIN_RET_VC_ATTRIB_NOPOLL(Frame::FRAME_ATTR_NONE)

#define HELPER_METHOD_FRAME_BEGIN_RET_NOPOLL() \
    HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_NOPOLL(Frame::FRAME_ATTR_NONE)

#define HELPER_METHOD_FRAME_BEGIN_RET_1(arg1) \
    static_assert(sizeof(arg1) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\
    HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_1(Frame::FRAME_ATTR_NONE, arg1)

#define HELPER_METHOD_FRAME_BEGIN_RET_VC_1(arg1) \
    static_assert(sizeof(arg1) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\
    HELPER_METHOD_FRAME_BEGIN_RET_VC_ATTRIB_1(Frame::FRAME_ATTR_NONE, arg1)
#define HELPER_METHOD_FRAME_BEGIN_RET_2(arg1, arg2) \
    static_assert(sizeof(arg1) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\
    static_assert(sizeof(arg2) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\
    HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_2(Frame::FRAME_ATTR_NONE, arg1, arg2)

#define HELPER_METHOD_FRAME_BEGIN_RET_VC_2(arg1, arg2) \
    static_assert(sizeof(arg1) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\
    static_assert(sizeof(arg2) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\
    HELPER_METHOD_FRAME_BEGIN_RET_VC_ATTRIB_2(Frame::FRAME_ATTR_NONE, arg1, arg2)

#define HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc) \
    HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_PROTECT(Frame::FRAME_ATTR_NONE, gc)

// Convenience END forms pairing with the corresponding BEGIN flavors above.
#define HELPER_METHOD_FRAME_END() HELPER_METHOD_FRAME_END_EX({},FALSE)
#define HELPER_METHOD_FRAME_END_POLL() HELPER_METHOD_FRAME_END_EX(HELPER_METHOD_POLL(),TRUE)
#define HELPER_METHOD_FRAME_END_NOTHROW()HELPER_METHOD_FRAME_END_EX_NOTHROW({},FALSE)

// This is the fastest way to do a GC poll if you have already erected a HelperMethodFrame
#define HELPER_METHOD_POLL() { __helperframe.Poll(); INCONTRACT(__fCallCheck.SetDidPoll()); }

// The HelperMethodFrame knows how to get its return address.  Let other code get at it, too.
// (Uses comma operator to call InsureInit & discard result.)
#define HELPER_METHOD_FRAME_GET_RETURN_ADDRESS() \
    ( static_cast<UINT_PTR>( (__helperframe.InsureInit(false, NULL)), (__helperframe.MachineState()->GetRetAddr()) ) )

// Very short routines, or routines that are guaranteed to force GC or EH
#define FC_GC_POLL_NOT_NEEDED() INCONTRACT(__fCallCheck.SetNotNeeded())

// Polls for a pending GC; optionally protects one object across the poll.
Object* FC_GCPoll(void* me, Object* objToProtect = NULL);

// Poll and, if threads are being trapped for a GC, cooperate and then return
// 'ret' from the enclosing FCall.  The FC_NO_TAILCALL loop is a side effect
// the compiler cannot remove, which blocks tail-call optimization.
#define FC_GC_POLL_EX(ret) \
    { \
        INCONTRACT(Thread::TriggersGC(GetThread());) \
        INCONTRACT(__fCallCheck.SetDidPoll();) \
        if (g_TrapReturningThreads.LoadWithoutBarrier()) \
        { \
            if (FC_GCPoll(__me)) \
                return ret; \
            while (0 == FC_NO_TAILCALL) { }; /* side effect the compiler can't remove */ \
        } \
    }

#define FC_GC_POLL() FC_GC_POLL_EX(;)
#define FC_GC_POLL_RET() FC_GC_POLL_EX(0)

// Poll, keeping 'obj' protected across the poll, and return it.
#define FC_GC_POLL_AND_RETURN_OBJREF(obj) \
    { \
        INCONTRACT(__fCallCheck.SetDidPoll();) \
        Object* __temp = OBJECTREFToObject(obj); \
        if (g_TrapReturningThreads.LoadWithoutBarrier()) \
        { \
            __temp = FC_GCPoll(__me, __temp); \
            while (0 == FC_NO_TAILCALL) { }; /* side effect the compiler can't remove */ \
        } \
        return __temp; \
    }

#if defined(ENABLE_CONTRACTS)
#define FC_CAN_TRIGGER_GC() FCallGCCanTrigger::Enter()
#define FC_CAN_TRIGGER_GC_END() FCallGCCanTrigger::Leave(__FUNCTION__, __FILE__, __LINE__)

#define FC_CAN_TRIGGER_GC_HAVE_THREAD(thread) FCallGCCanTrigger::Enter(thread)
#define FC_CAN_TRIGGER_GC_HAVE_THREADEND(thread) FCallGCCanTrigger::Leave(thread, __FUNCTION__, __FILE__, __LINE__)

// turns on forbidGC for the lifetime of the instance (RAII-style scope guard;
// ctor/dtor are defined elsewhere)
class ForbidGC {
protected:
    Thread *m_pThread;
public:
    ForbidGC(const char *szFile, int lineNum);
    ~ForbidGC();
};

// this little helper class checks to make certain
// 1) ForbidGC is set throughout the routine.
// 2) Sometime during the routine, a GC poll is done

// Debug-only checker (see points 1 and 2 above); the SetDidPoll/SetNotNeeded
// flags are inspected in the destructor (defined elsewhere).
class FCallCheck : public ForbidGC {
public:
    FCallCheck(const char *szFile, int lineNum);
    ~FCallCheck();
    void SetDidPoll() {LIMITED_METHOD_CONTRACT; didGCPoll = true; }
    void SetNotNeeded() {LIMITED_METHOD_CONTRACT; notNeeded = true; }

private:
#ifdef _DEBUG
    DWORD unbreakableLockCount;
#endif
    bool didGCPoll;              // GC poll was done
    bool notNeeded;              // GC poll not needed
    unsigned __int64 startTicks; // tick count at beginning of FCall
};

// FC_COMMON_PROLOG is used for both FCalls and HCalls
#define FC_COMMON_PROLOG(target, assertFn) \
    /* The following line has to be first. We do not want to trash last error */ \
    DWORD __lastError = ::GetLastError(); \
    static void* __cache = 0; \
    assertFn(__cache, (LPVOID)target); \
    { \
        Thread *_pThread = GetThread(); \
        Thread::ObjectRefFlush(_pThread); \
    } \
    FCallCheck __fCallCheck(__FILE__, __LINE__); \
    FCALL_TRANSITION_BEGIN(); \
    ::SetLastError(__lastError); \

void FCallAssert(void*& cache, void* target);
void HCallAssert(void*& cache, void* target);

#else
#define FC_COMMON_PROLOG(target, assertFn) FCALL_TRANSITION_BEGIN()
#define FC_CAN_TRIGGER_GC()
#define FC_CAN_TRIGGER_GC_END()
#endif // ENABLE_CONTRACTS

// #FC_INNER
// Macros that allow an fcall to be split into two functions to avoid the helper frame overhead on common fast
// codepaths.
//
// The helper routine needs to know the name of the routine that called it so that it can look up the name of
// the managed routine this code is associated with (for managed stack traces).  This is passed with the
// FC_INNER_PROLOG macro.
//
// The helper can set up a HELPER_METHOD_FRAME, but should pass the
// Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2 which indicates the exact number of
// unwinds to do to get back to managed code.  Currently we only support depth 2 which means that the
// HELPER_METHOD_FRAME needs to be set up in the function directly called by the FCALL.  The helper should
// use the NOINLINE macro to prevent the compiler from inlining it into the FCALL (which would obviously
// mess up the unwind count).
//
// The other invariant that needs to hold is that the epilog walker needs to be able to get from the call to
// the helper routine to the end of the FCALL using trivial heuristics.  The easiest (and only supported)
// way of doing this is to place your helper right before a return (eg at the end of the method).  Generally
// this is not a problem at all, since the FCALL itself will pick off some common case and then tail-call to
// the helper for everything else.  You must use the code:FC_INNER_RETURN macros to do the call, to insure
// that the C++ compiler does not tail-call optimize the call to the inner function and mess up the stack
// depth.
//
// see code:ObjectNative::GetClass for an example
//
#define FC_INNER_PROLOG(outerfuncname) \
    LPVOID __me; \
    __me = GetEEFuncEntryPointMacro(outerfuncname); \
    FC_CAN_TRIGGER_GC(); \
    INCONTRACT(FCallCheck __fCallCheck(__FILE__, __LINE__));

// This variant should be used for inner fcall functions that have the
// __me value passed as an argument to the function. This allows
// inner functions to be shared across multiple fcalls.
#define FC_INNER_PROLOG_NO_ME_SETUP() \
    FC_CAN_TRIGGER_GC(); \
    INCONTRACT(FCallCheck __fCallCheck(__FILE__, __LINE__));

#define FC_INNER_EPILOG() \
    FC_CAN_TRIGGER_GC_END();

// If you are using FC_INNER, and you are tail calling to the helper method (a common case), then you need
// to use the FC_INNER_RETURN macros (there is one for methods that return a value and another if the
// function returns void).  This macro's purpose is to inhibit any tail call optimization the C++ compiler
// might do, which would otherwise confuse the epilog walker.
//
// * See #FC_INNER for more
extern RAW_KEYWORD(volatile) int FC_NO_TAILCALL;

#define FC_INNER_RETURN(type, expr) \
    type __retVal = expr; \
    while (0 == FC_NO_TAILCALL) { }; /* side effect the compiler can't remove */ \
    return(__retVal);

#define FC_INNER_RETURN_VOID(stmt) \
    stmt; \
    while (0 == FC_NO_TAILCALL) { }; /* side effect the compiler can't remove */ \
    return;

//==============================================================================================
// FIMPLn: A set of macros for generating the proto for the actual
// implementation (use FDECLN for header protos.)
//
// The hidden "__me" variable lets us recover the original MethodDesc*
// so any thrown exceptions will have the correct stack trace. FCThrow()
// passes this along to __FCThrowInternal().
//==============================================================================================

#define GetEEFuncEntryPointMacro(func) ((LPVOID)(func))

#define FCIMPL_PROLOG(funcname) \
    LPVOID __me; \
    __me = GetEEFuncEntryPointMacro(funcname); \
    FC_COMMON_PROLOG(__me, FCallAssert)

#if defined(_DEBUG) && !defined(__GNUC__)
// Build the list of all fcalls signatures. It is used in binder.cpp to verify
// compatibility of managed and unmanaged fcall signatures. The check is currently done
// for x86 only.
#define CHECK_FCALL_SIGNATURE
#endif

#ifdef CHECK_FCALL_SIGNATURE
// Self-registering node: each FCSIGCHECK use links a signature record onto
// the global g_pFCSigCheck list at static-init time.
struct FCSigCheck {
public:
    FCSigCheck(void* fnc, const char* sig)
    {
        LIMITED_METHOD_CONTRACT;
        func = fnc;
        signature = sig;
        next = g_pFCSigCheck;
        g_pFCSigCheck = this;
    }

    FCSigCheck* next;
    void* func;
    const char* signature;

    static FCSigCheck* g_pFCSigCheck;
};

#define FCSIGCHECK(funcname, signature) \
    static FCSigCheck UNIQUE_LABEL(FCSigCheck)(GetEEFuncEntryPointMacro(funcname), signature);

#else // CHECK_FCALL_SIGNATURE

#define FCSIGCHECK(funcname, signature)

#endif // !CHECK_FCALL_SIGNATURE

// FCIMPLn opens the definition of an n-argument FCall; the matching
// FCIMPLEND closes it.  Under the SWIZZLE_* defines the C parameter order is
// permuted relative to the declared order — presumably to match the JIT's
// register/stack argument layout on the target platform (TODO confirm
// against the platform ABI); the int /* EAX */ etc. placeholders burn the
// register slots the JIT does not use for user arguments.
#ifdef SWIZZLE_STKARG_ORDER
#ifdef SWIZZLE_REGARG_ORDER

#define FCIMPL0(rettype, funcname) rettype F_CALL_CONV funcname() { FCIMPL_PROLOG(funcname)
#define FCIMPL1(rettype, funcname, a1) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a1) { FCIMPL_PROLOG(funcname)
#define FCIMPL1_V(rettype, funcname, a1) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, int /* ECX */, a1) { FCIMPL_PROLOG(funcname)
#define FCIMPL2(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1) { FCIMPL_PROLOG(funcname)
#define FCIMPL2_VV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, int /* ECX */, a2, a1) { FCIMPL_PROLOG(funcname)
#define FCIMPL2_VI(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a2, a1) { FCIMPL_PROLOG(funcname)
#define FCIMPL2_IV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a1, a2) { FCIMPL_PROLOG(funcname)
#define FCIMPL3(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a3) { FCIMPL_PROLOG(funcname)
#define FCIMPL3_IIV(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a3) { FCIMPL_PROLOG(funcname)
#define FCIMPL3_VII(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, a3, a2, a1) { FCIMPL_PROLOG(funcname)
#define FCIMPL3_IVV(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a1, a3, a2) { FCIMPL_PROLOG(funcname)
#define FCIMPL3_IVI(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, a3, a1, a2) { FCIMPL_PROLOG(funcname)
#define FCIMPL3_VVI(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a3, a2, a1) { FCIMPL_PROLOG(funcname)
#define FCIMPL3_VVV(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, int /* ECX */, a3, a2, a1) { FCIMPL_PROLOG(funcname)
#define FCIMPL4(rettype, funcname, a1, a2, a3, a4) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a4, a3) { FCIMPL_PROLOG(funcname)
#define FCIMPL5(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a5, a4, a3) { FCIMPL_PROLOG(funcname)
#define FCIMPL6(rettype, funcname, a1, a2, a3, a4, a5, a6) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname)
#define FCIMPL7(rettype, funcname, a1, a2, a3, a4, a5, a6, a7) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname)
#define FCIMPL8(rettype, funcname, a1, a2, a3, a4, a5, a6, a7, a8) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname)
#define FCIMPL9(rettype, funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a9, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname)
#define FCIMPL10(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a10, a9, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname)
#define FCIMPL11(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a11, a10, a9, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname)
#define FCIMPL12(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname)
#define FCIMPL13(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a13, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname)
#define FCIMPL14(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a14, a13, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname)

#define FCIMPL5_IVI(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(int /* EAX */, a3, a1, a5, a4, a2) { FCIMPL_PROLOG(funcname)
#define FCIMPL5_VII(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(int /* EAX */, a3, a2, a5, a4, a1) { FCIMPL_PROLOG(funcname)

#else // SWIZZLE_REGARG_ORDER

// Stack-order swizzle only: FCSIGCHECK records the declared signature ("V"
// marks value-type arguments) for the debug-time managed/unmanaged
// signature-compatibility check.
#define FCIMPL0(rettype, funcname) FCSIGCHECK(funcname, #rettype) \
    rettype F_CALL_CONV funcname() { FCIMPL_PROLOG(funcname)
#define FCIMPL1(rettype, funcname, a1) FCSIGCHECK(funcname, #rettype "," #a1) \
    rettype F_CALL_CONV funcname(a1) { FCIMPL_PROLOG(funcname)
#define FCIMPL1_V(rettype, funcname, a1) FCSIGCHECK(funcname, #rettype "," "V" #a1) \
    rettype F_CALL_CONV funcname(a1) { FCIMPL_PROLOG(funcname)
#define FCIMPL2(rettype, funcname, a1, a2) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2) \
    rettype F_CALL_CONV funcname(a1, a2) { FCIMPL_PROLOG(funcname)
#define FCIMPL2VA(rettype, funcname, a1, a2) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," "...") \
    rettype F_CALL_VA_CONV funcname(a1, a2, ...) { FCIMPL_PROLOG(funcname)
#define FCIMPL2_VV(rettype, funcname, a1, a2) FCSIGCHECK(funcname, #rettype "," "V" #a1 "," "V" #a2) \
    rettype F_CALL_CONV funcname(a2, a1) { FCIMPL_PROLOG(funcname)
#define FCIMPL2_VI(rettype, funcname, a1, a2) FCSIGCHECK(funcname, #rettype "," "V" #a1 "," #a2) \
    rettype F_CALL_CONV funcname(a2, a1) { FCIMPL_PROLOG(funcname)
#define FCIMPL2_IV(rettype, funcname, a1, a2) FCSIGCHECK(funcname, #rettype "," #a1 "," "V" #a2) \
    rettype F_CALL_CONV funcname(a1, a2) { FCIMPL_PROLOG(funcname)
#define FCIMPL3(rettype, funcname, a1, a2, a3) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," #a3) \
    rettype F_CALL_CONV funcname(a1, a2, a3) { FCIMPL_PROLOG(funcname)
#define FCIMPL3_IIV(rettype, funcname, a1, a2, a3) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," "V" #a3) \
    rettype F_CALL_CONV funcname(a1, a2, a3) { FCIMPL_PROLOG(funcname)
#define FCIMPL3_VII(rettype, funcname, a1, a2, a3) FCSIGCHECK(funcname, #rettype "," "V" #a1 "," #a2 "," #a3) \
    rettype F_CALL_CONV funcname(a2, a3, a1) { FCIMPL_PROLOG(funcname)
#define FCIMPL3_IVV(rettype, funcname, a1, a2, a3) FCSIGCHECK(funcname, #rettype "," #a1 "," "V" #a2 "," "V" #a3) \
    rettype F_CALL_CONV funcname(a1, a3, a2) { FCIMPL_PROLOG(funcname)
#define FCIMPL3_IVI(rettype, funcname, a1, a2, a3) FCSIGCHECK(funcname, #rettype "," #a1 "," "V" #a2 "," #a3) \
    rettype F_CALL_CONV funcname(a1, a3, a2) { FCIMPL_PROLOG(funcname)
#define FCIMPL3_VVI(rettype, funcname, a1, a2, a3) FCSIGCHECK(funcname, #rettype "," "V" #a1 "," "V" #a2 "," #a3) \
    rettype F_CALL_CONV funcname(a2, a1, a3) { FCIMPL_PROLOG(funcname)
#define FCIMPL3_VVV(rettype, funcname, a1, a2, a3) FCSIGCHECK(funcname, #rettype "," "V" #a1 "," "V" #a2 "," "V" #a3) \
    rettype F_CALL_CONV funcname(a3, a2, a1) { FCIMPL_PROLOG(funcname)
#define FCIMPL4(rettype, funcname, a1, a2, a3, a4) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," #a3 "," #a4) \
    rettype F_CALL_CONV funcname(a1, a2, a4, a3) { FCIMPL_PROLOG(funcname)
#define FCIMPL5(rettype, funcname, a1, a2, a3, a4, a5) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," #a3 "," #a4 "," #a5) \
    rettype F_CALL_CONV funcname(a1, a2, a5, a4, a3) { FCIMPL_PROLOG(funcname)
#define FCIMPL6(rettype, funcname, a1, a2, a3, a4, a5, a6) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," #a3 "," #a4 "," #a5 "," #a6) \
    rettype F_CALL_CONV funcname(a1, a2, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname)
#define FCIMPL7(rettype, funcname, a1, a2, a3, a4, a5, a6, a7) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," #a3 "," #a4 "," #a5 "," #a6 "," #a7) \
    rettype F_CALL_CONV funcname(a1, a2, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname)
#define FCIMPL8(rettype, funcname, a1, a2, a3, a4, a5, a6, a7, a8) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," #a3 "," #a4 "," #a5 "," #a6 "," #a7 "," #a8) \
    rettype F_CALL_CONV funcname(a1, a2, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname)
#define FCIMPL9(rettype, funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," #a3 "," #a4 "," #a5 "," #a6 "," #a7 "," #a8 "," #a9) \
    rettype F_CALL_CONV funcname(a1, a2, a9, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname)
#define FCIMPL10(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," #a3 "," #a4 "," #a5 "," #a6 "," #a7 "," #a8 "," #a9 "," #a10) \
    rettype F_CALL_CONV funcname(a1, a2, a10, a9, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname)
#define FCIMPL11(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," #a3 "," #a4 "," #a5 "," #a6 "," #a7 "," #a8 "," #a9 "," #a10 "," #a11) \
    rettype F_CALL_CONV funcname(a1, a2, a11, a10, a9, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname)
#define FCIMPL12(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," #a3 "," #a4 "," #a5 "," #a6 "," #a7 "," #a8 "," #a9 "," #a10 "," #a11 "," #a12) \
    rettype F_CALL_CONV funcname(a1, a2, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname)
#define FCIMPL13(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," #a3 "," #a4 "," #a5 "," #a6 "," #a7 "," #a8 "," #a9 "," #a10 "," #a11 "," #a12 "," #a13) \
    rettype F_CALL_CONV funcname(a1, a2, a13, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname)
#define FCIMPL14(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," #a3 "," #a4 "," #a5 "," #a6 "," #a7 "," #a8 "," #a9 "," #a10 "," #a11 "," #a12 "," #a13 "," #a14) \
    rettype F_CALL_CONV funcname(a1, a2, a14, a13, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname)

#define FCIMPL5_IVI(rettype, funcname, a1, a2, a3, a4, a5) FCSIGCHECK(funcname, #rettype "," #a1 "," "V" #a2 "," #a3 "," #a4 "," #a5) \
    rettype F_CALL_CONV funcname(a1, a3, a5, a4, a2) { FCIMPL_PROLOG(funcname)
#define FCIMPL5_VII(rettype, funcname, a1, a2, a3, a4, a5) FCSIGCHECK(funcname, #rettype "," "V" #a1 "," #a2 "," #a3 "," #a4 "," #a5) \
    rettype F_CALL_CONV funcname(a2, a3, a5, a4, a1) { FCIMPL_PROLOG(funcname)

#endif // !SWIZZLE_REGARG_ORDER

#else // SWIZZLE_STKARG_ORDER

// No swizzling: C parameter order matches the declared order exactly.
#define FCIMPL0(rettype, funcname) rettype funcname() { FCIMPL_PROLOG(funcname)
#define FCIMPL1(rettype, funcname, a1) rettype funcname(a1) { FCIMPL_PROLOG(funcname)
#define FCIMPL1_V(rettype, funcname, a1) rettype funcname(a1) { FCIMPL_PROLOG(funcname)
#define FCIMPL2(rettype, funcname, a1, a2) rettype funcname(a1, a2) { FCIMPL_PROLOG(funcname)
#define FCIMPL2VA(rettype, funcname, a1, a2) rettype funcname(a1, a2, ...) { FCIMPL_PROLOG(funcname)
#define FCIMPL2_VV(rettype, funcname, a1, a2) rettype funcname(a1, a2) { FCIMPL_PROLOG(funcname)
#define FCIMPL2_VI(rettype, funcname, a1, a2) rettype funcname(a1, a2) { FCIMPL_PROLOG(funcname)
#define FCIMPL2_IV(rettype, funcname, a1, a2) rettype funcname(a1, a2) { FCIMPL_PROLOG(funcname)
#define FCIMPL3(rettype, funcname, a1, a2, a3) rettype funcname(a1, a2, a3) { FCIMPL_PROLOG(funcname)
#define FCIMPL3_IIV(rettype, funcname, a1, a2, a3) rettype funcname(a1, a2, a3) { FCIMPL_PROLOG(funcname)
#define FCIMPL3_IVV(rettype, funcname, a1, a2, a3) rettype funcname(a1, a2, a3) { FCIMPL_PROLOG(funcname)
#define FCIMPL3_VII(rettype, funcname, a1, a2, a3) rettype funcname(a1, a2, a3) { FCIMPL_PROLOG(funcname)
#define FCIMPL3_IVI(rettype, funcname, a1, a2, a3) rettype funcname(a1, a2, a3) { FCIMPL_PROLOG(funcname)
#define FCIMPL3_VVI(rettype, funcname, a1, a2, a3) rettype funcname(a1, a2, a3) { FCIMPL_PROLOG(funcname)
#define FCIMPL3_VVV(rettype, funcname, a1, a2, a3) rettype funcname(a1, a2, a3) { FCIMPL_PROLOG(funcname)
#define FCIMPL4(rettype, funcname, a1, a2, a3, a4) rettype funcname(a1, a2, a3, a4) { FCIMPL_PROLOG(funcname)
#define FCIMPL5(rettype, funcname, a1, a2, a3, a4, a5) rettype funcname(a1, a2, a3, a4, a5) { FCIMPL_PROLOG(funcname)
#define FCIMPL6(rettype, funcname, a1, a2, a3, a4, a5, a6) rettype funcname(a1, a2, a3, a4, a5, a6) { FCIMPL_PROLOG(funcname)
#define FCIMPL7(rettype, funcname, a1, a2, a3, a4, a5, a6, a7) rettype funcname(a1, a2, a3, a4, a5, a6, a7) { FCIMPL_PROLOG(funcname)
#define FCIMPL8(rettype, funcname, a1, a2, a3, a4, a5, a6, a7, a8) rettype funcname(a1, a2, a3, a4, a5, a6, a7, a8) { FCIMPL_PROLOG(funcname)
#define FCIMPL9(rettype, funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9) rettype funcname(a1, a2, a3, a4, a5, a6, a7, a8, a9) { FCIMPL_PROLOG(funcname)
#define FCIMPL10(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) rettype funcname(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) { FCIMPL_PROLOG(funcname)
#define FCIMPL11(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11) rettype funcname(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11) { FCIMPL_PROLOG(funcname)
#define FCIMPL12(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12) rettype funcname(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12) { FCIMPL_PROLOG(funcname)
#define FCIMPL13(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13) rettype funcname(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13) { FCIMPL_PROLOG(funcname)
#define FCIMPL14(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14) rettype funcname(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14) { FCIMPL_PROLOG(funcname)

#define FCIMPL5_IVI(rettype, funcname, a1, a2, a3, a4, a5) rettype funcname(a1, a2, a3, a4, a5) { FCIMPL_PROLOG(funcname)
#define FCIMPL5_VII(rettype, funcname, a1, a2, a3, a4, a5) rettype funcname(a1, a2, a3, a4, a5) { FCIMPL_PROLOG(funcname)

#endif // !SWIZZLE_STKARG_ORDER

//==============================================================================================
// Use this to terminate an FCIMPLEND.
//==============================================================================================

#define FCIMPL_EPILOG() FCALL_TRANSITION_END()

// Closes the function body opened by the matching FCIMPLn macro.
#define FCIMPLEND FCIMPL_EPILOG(); }

// __me is set to 0 here, so HCalls do not record a method entry point.
#define HCIMPL_PROLOG(funcname) LPVOID __me; __me = 0; FC_COMMON_PROLOG(funcname, HCallAssert)

// HCIMPL macros are just like their FCIMPL counterparts, however
// they do not remember the function they come from. Thus they will not
// show up in a stack trace.  This is what you want for JIT helpers and the like
// The _RAW variants open the body without emitting HCIMPL_PROLOG.
#ifdef SWIZZLE_STKARG_ORDER
#ifdef SWIZZLE_REGARG_ORDER

#define HCIMPL0(rettype, funcname) rettype F_CALL_CONV funcname() { HCIMPL_PROLOG(funcname)
#define HCIMPL1(rettype, funcname, a1) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a1) { HCIMPL_PROLOG(funcname)
#define HCIMPL1_RAW(rettype, funcname, a1) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a1) {
#define HCIMPL1_V(rettype, funcname, a1) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, int /* ECX */, a1) { HCIMPL_PROLOG(funcname)
#define HCIMPL2(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1) { HCIMPL_PROLOG(funcname)
#define HCIMPL2_RAW(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1) {
#define HCIMPL2_VV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, int /* ECX */, a2, a1) { HCIMPL_PROLOG(funcname)
#define HCIMPL2_IV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a1, a2) { HCIMPL_PROLOG(funcname)
#define HCIMPL3(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a3) { HCIMPL_PROLOG(funcname)
#define HCIMPL3_RAW(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a3) {
#define HCIMPL4(rettype, funcname, a1, a2, a3, a4) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a4, a3) { HCIMPL_PROLOG(funcname)
#define HCIMPL5(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a5, a4, a3) { HCIMPL_PROLOG(funcname)

// HCCALLn invokes an HCIMPLn-defined helper with the matching swizzle applied.
#define HCCALL0(funcname) funcname()
#define HCCALL1(funcname, a1) funcname(0, 0, a1)
#define HCCALL1_V(funcname, a1) funcname(0, 0, 0, a1)
#define HCCALL2(funcname, a1, a2) funcname(0, a2, a1)
#define HCCALL3(funcname, a1, a2, a3) funcname(0, a2, a1, a3)
#define HCCALL4(funcname, a1, a2, a3, a4) funcname(0, a2, a1, a4, a3)
#define HCCALL5(funcname, a1, a2, a3, a4, a5) funcname(0, a2, a1, a5, a4, a3)
#define HCCALL1_PTR(rettype, funcptr, a1) rettype (F_CALL_CONV * funcptr)(int /* EAX */, int /* EDX */, a1)
#define HCCALL2_PTR(rettype, funcptr, a1, a2) rettype (F_CALL_CONV * funcptr)(int /* EAX */, a2, a1)

#else // SWIZZLE_REGARG_ORDER

#define HCIMPL0(rettype, funcname) rettype F_CALL_CONV funcname() { HCIMPL_PROLOG(funcname)
#define HCIMPL1(rettype, funcname, a1) rettype F_CALL_CONV funcname(a1) { HCIMPL_PROLOG(funcname)
#define HCIMPL1_RAW(rettype, funcname, a1) rettype F_CALL_CONV funcname(a1) {
#define HCIMPL1_V(rettype, funcname, a1) rettype F_CALL_CONV funcname(a1) { HCIMPL_PROLOG(funcname)
#define HCIMPL2(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2) { HCIMPL_PROLOG(funcname)
#define HCIMPL2_RAW(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2) {
#define HCIMPL2_VV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a2, a1) { HCIMPL_PROLOG(funcname)
#define HCIMPL2_IV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2) { HCIMPL_PROLOG(funcname)
#define HCIMPL3(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a2, a3) { HCIMPL_PROLOG(funcname)
#define HCIMPL3_RAW(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a2, a3) {
#define HCIMPL4(rettype, funcname, a1, a2, a3, a4) rettype F_CALL_CONV funcname(a1, a2, a4, a3) { HCIMPL_PROLOG(funcname)
#define HCIMPL5(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(a1, a2, a5, a4, a3) { HCIMPL_PROLOG(funcname)

#define HCCALL0(funcname) funcname()
#define HCCALL1(funcname, a1) funcname(a1)
#define HCCALL1_V(funcname, a1) funcname(a1)
#define HCCALL2(funcname, a1, a2) funcname(a1, a2)
#define HCCALL3(funcname, a1, a2, a3) funcname(a1, a2, a3)
#define HCCALL4(funcname, a1, a2, a3, a4) funcname(a1, a2, a4, a3)
#define HCCALL5(funcname, a1, a2, a3, a4, a5) funcname(a1, a2, a5, a4, a3)
#define HCCALL1_PTR(rettype, funcptr, a1) rettype (F_CALL_CONV * (funcptr))(a1)
#define HCCALL2_PTR(rettype, funcptr, a1, a2) rettype (F_CALL_CONV * (funcptr))(a1, a2)

#endif // !SWIZZLE_REGARG_ORDER

#else // SWIZZLE_STKARG_ORDER

#define HCIMPL0(rettype, funcname) rettype F_CALL_CONV funcname() { HCIMPL_PROLOG(funcname)
#define HCIMPL1(rettype, funcname, a1) rettype F_CALL_CONV funcname(a1) { HCIMPL_PROLOG(funcname)
#define HCIMPL1_RAW(rettype, funcname, a1) rettype F_CALL_CONV funcname(a1) {
#define HCIMPL1_V(rettype, funcname, a1) rettype F_CALL_CONV funcname(a1) { HCIMPL_PROLOG(funcname)
#define HCIMPL2(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2) { HCIMPL_PROLOG(funcname)
#define HCIMPL2_RAW(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2) {
#define HCIMPL2_VV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2) { HCIMPL_PROLOG(funcname)
#define HCIMPL2_IV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2) { HCIMPL_PROLOG(funcname)
#define HCIMPL3(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a2, a3) { HCIMPL_PROLOG(funcname)
#define HCIMPL3_RAW(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a2, a3) {
#define HCIMPL4(rettype, funcname, a1, a2, a3, a4) rettype F_CALL_CONV funcname(a1, a2, a3, a4) { HCIMPL_PROLOG(funcname)
#define HCIMPL5(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(a1, a2, a3, a4, a5) { HCIMPL_PROLOG(funcname)

#define HCCALL0(funcname) funcname()
#define HCCALL1(funcname, a1) funcname(a1)
#define HCCALL1_V(funcname, a1) funcname(a1)
#define HCCALL2(funcname, a1, a2) funcname(a1, a2)
#define HCCALL3(funcname, a1, a2, a3) funcname(a1, a2, a3)
#define HCCALL4(funcname, a1, a2, a3, a4) funcname(a1, a2, a3, a4)
#define HCCALL5(funcname, a1, a2, a3, a4, a5) funcname(a1, a2, a3, a4, a5)
#define HCCALL1_PTR(rettype, funcptr, a1) rettype (F_CALL_CONV * (funcptr))(a1)
#define HCCALL2_PTR(rettype, funcptr, a1, a2) rettype (F_CALL_CONV * (funcptr))(a1, a2)

#endif // !SWIZZLE_STKARG_ORDER

#define HCIMPLEND_RAW }
#define HCIMPLEND FCALL_TRANSITION_END(); }
//============================================================================================== // Throws an exception from an FCall. See rexcep.h for a list of valid // exception codes. //============================================================================================== #define FCThrow(reKind) FCThrowEx(reKind, 0, 0, 0, 0) //============================================================================================== // This version lets you attach a message with inserts (similar to // COMPlusThrow()). //============================================================================================== #define FCThrowEx(reKind, resID, arg1, arg2, arg3) \ { \ while (NULL == \ __FCThrow(__me, reKind, resID, arg1, arg2, arg3)) {}; \ return 0; \ } //============================================================================================== // Like FCThrow but can be used for a VOID-returning FCall. The only // difference is in the "return" statement. //============================================================================================== #define FCThrowVoid(reKind) FCThrowExVoid(reKind, 0, 0, 0, 0) //============================================================================================== // This version lets you attach a message with inserts (similar to // COMPlusThrow()). //============================================================================================== #define FCThrowExVoid(reKind, resID, arg1, arg2, arg3) \ { \ while (NULL == \ __FCThrow(__me, reKind, resID, arg1, arg2, arg3)) {}; \ return; \ } // Use FCThrowRes to throw an exception with a localized error message from the // ResourceManager in managed code. 
#define FCThrowRes(reKind, resourceName) FCThrowArgumentEx(reKind, NULL, resourceName)
#define FCThrowArgumentNull(argName) FCThrowArgumentEx(kArgumentNullException, argName, NULL)
#define FCThrowArgumentOutOfRange(argName, message) FCThrowArgumentEx(kArgumentOutOfRangeException, argName, message)
#define FCThrowArgument(argName, message) FCThrowArgumentEx(kArgumentException, argName, message)

// Common expansion of the argument-exception throwers above. The loop/dead-return
// shape mirrors FCThrowEx: it defeats tail-call optimization and forces a
// parseable epilog. Use only inside an FCall that returns a value.
#define FCThrowArgumentEx(reKind, argName, resourceName)        \
    {                                                           \
        while (NULL ==                                          \
            __FCThrowArgument(__me, reKind, argName, resourceName)) {}; \
        return 0;                                               \
    }

// Use FCThrowResVoid to throw an exception with a localized error message from the
// ResourceManager in managed code. (These are the void-returning variants of the
// macros above, for FCalls that return no value.)
#define FCThrowResVoid(reKind, resourceName) FCThrowArgumentVoidEx(reKind, NULL, resourceName)
#define FCThrowArgumentNullVoid(argName) FCThrowArgumentVoidEx(kArgumentNullException, argName, NULL)
#define FCThrowArgumentOutOfRangeVoid(argName, message) FCThrowArgumentVoidEx(kArgumentOutOfRangeException, argName, message)
#define FCThrowArgumentVoid(argName, message) FCThrowArgumentVoidEx(kArgumentException, argName, message)

#define FCThrowArgumentVoidEx(reKind, argName, resourceName)    \
    {                                                           \
        while (NULL ==                                          \
            __FCThrowArgument(__me, reKind, argName, resourceName)) {}; \
        return;                                                 \
    }

// The x86 JIT calling convention expects returned small types (e.g. bool) to be
// widened on return. The C/C++ calling convention does not guarantee returned
// small types to be widened. Small types have to be artificially widened on return
// to fit the x86 JIT calling convention. Thus fcalls returning small types have to
// use the FC_XXX_RET types to force the C/C++ compiler to do the widening.
//
// The most common small return type of FCALLs is bool. The widening of bool is
// especially tricky since the value has to be also normalized. FC_BOOL_RET and
// FC_RETURN_BOOL macros are provided to make it fool-proof.
// FCALLs returning bool
// should be implemented using the following pattern:
//
//     FCIMPL0(FC_BOOL_RET, Foo)    // the return type should be FC_BOOL_RET
//         BOOL ret;
//
//         FC_RETURN_BOOL(ret);     // return statements should be FC_RETURN_BOOL
//     FCIMPLEND
//
// These rules are verified in binder.cpp if COMPlus_ConsistencyCheck is set.

#ifdef _PREFAST_

// Use prefast build to ensure that functions returning FC_BOOL_RET
// are using FC_RETURN_BOOL to return it. Missing FC_RETURN_BOOL will
// result into type mismatch error in prefast builds. This will also
// catch misuses of FC_BOOL_RET for other places (e.g. in FCALL parameters).

typedef LPVOID FC_BOOL_RET;
#define FC_RETURN_BOOL(x) do { return (LPVOID)!!(x); } while(0)

#else

#if defined(TARGET_X86) || defined(TARGET_AMD64)
// The return value is artificially widened on x86 and amd64
typedef INT32 FC_BOOL_RET;
#else
typedef CLR_BOOL FC_BOOL_RET;
#endif

// !! both widens and normalizes the value to exactly 0 or 1.
#define FC_RETURN_BOOL(x)   do { return !!(x); } while(0)

#endif

#if defined(TARGET_X86) || defined(TARGET_AMD64)
// The return value is artificially widened on x86 and amd64
typedef UINT32 FC_CHAR_RET;
typedef INT32 FC_INT8_RET;
typedef UINT32 FC_UINT8_RET;
typedef INT32 FC_INT16_RET;
typedef UINT32 FC_UINT16_RET;
#else
typedef CLR_CHAR FC_CHAR_RET;
typedef INT8 FC_INT8_RET;
typedef UINT8 FC_UINT8_RET;
typedef INT16 FC_INT16_RET;
typedef UINT16 FC_UINT16_RET;
#endif

// FC_TypedByRef should be used for TypedReferences in FCall signatures
#define FC_TypedByRef   TypedByRef
#define FC_DECIMAL      DECIMAL

// The fcall entrypoints have to be at unique addresses. Use this helper macro to make
// the code of the fcalls unique if you get an assert in ecall.cpp that mentions it.
// The parameter of the FCUnique macro is an arbitrary 32-bit random non-zero number.
#define FCUnique(unique) { Volatile<int> u = (unique); while (u.LoadWithoutBarrier() == 0) { }; } // FCALL contracts come in two forms: // // Short form that should be used if the FCALL contract does not have any extras like preconditions, failure injection. Example: // // FCIMPL0(void, foo) // { // FCALL_CONTRACT; // ... // // Long form that should be used otherwise. Example: // // FCIMPL1(void, foo, void *p) // { // CONTRACTL { // FCALL_CHECK; // PRECONDITION(CheckPointer(p)); // } CONTRACTL_END; // ... // // FCALL_CHECK defines the actual contract conditions required for FCALLs // #define FCALL_CHECK \ THROWS; \ DISABLED(GC_TRIGGERS); /* FCALLS with HELPER frames have issues with GC_TRIGGERS */ \ MODE_COOPERATIVE; // // FCALL_CONTRACT should be the following shortcut: // // #define FCALL_CONTRACT CONTRACTL { FCALL_CHECK; } CONTRACTL_END; // // Since there is very little value in having runtime contracts in FCalls, FCALL_CONTRACT is defined as static contract only for performance reasons. // #define FCALL_CONTRACT \ STATIC_CONTRACT_THROWS; \ /* FCALLS are a special case contract wise, they are "NOTRIGGER, unless you setup a frame" */ \ STATIC_CONTRACT_GC_NOTRIGGER; \ STATIC_CONTRACT_MODE_COOPERATIVE #endif //__FCall_h__
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

// FCall.H
//
//
// FCall is a high-performance alternative to ECall. Unlike ECall, FCall
// methods do not necessarily create a frame. Jitted code calls directly
// to the FCall entry point. It is possible to do operations that need
// to have a frame within an FCall; you need to manually set up the frame
// before you do such operations.

// It is illegal to cause a GC or EH to happen in an FCALL before setting
// up a frame. To prevent accidentally violating this rule, FCALLs turn
// on BEGINGCFORBID, which ensures that these things can't happen in a
// checked build without causing an ASSERTE. Once you set up a frame,
// this state is turned off as long as the frame is active, and then is
// turned on again when the frame is torn down. This mechanism should
// be sufficient to ensure that the rules are followed.

// In general you set up a frame by using the following macros

//      HELPER_METHOD_FRAME_BEGIN_RET*()    // Use if the FCALL has a return value
//      HELPER_METHOD_FRAME_BEGIN*()        // Use if the FCALL does not return a value
//      HELPER_METHOD_FRAME_END*()

// These macros introduce a scope which is protected by a HelperMethodFrame.
// In this scope you can do EH or GC. There are rules associated with
// their use. In particular

//      1) These macros can only be used in the body of a FCALL (that is,
//         something declared using the FCIMPL* or HCIMPL* macros).
//      2) You may not perform a 'return' within this scope.

// Compile time errors occur if you try to violate either of these rules.

// The frame that is set up does NOT protect any GC variables (in particular the
// arguments of the FCALL). Thus you need to do an explicit GCPROTECT once the
// frame is established if you need to protect an argument. There are flavors
// of HELPER_METHOD_FRAME that protect a certain number of GC variables.
For // example // HELPER_METHOD_FRAME_BEGIN_RET_2(arg1, arg2) // will protect the GC variables arg1, and arg2 as well as erecting the frame. // Another invariant that you must be aware of is the need to poll to see if // a GC is needed by some other thread. Unless the FCALL is VERY short, // every code path through the FCALL must do such a poll. The important // thing here is that a poll will cause a GC, and thus you can only do it // when all you GC variables are protected. To make things easier // HELPER_METHOD_FRAMES that protect things automatically do this poll. // If you don't need to protect anything HELPER_METHOD_FRAME_BEGIN_0 // will also do the poll. // Sometimes it is convenient to do the poll a the end of the frame, you // can use HELPER_METHOD_FRAME_BEGIN_NOPOLL and HELPER_METHOD_FRAME_END_POLL // to do the poll at the end. If somewhere in the middle is the best // place you can do that too with HELPER_METHOD_POLL() // You don't need to erect a helper method frame to do a poll. FC_GC_POLL // can do this (remember all your GC refs will be trashed). // Finally if your method is VERY small, you can get away without a poll, // you have to use FC_GC_POLL_NOT_NEEDED to mark this. // Use sparingly! // It is possible to set up the frame as the first operation in the FCALL and // tear it down as the last operation before returning. This works and is // reasonably efficient (as good as an ECall), however, if it is the case that // you can defer the setup of the frame to an unlikely code path (exception path) // that is much better. // If you defer setup of the frame, all codepaths leading to the frame setup // must be wrapped with PERMIT_HELPER_METHOD_FRAME_BEGIN/END. These block // certain compiler optimizations that interfere with the delayed frame setup. // These macros are automatically included in the HCIMPL, FCIMPL, and frame // setup macros. 
// <TODO>TODO: we should have a way of doing a trial allocation (an allocation that // will fail if it would cause a GC). That way even FCALLs that need to allocate // would not necessarily need to set up a frame. </TODO> // It is common to only need to set up a frame in order to throw an exception. // While this can be done by doing // HELPER_METHOD_FRAME_BEGIN() // Use if FCALL does not return a value // COMPlusThrow(execpt); // HELPER_METHOD_FRAME_END() // It is more efficient (in space) to use convenience macro FCTHROW that does // this for you (sets up a frame, and does the throw). // FCTHROW(except) // Since FCALLS have to conform to the EE calling conventions and not to C // calling conventions, FCALLS, need to be declared using special macros (FCIMPL*) // that implement the correct calling conventions. There are variants of these // macros depending on the number of args, and sometimes the types of the // arguments. //------------------------------------------------------------------------ // A very simple example: // // FCIMPL2(INT32, Div, INT32 x, INT32 y) // { // if (y == 0) // FCThrow(kDivideByZeroException); // return x/y; // } // FCIMPLEND // // // *** WATCH OUT FOR THESE GOTCHAS: *** // ------------------------------------ // - In your FCDECL & FCIMPL protos, don't declare a param as type OBJECTREF // or any of its deriveds. This will break on the checked build because // __fastcall doesn't enregister C++ objects (which OBJECTREF is). // Instead, you need to do something like; // // FCIMPL(.., .., Object* pObject0) // OBJECTREF pObject = ObjectToOBJECTREF(pObject0); // FCIMPL // // For similar reasons, use Object* rather than OBJECTREF as a return type. // Consider either using ObjectToOBJECTREF or calling VALIDATEOBJECTREF // to make sure your Object* is valid. // // - FCThrow() must be called directly from your FCall impl function: it // cannot be called from a subfunction. 
Calling from a subfunction breaks // the VC code parsing workaround that lets us recover the callee saved registers. // Fortunately, you'll get a compile error complaining about an // unknown variable "__me". // // - If your FCall returns VOID, you must use FCThrowVoid() rather than // FCThrow(). This is because FCThrow() has to generate an unexecuted // "return" statement for the code parser. // // - On x86, if first and/or second argument of your FCall cannot be passed // in either of the __fastcall registers (ECX/EDX), you must use "V" versions // of FCDECL and FCIMPL macros to enregister arguments correctly. Some of the // most common types that fit this requirement are 64-bit values (i.e. INT64 or // UINT64) and floating-point values (i.e. FLOAT or DOUBLE). For example, FCDECL3_IVI // must be used for FCalls that take 3 arguments and 2nd argument is INT64 and // FDECL2_VV must be used for FCalls that take 2 arguments where both are FLOAT. // // - You may use structs for protecting multiple OBJECTREF's simultaneously. // In these cases, you must use a variant of a helper method frame with PROTECT // in the name, to ensure all the OBJECTREF's in the struct get protected. // Also, initialize all the OBJECTREF's first. Like this: // // FCIMPL4(Object*, COMNlsInfo::nativeChangeCaseString, LocaleIDObject* localeUNSAFE, // INT_PTR pNativeTextInfo, StringObject* pStringUNSAFE, CLR_BOOL bIsToUpper) // { // [ignoring CONTRACT for now] // struct _gc // { // STRINGREF pResult; // STRINGREF pString; // LOCALEIDREF pLocale; // } gc; // gc.pResult = NULL; // gc.pString = ObjectToSTRINGREF(pStringUNSAFE); // gc.pLocale = (LOCALEIDREF)ObjectToOBJECTREF(localeUNSAFE); // // HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc) // // If you forgot the PROTECT part, the macro will only protect the first OBJECTREF, // introducing a subtle GC hole in your code. Fortunately, we now issue a // compile-time error if you forget. 
// How FCall works:
// ----------------
// An FCall target uses __fastcall or some other calling convention to
// match the IL calling convention exactly. Thus, a call to FCall is a direct
// call to the target w/ no intervening stub or frame.
//
// The tricky part is when FCThrow is called. FCThrow must generate
// a proper method frame before allocating and throwing the exception.
// To do this, it must recover several things:
//
//    - The location of the FCIMPL's return address (since that's
//      where the frame will be based.)
//
//    - The on-entry values of the callee-saved regs, which must
//      be recorded in the frame so that GC can update them.
//      Depending on how VC compiles your FCIMPL, those values are still
//      in the original registers or saved on the stack.
//
//      To figure out which, FCThrow() generates the code:
//
//          while (NULL == __FCThrow(__me, ...)) {};
//          return 0;
//
//      The "return" statement will never execute; but its presence guarantees
//      that VC will follow the __FCThrow() call with a VC epilog
//      that restores the callee-saved registers using a pretty small
//      and predictable set of Intel opcodes. __FCThrow() parses this
//      epilog and simulates its execution to recover the callee-saved
//      registers.
//
//      The while loop is to prevent the compiler from doing tail call optimizations.
//      The helper frame interpreter needs the frame to be present.
//
//    - The MethodDesc* that this FCall implements. This MethodDesc*
//      is part of the frame and ensures that the FCall will appear
//      in the exception's stack trace. To get this, FCDECL declares
//      a static local __me, initialized to point to the FC target itself.
//      This address is exactly what's stored in the ECall lookup tables;
//      so __FCThrow() simply does a reverse lookup on that table to recover
//      the MethodDesc*.
// #ifndef __FCall_h__ #define __FCall_h__ #include "gms.h" #include "runtimeexceptionkind.h" #include "debugreturn.h" //============================================================================================== // These macros defeat compiler optimizations that might mix nonvolatile // register loads and stores with other code in the function body. This // creates problems for the frame setup code, which assumes that any // nonvolatiles that are saved at the point of the frame setup will be // re-loaded when the frame is popped. // // Currently this is only known to be an issue on AMD64. It's uncertain // whether it is an issue on x86. //============================================================================================== #if defined(TARGET_AMD64) && !defined(TARGET_UNIX) // // On AMD64 this is accomplished by including a setjmp anywhere in a function. // Doesn't matter whether it is reachable or not, and in fact in optimized // builds the setjmp is removed altogether. // #include <setjmp.h> #ifdef _DEBUG // // Linked list of unmanaged methods preceeding a HelperMethodFrame push. This // is linked onto the current Thread. Each list entry is stack-allocated so it // can be associated with an unmanaged frame. Each unmanaged frame needs to be // associated with at least one list entry. // struct HelperMethodFrameCallerList { HelperMethodFrameCallerList *pCaller; }; #endif // _DEBUG // // Resets the Thread state at a new managed -> fcall transition. // class FCallTransitionState { public: FCallTransitionState () NOT_DEBUG({ LIMITED_METHOD_CONTRACT; }); ~FCallTransitionState () NOT_DEBUG({ LIMITED_METHOD_CONTRACT; }); #ifdef _DEBUG private: Thread *m_pThread; HelperMethodFrameCallerList *m_pPreviousHelperMethodFrameCallerList; #endif // _DEBUG }; // // Pushes/pops state for each caller. 
// class PermitHelperMethodFrameState { public: PermitHelperMethodFrameState () NOT_DEBUG({ LIMITED_METHOD_CONTRACT; }); ~PermitHelperMethodFrameState () NOT_DEBUG({ LIMITED_METHOD_CONTRACT; }); static VOID CheckHelperMethodFramePermitted () NOT_DEBUG({ LIMITED_METHOD_CONTRACT; }); #ifdef _DEBUG private: Thread *m_pThread; HelperMethodFrameCallerList m_ListEntry; #endif // _DEBUG }; // // Resets the Thread state after the HelperMethodFrame is pushed. At this // point, the HelperMethodFrame is capable of unwinding to the managed code, // so we can reset the Thread state for any nested fcalls. // class CompletedFCallTransitionState { public: CompletedFCallTransitionState () NOT_DEBUG({ LIMITED_METHOD_CONTRACT; }); ~CompletedFCallTransitionState () NOT_DEBUG({ LIMITED_METHOD_CONTRACT; }); #ifdef _DEBUG private: HelperMethodFrameCallerList *m_pLastHelperMethodFrameCallerList; #endif // _DEBUG }; #define PERMIT_HELPER_METHOD_FRAME_BEGIN() \ if (1) \ { \ PermitHelperMethodFrameState ___PermitHelperMethodFrameState; #define PERMIT_HELPER_METHOD_FRAME_END() \ } \ else \ { \ jmp_buf ___jmpbuf; \ setjmp(___jmpbuf); \ __assume(0); \ } #define FCALL_TRANSITION_BEGIN() \ FCallTransitionState ___FCallTransitionState; \ PERMIT_HELPER_METHOD_FRAME_BEGIN(); #define FCALL_TRANSITION_END() \ PERMIT_HELPER_METHOD_FRAME_END(); #define CHECK_HELPER_METHOD_FRAME_PERMITTED() \ PermitHelperMethodFrameState::CheckHelperMethodFramePermitted(); \ CompletedFCallTransitionState ___CompletedFCallTransitionState; #else // unsupported processor #define PERMIT_HELPER_METHOD_FRAME_BEGIN() #define PERMIT_HELPER_METHOD_FRAME_END() #define FCALL_TRANSITION_BEGIN() #define FCALL_TRANSITION_END() #define CHECK_HELPER_METHOD_FRAME_PERMITTED() #endif // unsupported processor //============================================================================================== // This is where FCThrow ultimately ends up. Never call this directly. // Use the FCThrow() macros. 
__FCThrowArgument is the helper to throw ArgumentExceptions // with a resource taken from the managed resource manager. //============================================================================================== LPVOID __FCThrow(LPVOID me, enum RuntimeExceptionKind reKind, UINT resID, LPCWSTR arg1, LPCWSTR arg2, LPCWSTR arg3); LPVOID __FCThrowArgument(LPVOID me, enum RuntimeExceptionKind reKind, LPCWSTR argumentName, LPCWSTR resourceName); //============================================================================================== // FDECLn: A set of macros for generating header declarations for FC targets. // Use FIMPLn for the actual body. //============================================================================================== // Note: on the x86, these defs reverse all but the first two arguments // (IL stack calling convention is reversed from __fastcall.) // Calling convention for varargs #define F_CALL_VA_CONV __cdecl #ifdef TARGET_X86 // Choose the appropriate calling convention for FCALL helpers on the basis of the JIT calling convention #ifdef __GNUC__ #define F_CALL_CONV __attribute__((cdecl, regparm(3))) // GCC FCALL convention (simulated via cdecl, regparm(3)) is different from MSVC FCALL convention. GCC can use up // to 3 registers to store parameters. The registers used are EAX, EDX, ECX. Dummy parameters and reordering // of the actual parameters in the FCALL signature is used to make the calling convention to look like in MSVC. 
#define SWIZZLE_REGARG_ORDER #else // __GNUC__ #define F_CALL_CONV __fastcall #endif // !__GNUC__ #define SWIZZLE_STKARG_ORDER #else // TARGET_X86 // // non-x86 platforms don't have messed-up calling convention swizzling // #define F_CALL_CONV #endif // !TARGET_X86 #ifdef SWIZZLE_STKARG_ORDER #ifdef SWIZZLE_REGARG_ORDER #define FCDECL0(rettype, funcname) rettype F_CALL_CONV funcname() #define FCDECL1(rettype, funcname, a1) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a1) #define FCDECL1_V(rettype, funcname, a1) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, int /* ECX */, a1) #define FCDECL2(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1) #define FCDECL2VA(rettype, funcname, a1, a2) rettype F_CALL_VA_CONV funcname(a1, a2, ...) #define FCDECL2_VV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, int /* ECX */, a2, a1) #define FCDECL2_VI(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a2, a1) #define FCDECL2_IV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a1, a2) #define FCDECL3(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a3) #define FCDECL3_IIV(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a3) #define FCDECL3_VII(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, a3, a2, a1) #define FCDECL3_IVV(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a1, a3, a2) #define FCDECL3_IVI(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, a3, a1, a2) #define FCDECL3_VVI(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a3, a2, a1) #define FCDECL3_VVV(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, int /* ECX */, a3, a2, a1) #define FCDECL4(rettype, funcname, a1, a2, a3, 
a4) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a4, a3) #define FCDECL5(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a5, a4, a3) #define FCDECL6(rettype, funcname, a1, a2, a3, a4, a5, a6) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a6, a5, a4, a3) #define FCDECL7(rettype, funcname, a1, a2, a3, a4, a5, a6, a7) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a7, a6, a5, a4, a3) #define FCDECL8(rettype, funcname, a1, a2, a3, a4, a5, a6, a7, a8) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a8, a7, a6, a5, a4, a3) #define FCDECL9(rettype, funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a9, a8, a7, a6, a5, a4, a3) #define FCDECL10(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a10, a9, a8, a7, a6, a5, a4, a3) #define FCDECL11(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a11, a10, a9, a8, a7, a6, a5, a4, a3) #define FCDECL12(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3) #define FCDECL13(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a13, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3) #define FCDECL14(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a14, a13, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3) #define FCDECL5_IVI(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(int /* EAX */, a3, a1, a5, a4, a2) #define FCDECL5_VII(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(int /* EAX */, a3, a2, a5, a4, a1) #else // SWIZZLE_REGARG_ORDER #define FCDECL0(rettype, funcname) rettype F_CALL_CONV funcname() #define 
FCDECL1(rettype, funcname, a1) rettype F_CALL_CONV funcname(a1) #define FCDECL1_V(rettype, funcname, a1) rettype F_CALL_CONV funcname(a1) #define FCDECL2(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2) #define FCDECL2VA(rettype, funcname, a1, a2) rettype F_CALL_VA_CONV funcname(a1, a2, ...) #define FCDECL2_VV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a2, a1) #define FCDECL2_VI(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a2, a1) #define FCDECL2_IV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2) #define FCDECL3(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a2, a3) #define FCDECL3_IIV(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a2, a3) #define FCDECL3_VII(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a2, a3, a1) #define FCDECL3_IVV(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a3, a2) #define FCDECL3_IVI(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a3, a2) #define FCDECL3_VVI(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a2, a1, a3) #define FCDECL3_VVV(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a3, a2, a1) #define FCDECL4(rettype, funcname, a1, a2, a3, a4) rettype F_CALL_CONV funcname(a1, a2, a4, a3) #define FCDECL5(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(a1, a2, a5, a4, a3) #define FCDECL6(rettype, funcname, a1, a2, a3, a4, a5, a6) rettype F_CALL_CONV funcname(a1, a2, a6, a5, a4, a3) #define FCDECL7(rettype, funcname, a1, a2, a3, a4, a5, a6, a7) rettype F_CALL_CONV funcname(a1, a2, a7, a6, a5, a4, a3) #define FCDECL8(rettype, funcname, a1, a2, a3, a4, a5, a6, a7, a8) rettype F_CALL_CONV funcname(a1, a2, a8, a7, a6, a5, a4, a3) #define FCDECL9(rettype, funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9) rettype F_CALL_CONV funcname(a1, a2, a9, a8, a7, a6, a5, a4, a3) #define FCDECL10(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) rettype F_CALL_CONV 
funcname(a1, a2, a10, a9, a8, a7, a6, a5, a4, a3) #define FCDECL11(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11) rettype F_CALL_CONV funcname(a1, a2, a11, a10, a9, a8, a7, a6, a5, a4, a3) #define FCDECL12(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12) rettype F_CALL_CONV funcname(a1, a2, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3) #define FCDECL13(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13) rettype F_CALL_CONV funcname(a1, a2, a13, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3) #define FCDECL14(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14) rettype F_CALL_CONV funcname(a1, a2, a14, a13, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3) #define FCDECL5_IVI(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(a1, a3, a5, a4, a2) #define FCDECL5_VII(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(a2, a3, a5, a4, a1) #endif // !SWIZZLE_REGARG_ORDER #if 0 // // don't use something like this... 
directly calling an FCALL from within the runtime breaks stackwalking because // the FCALL reverse mapping only gets established in ECall::GetFCallImpl and that codepath is circumvented by // directly calling and FCALL // See below for usage of FC_CALL_INNER (used in SecurityStackWalk::Check presently) // #define FCCALL0(funcname) funcname() #define FCCALL1(funcname, a1) funcname(a1) #define FCCALL2(funcname, a1, a2) funcname(a1, a2) #define FCCALL3(funcname, a1, a2, a3) funcname(a1, a2, a3) #define FCCALL4(funcname, a1, a2, a3, a4) funcname(a1, a2, a4, a3) #define FCCALL5(funcname, a1, a2, a3, a4, a5) funcname(a1, a2, a5, a4, a3) #define FCCALL6(funcname, a1, a2, a3, a4, a5, a6) funcname(a1, a2, a6, a5, a4, a3) #define FCCALL7(funcname, a1, a2, a3, a4, a5, a6, a7) funcname(a1, a2, a7, a6, a5, a4, a3) #define FCCALL8(funcname, a1, a2, a3, a4, a5, a6, a7, a8) funcname(a1, a2, a8, a7, a6, a5, a4, a3) #define FCCALL9(funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9) funcname(a1, a2, a9, a8, a7, a6, a5, a4, a3) #define FCCALL10(funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) funcname(a1, a2, a10, a9, a8, a7, a6, a5, a4, a3) #define FCCALL11(funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11) funcname(a1, a2, a11, a10, a9, a8, a7, a6, a5, a4, a3) #define FCCALL12(funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12) funcname(a1, a2, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3) #endif // 0 #else // !SWIZZLE_STKARG_ORDER #define FCDECL0(rettype, funcname) rettype F_CALL_CONV funcname() #define FCDECL1(rettype, funcname, a1) rettype F_CALL_CONV funcname(a1) #define FCDECL1_V(rettype, funcname, a1) rettype F_CALL_CONV funcname(a1) #define FCDECL2(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2) #define FCDECL2VA(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2, ...) 
#define FCDECL2_VV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2) #define FCDECL2_VI(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2) #define FCDECL2_IV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2) #define FCDECL3(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a2, a3) #define FCDECL3_IIV(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a2, a3) #define FCDECL3_VII(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a2, a3) #define FCDECL3_IVV(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a2, a3) #define FCDECL3_IVI(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a2, a3) #define FCDECL3_VVI(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a2, a3) #define FCDECL3_VVV(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a2, a3) #define FCDECL4(rettype, funcname, a1, a2, a3, a4) rettype F_CALL_CONV funcname(a1, a2, a3, a4) #define FCDECL5(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(a1, a2, a3, a4, a5) #define FCDECL6(rettype, funcname, a1, a2, a3, a4, a5, a6) rettype F_CALL_CONV funcname(a1, a2, a3, a4, a5, a6) #define FCDECL7(rettype, funcname, a1, a2, a3, a4, a5, a6, a7) rettype F_CALL_CONV funcname(a1, a2, a3, a4, a5, a6, a7) #define FCDECL8(rettype, funcname, a1, a2, a3, a4, a5, a6, a7, a8) rettype F_CALL_CONV funcname(a1, a2, a3, a4, a5, a6, a7, a8) #define FCDECL9(rettype, funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9) rettype F_CALL_CONV funcname(a1, a2, a3, a4, a5, a6, a7, a8, a9) #define FCDECL10(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) rettype F_CALL_CONV funcname(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) #define FCDECL11(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11) rettype F_CALL_CONV funcname(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11) #define FCDECL12(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12) rettype F_CALL_CONV 
funcname(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12) #define FCDECL13(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13) rettype F_CALL_CONV funcname(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13) #define FCDECL14(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14) rettype F_CALL_CONV funcname(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14) #define FCDECL5_IVI(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(a1, a2, a3, a4, a5) #define FCDECL5_VII(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(a1, a2, a3, a4, a5) #endif // !SWIZZLE_STKARG_ORDER #define HELPER_FRAME_DECL(x) FrameWithCookie<HelperMethodFrame_##x##OBJ> __helperframe // use the capture state machinery if the architecture has one // // For a normal build we create a loop (see explaination on RestoreState below) // We don't want a loop here for PREFAST since that causes // warning 263: Using _alloca in a loop // And we can't use DEBUG_OK_TO_RETURN for PREFAST because the PREFAST version // requires that you already be in a DEBUG_ASSURE_NO_RETURN_BEGIN scope #define HelperMethodFrame_0OBJ HelperMethodFrame #define HELPER_FRAME_ARGS(attribs) __me, attribs #define FORLAZYMACHSTATE(x) x #if defined(_PREFAST_) #define FORLAZYMACHSTATE_BEGINLOOP(x) x #define FORLAZYMACHSTATE_ENDLOOP(x) #define FORLAZYMACHSTATE_DEBUG_OK_TO_RETURN_BEGIN #define FORLAZYMACHSTATE_DEBUG_OK_TO_RETURN_END #else #define FORLAZYMACHSTATE_BEGINLOOP(x) x do #define FORLAZYMACHSTATE_ENDLOOP(x) while(x) #define FORLAZYMACHSTATE_DEBUG_OK_TO_RETURN_BEGIN DEBUG_OK_TO_RETURN_BEGIN(LAZYMACHSTATE) #define FORLAZYMACHSTATE_DEBUG_OK_TO_RETURN_END DEBUG_OK_TO_RETURN_END(LAZYMACHSTATE) #endif // BEGIN: before gcpoll //FCallGCCanTriggerNoDtor __fcallGcCanTrigger; //__fcallGcCanTrigger.Enter(); // END: after gcpoll //__fcallGcCanTrigger.Leave(__FUNCTION__, __FILE__, __LINE__); // We have to put DEBUG_OK_TO_RETURN_BEGIN around the 
FORLAZYMACHSTATE // to allow the HELPER_FRAME to be installed inside an SO_INTOLERANT region // which does not allow a return. The return is used by FORLAZYMACHSTATE // to capture the state, but is not an actual return, so it is ok. #define HELPER_METHOD_FRAME_BEGIN_EX_BODY(ret, helperFrame, gcpoll, allowGC) \ FORLAZYMACHSTATE_BEGINLOOP(int alwaysZero = 0;) \ { \ INDEBUG(static BOOL __haveCheckedRestoreState = FALSE;) \ PERMIT_HELPER_METHOD_FRAME_BEGIN(); \ CHECK_HELPER_METHOD_FRAME_PERMITTED(); \ helperFrame; \ FORLAZYMACHSTATE_DEBUG_OK_TO_RETURN_BEGIN; \ FORLAZYMACHSTATE(CAPTURE_STATE(__helperframe.MachineState(), ret);) \ FORLAZYMACHSTATE_DEBUG_OK_TO_RETURN_END; \ INDEBUG(__helperframe.SetAddrOfHaveCheckedRestoreState(&__haveCheckedRestoreState)); \ DEBUG_ASSURE_NO_RETURN_BEGIN(HELPER_METHOD_FRAME); \ INCONTRACT(FCallGCCanTrigger::Enter()); #define HELPER_METHOD_FRAME_BEGIN_EX(ret, helperFrame, gcpoll, allowGC) \ HELPER_METHOD_FRAME_BEGIN_EX_BODY(ret, helperFrame, gcpoll, allowGC) \ /* <TODO>TODO TURN THIS ON!!! </TODO> */ \ /* gcpoll; */ \ INSTALL_MANAGED_EXCEPTION_DISPATCHER; \ __helperframe.Push(); \ MAKE_CURRENT_THREAD_AVAILABLE_EX(__helperframe.GetThread()); \ INSTALL_UNWIND_AND_CONTINUE_HANDLER_FOR_HMF(&__helperframe); #define HELPER_METHOD_FRAME_BEGIN_EX_NOTHROW(ret, helperFrame, gcpoll, allowGC, probeFailExpr) \ HELPER_METHOD_FRAME_BEGIN_EX_BODY(ret, helperFrame, gcpoll, allowGC) \ __helperframe.Push(); \ MAKE_CURRENT_THREAD_AVAILABLE_EX(__helperframe.GetThread()); \ /* <TODO>TODO TURN THIS ON!!! </TODO> */ \ /* gcpoll; */ // The while(__helperframe.RestoreState() needs a bit of explanation. // The issue is insuring that the same machine state (which registers saved) // exists when the machine state is probed (when the frame is created, and // when it is actually used (when the frame is popped. We do this by creating // a flow of control from use to def. 
Note that 'RestoreState' always returns false // we never actually loop, but the compiler does not know that, and thus // will be forced to make the keep the state of register spills the same at // the two locations. #define HELPER_METHOD_FRAME_END_EX_BODY(gcpoll,allowGC) \ /* <TODO>TODO TURN THIS ON!!! </TODO> */ \ /* gcpoll; */ \ DEBUG_ASSURE_NO_RETURN_END(HELPER_METHOD_FRAME); \ INCONTRACT(FCallGCCanTrigger::Leave(__FUNCTION__, __FILE__, __LINE__)); \ FORLAZYMACHSTATE(alwaysZero = \ HelperMethodFrameRestoreState(INDEBUG_COMMA(&__helperframe) \ __helperframe.MachineState());) \ PERMIT_HELPER_METHOD_FRAME_END() \ } FORLAZYMACHSTATE_ENDLOOP(alwaysZero); #define HELPER_METHOD_FRAME_END_EX(gcpoll,allowGC) \ UNINSTALL_UNWIND_AND_CONTINUE_HANDLER; \ __helperframe.Pop(); \ UNINSTALL_MANAGED_EXCEPTION_DISPATCHER; \ HELPER_METHOD_FRAME_END_EX_BODY(gcpoll,allowGC); #define HELPER_METHOD_FRAME_END_EX_NOTHROW(gcpoll,allowGC) \ __helperframe.Pop(); \ HELPER_METHOD_FRAME_END_EX_BODY(gcpoll,allowGC); #define HELPER_METHOD_FRAME_BEGIN_ATTRIB(attribs) \ HELPER_METHOD_FRAME_BEGIN_EX( \ return, \ HELPER_FRAME_DECL(0)(HELPER_FRAME_ARGS(attribs)), \ HELPER_METHOD_POLL(),TRUE) #define HELPER_METHOD_FRAME_BEGIN_0() \ HELPER_METHOD_FRAME_BEGIN_ATTRIB(Frame::FRAME_ATTR_NONE) #define HELPER_METHOD_FRAME_BEGIN_ATTRIB_NOPOLL(attribs) \ HELPER_METHOD_FRAME_BEGIN_EX( \ return, \ HELPER_FRAME_DECL(0)(HELPER_FRAME_ARGS(attribs)), \ {},FALSE) #define HELPER_METHOD_FRAME_BEGIN_NOPOLL() HELPER_METHOD_FRAME_BEGIN_ATTRIB_NOPOLL(Frame::FRAME_ATTR_NONE) #define HELPER_METHOD_FRAME_BEGIN_ATTRIB_1(attribs, arg1) \ static_assert(sizeof(arg1) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\ HELPER_METHOD_FRAME_BEGIN_EX( \ return, \ HELPER_FRAME_DECL(1)(HELPER_FRAME_ARGS(attribs), \ (OBJECTREF*) &arg1), \ HELPER_METHOD_POLL(),TRUE) #define HELPER_METHOD_FRAME_BEGIN_1(arg1) 
HELPER_METHOD_FRAME_BEGIN_ATTRIB_1(Frame::FRAME_ATTR_NONE, arg1) #define HELPER_METHOD_FRAME_BEGIN_ATTRIB_2(attribs, arg1, arg2) \ static_assert(sizeof(arg1) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\ static_assert(sizeof(arg2) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\ HELPER_METHOD_FRAME_BEGIN_EX( \ return, \ HELPER_FRAME_DECL(2)(HELPER_FRAME_ARGS(attribs), \ (OBJECTREF*) &arg1, (OBJECTREF*) &arg2), \ HELPER_METHOD_POLL(),TRUE) #define HELPER_METHOD_FRAME_BEGIN_2(arg1, arg2) HELPER_METHOD_FRAME_BEGIN_ATTRIB_2(Frame::FRAME_ATTR_NONE, arg1, arg2) #define HELPER_METHOD_FRAME_BEGIN_ATTRIB_3(attribs, arg1, arg2, arg3) \ static_assert(sizeof(arg1) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\ static_assert(sizeof(arg2) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\ static_assert(sizeof(arg3) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\ HELPER_METHOD_FRAME_BEGIN_EX( \ return, \ HELPER_FRAME_DECL(3)(HELPER_FRAME_ARGS(attribs), \ (OBJECTREF*) &arg1, (OBJECTREF*) &arg2, (OBJECTREF*) &arg3), \ HELPER_METHOD_POLL(),TRUE) #define HELPER_METHOD_FRAME_BEGIN_3(arg1, arg2, arg3) HELPER_METHOD_FRAME_BEGIN_ATTRIB_3(Frame::FRAME_ATTR_NONE, arg1, arg2, arg3) #define HELPER_METHOD_FRAME_BEGIN_PROTECT(gc) \ HELPER_METHOD_FRAME_BEGIN_EX( \ return, \ HELPER_FRAME_DECL(PROTECT)(HELPER_FRAME_ARGS(Frame::FRAME_ATTR_NONE), \ (OBJECTREF*)&(gc), sizeof(gc)/sizeof(OBJECTREF)), \ HELPER_METHOD_POLL(),TRUE) #define HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_NOPOLL(attribs) \ HELPER_METHOD_FRAME_BEGIN_EX( \ return 0, \ HELPER_FRAME_DECL(0)(HELPER_FRAME_ARGS(attribs)), \ 
{},FALSE) #define HELPER_METHOD_FRAME_BEGIN_RET_VC_ATTRIB_NOPOLL(attribs) \ HELPER_METHOD_FRAME_BEGIN_EX( \ FC_RETURN_VC(), \ HELPER_FRAME_DECL(0)(HELPER_FRAME_ARGS(attribs)), \ {},FALSE) #define HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB(attribs) \ HELPER_METHOD_FRAME_BEGIN_EX( \ return 0, \ HELPER_FRAME_DECL(0)(HELPER_FRAME_ARGS(attribs)), \ HELPER_METHOD_POLL(),TRUE) #define HELPER_METHOD_FRAME_BEGIN_RET_0() \ HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB(Frame::FRAME_ATTR_NONE) #define HELPER_METHOD_FRAME_BEGIN_RET_VC_0() \ HELPER_METHOD_FRAME_BEGIN_EX( \ FC_RETURN_VC(), \ HELPER_FRAME_DECL(0)(HELPER_FRAME_ARGS(Frame::FRAME_ATTR_NONE)), \ HELPER_METHOD_POLL(),TRUE) #define HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_1(attribs, arg1) \ static_assert(sizeof(arg1) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\ HELPER_METHOD_FRAME_BEGIN_EX( \ return 0, \ HELPER_FRAME_DECL(1)(HELPER_FRAME_ARGS(attribs), \ (OBJECTREF*) &arg1), \ HELPER_METHOD_POLL(),TRUE) #define HELPER_METHOD_FRAME_BEGIN_RET_NOTHROW_1(probeFailExpr, arg1) \ static_assert(sizeof(arg1) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\ HELPER_METHOD_FRAME_BEGIN_EX_NOTHROW( \ return 0, \ HELPER_FRAME_DECL(1)(HELPER_FRAME_ARGS(Frame::FRAME_ATTR_NO_THREAD_ABORT), \ (OBJECTREF*) &arg1), \ HELPER_METHOD_POLL(), TRUE, probeFailExpr) #define HELPER_METHOD_FRAME_BEGIN_RET_VC_ATTRIB_1(attribs, arg1) \ static_assert(sizeof(arg1) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\ HELPER_METHOD_FRAME_BEGIN_EX( \ FC_RETURN_VC(), \ HELPER_FRAME_DECL(1)(HELPER_FRAME_ARGS(attribs), \ (OBJECTREF*) &arg1), \ HELPER_METHOD_POLL(),TRUE) #define HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_2(attribs, arg1, arg2) \ static_assert(sizeof(arg1) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs 
requires a PROTECT variant of the HELPER METHOD FRAME macro");\ static_assert(sizeof(arg2) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\ HELPER_METHOD_FRAME_BEGIN_EX( \ return 0, \ HELPER_FRAME_DECL(2)(HELPER_FRAME_ARGS(attribs), \ (OBJECTREF*) &arg1, (OBJECTREF*) &arg2), \ HELPER_METHOD_POLL(),TRUE) #define HELPER_METHOD_FRAME_BEGIN_RET_VC_ATTRIB_2(attribs, arg1, arg2) \ static_assert(sizeof(arg1) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\ static_assert(sizeof(arg2) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\ HELPER_METHOD_FRAME_BEGIN_EX( \ FC_RETURN_VC(), \ HELPER_FRAME_DECL(2)(HELPER_FRAME_ARGS(attribs), \ (OBJECTREF*) &arg1, (OBJECTREF*) &arg2), \ HELPER_METHOD_POLL(),TRUE) #define HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_PROTECT(attribs, gc) \ HELPER_METHOD_FRAME_BEGIN_EX( \ return 0, \ HELPER_FRAME_DECL(PROTECT)(HELPER_FRAME_ARGS(attribs), \ (OBJECTREF*)&(gc), sizeof(gc)/sizeof(OBJECTREF)), \ HELPER_METHOD_POLL(),TRUE) #define HELPER_METHOD_FRAME_BEGIN_RET_VC_NOPOLL() \ HELPER_METHOD_FRAME_BEGIN_RET_VC_ATTRIB_NOPOLL(Frame::FRAME_ATTR_NONE) #define HELPER_METHOD_FRAME_BEGIN_RET_NOPOLL() \ HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_NOPOLL(Frame::FRAME_ATTR_NONE) #define HELPER_METHOD_FRAME_BEGIN_RET_1(arg1) \ static_assert(sizeof(arg1) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\ HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_1(Frame::FRAME_ATTR_NONE, arg1) #define HELPER_METHOD_FRAME_BEGIN_RET_VC_1(arg1) \ static_assert(sizeof(arg1) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\ HELPER_METHOD_FRAME_BEGIN_RET_VC_ATTRIB_1(Frame::FRAME_ATTR_NONE, arg1) 
#define HELPER_METHOD_FRAME_BEGIN_RET_2(arg1, arg2) \
        static_assert(sizeof(arg1) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\
        static_assert(sizeof(arg2) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\
        HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_2(Frame::FRAME_ATTR_NONE, arg1, arg2)

#define HELPER_METHOD_FRAME_BEGIN_RET_VC_2(arg1, arg2) \
        static_assert(sizeof(arg1) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\
        static_assert(sizeof(arg2) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\
        HELPER_METHOD_FRAME_BEGIN_RET_VC_ATTRIB_2(Frame::FRAME_ATTR_NONE, arg1, arg2)

#define HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc) \
        HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_PROTECT(Frame::FRAME_ATTR_NONE, gc)

#define HELPER_METHOD_FRAME_END()        HELPER_METHOD_FRAME_END_EX({},FALSE)
#define HELPER_METHOD_FRAME_END_POLL()   HELPER_METHOD_FRAME_END_EX(HELPER_METHOD_POLL(),TRUE)
#define HELPER_METHOD_FRAME_END_NOTHROW()HELPER_METHOD_FRAME_END_EX_NOTHROW({},FALSE)

// This is the fastest way to do a GC poll if you have already erected a HelperMethodFrame
#define HELPER_METHOD_POLL() { __helperframe.Poll(); INCONTRACT(__fCallCheck.SetDidPoll()); }

// The HelperMethodFrame knows how to get its return address.  Let other code get at it, too.
//  (Uses comma operator to call InsureInit & discard result.
#define HELPER_METHOD_FRAME_GET_RETURN_ADDRESS() \
    ( static_cast<UINT_PTR>( (__helperframe.InsureInit(false, NULL)), (__helperframe.MachineState()->GetRetAddr()) ) )

// Very short routines, or routines that are guaranteed to force GC or EH
// don't need to poll the GC.  USE VERY SPARINGLY!!!
#define FC_GC_POLL_NOT_NEEDED() INCONTRACT(__fCallCheck.SetNotNeeded()) Object* FC_GCPoll(void* me, Object* objToProtect = NULL); #define FC_GC_POLL_EX(ret) \ { \ INCONTRACT(Thread::TriggersGC(GetThread());) \ INCONTRACT(__fCallCheck.SetDidPoll();) \ if (g_TrapReturningThreads.LoadWithoutBarrier()) \ { \ if (FC_GCPoll(__me)) \ return ret; \ while (0 == FC_NO_TAILCALL) { }; /* side effect the compile can't remove */ \ } \ } #define FC_GC_POLL() FC_GC_POLL_EX(;) #define FC_GC_POLL_RET() FC_GC_POLL_EX(0) #define FC_GC_POLL_AND_RETURN_OBJREF(obj) \ { \ INCONTRACT(__fCallCheck.SetDidPoll();) \ Object* __temp = OBJECTREFToObject(obj); \ if (g_TrapReturningThreads.LoadWithoutBarrier()) \ { \ __temp = FC_GCPoll(__me, __temp); \ while (0 == FC_NO_TAILCALL) { }; /* side effect the compile can't remove */ \ } \ return __temp; \ } #if defined(ENABLE_CONTRACTS) #define FC_CAN_TRIGGER_GC() FCallGCCanTrigger::Enter() #define FC_CAN_TRIGGER_GC_END() FCallGCCanTrigger::Leave(__FUNCTION__, __FILE__, __LINE__) #define FC_CAN_TRIGGER_GC_HAVE_THREAD(thread) FCallGCCanTrigger::Enter(thread) #define FC_CAN_TRIGGER_GC_HAVE_THREADEND(thread) FCallGCCanTrigger::Leave(thread, __FUNCTION__, __FILE__, __LINE__) // turns on forbidGC for the lifetime of the instance class ForbidGC { protected: Thread *m_pThread; public: ForbidGC(const char *szFile, int lineNum); ~ForbidGC(); }; // this little helper class checks to make certain // 1) ForbidGC is set throughout the routine. 
// 2) Sometime during the routine, a GC poll is done class FCallCheck : public ForbidGC { public: FCallCheck(const char *szFile, int lineNum); ~FCallCheck(); void SetDidPoll() {LIMITED_METHOD_CONTRACT; didGCPoll = true; } void SetNotNeeded() {LIMITED_METHOD_CONTRACT; notNeeded = true; } private: #ifdef _DEBUG DWORD unbreakableLockCount; #endif bool didGCPoll; // GC poll was done bool notNeeded; // GC poll not needed unsigned __int64 startTicks; // tick count at beginning of FCall }; // FC_COMMON_PROLOG is used for both FCalls and HCalls #define FC_COMMON_PROLOG(target, assertFn) \ /* The following line has to be first. We do not want to trash last error */ \ DWORD __lastError = ::GetLastError(); \ static void* __cache = 0; \ assertFn(__cache, (LPVOID)target); \ { \ Thread *_pThread = GetThread(); \ Thread::ObjectRefFlush(_pThread); \ } \ FCallCheck __fCallCheck(__FILE__, __LINE__); \ FCALL_TRANSITION_BEGIN(); \ ::SetLastError(__lastError); \ void FCallAssert(void*& cache, void* target); void HCallAssert(void*& cache, void* target); #else #define FC_COMMON_PROLOG(target, assertFn) FCALL_TRANSITION_BEGIN() #define FC_CAN_TRIGGER_GC() #define FC_CAN_TRIGGER_GC_END() #endif // ENABLE_CONTRACTS // #FC_INNER // Macros that allows fcall to be split into two function to avoid the helper frame overhead on common fast // codepaths. // // The helper routine needs to know the name of the routine that called it so that it can look up the name of // the managed routine this code is associted with (for managed stack traces). This is passed with the // FC_INNER_PROLOG macro. // // The helper can set up a HELPER_METHOD_FRAME, but should pass the // Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2 which indicates the exact number of // unwinds to do to get back to managed code. Currently we only support depth 2 which means that the // HELPER_METHOD_FRAME needs to be set up in the function directly called by the FCALL. 
The helper should // use the NOINLINE macro to prevent the compiler from inlining it into the FCALL (which would obviously // mess up the unwind count). // // The other invarient that needs to hold is that the epilog walker needs to be able to get from the call to // the helper routine to the end of the FCALL using trivial heurisitics. The easiest (and only supported) // way of doing this is to place your helper right before a return (eg at the end of the method). Generally // this is not a problem at all, since the FCALL itself will pick off some common case and then tail-call to // the helper for everything else. You must use the code:FC_INNER_RETURN macros to do the call, to insure // that the C++ compiler does not tail-call optimize the call to the inner function and mess up the stack // depth. // // see code:ObjectNative::GetClass for an example // #define FC_INNER_PROLOG(outerfuncname) \ LPVOID __me; \ __me = GetEEFuncEntryPointMacro(outerfuncname); \ FC_CAN_TRIGGER_GC(); \ INCONTRACT(FCallCheck __fCallCheck(__FILE__, __LINE__)); // This variant should be used for inner fcall functions that have the // __me value passed as an argument to the function. This allows // inner functions to be shared across multiple fcalls. #define FC_INNER_PROLOG_NO_ME_SETUP() \ FC_CAN_TRIGGER_GC(); \ INCONTRACT(FCallCheck __fCallCheck(__FILE__, __LINE__)); #define FC_INNER_EPILOG() \ FC_CAN_TRIGGER_GC_END(); // If you are using FC_INNER, and you are tail calling to the helper method (a common case), then you need // to use the FC_INNER_RETURN macros (there is one for methods that return a value and another if the // function returns void). This macro's purpose is to inhibit any tail calll optimization the C++ compiler // might do, which would otherwise confuse the epilog walker. 
// // * See #FC_INNER for more extern RAW_KEYWORD(volatile) int FC_NO_TAILCALL; #define FC_INNER_RETURN(type, expr) \ type __retVal = expr; \ while (0 == FC_NO_TAILCALL) { }; /* side effect the compile can't remove */ \ return(__retVal); #define FC_INNER_RETURN_VOID(stmt) \ stmt; \ while (0 == FC_NO_TAILCALL) { }; /* side effect the compile can't remove */ \ return; //============================================================================================== // FIMPLn: A set of macros for generating the proto for the actual // implementation (use FDECLN for header protos.) // // The hidden "__me" variable lets us recover the original MethodDesc* // so any thrown exceptions will have the correct stack trace. FCThrow() // passes this along to __FCThrowInternal(). //============================================================================================== #define GetEEFuncEntryPointMacro(func) ((LPVOID)(func)) #define FCIMPL_PROLOG(funcname) \ LPVOID __me; \ __me = GetEEFuncEntryPointMacro(funcname); \ FC_COMMON_PROLOG(__me, FCallAssert) #if defined(_DEBUG) && !defined(__GNUC__) // Build the list of all fcalls signatures. It is used in binder.cpp to verify // compatibility of managed and unmanaged fcall signatures. The check is currently done // for x86 only. 
#define CHECK_FCALL_SIGNATURE #endif #ifdef CHECK_FCALL_SIGNATURE struct FCSigCheck { public: FCSigCheck(void* fnc, const char* sig) { LIMITED_METHOD_CONTRACT; func = fnc; signature = sig; next = g_pFCSigCheck; g_pFCSigCheck = this; } FCSigCheck* next; void* func; const char* signature; static FCSigCheck* g_pFCSigCheck; }; #define FCSIGCHECK(funcname, signature) \ static FCSigCheck UNIQUE_LABEL(FCSigCheck)(GetEEFuncEntryPointMacro(funcname), signature); #else // CHECK_FCALL_SIGNATURE #define FCSIGCHECK(funcname, signature) #endif // !CHECK_FCALL_SIGNATURE #ifdef SWIZZLE_STKARG_ORDER #ifdef SWIZZLE_REGARG_ORDER #define FCIMPL0(rettype, funcname) rettype F_CALL_CONV funcname() { FCIMPL_PROLOG(funcname) #define FCIMPL1(rettype, funcname, a1) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a1) { FCIMPL_PROLOG(funcname) #define FCIMPL1_V(rettype, funcname, a1) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, int /* ECX */, a1) { FCIMPL_PROLOG(funcname) #define FCIMPL2(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1) { FCIMPL_PROLOG(funcname) #define FCIMPL2_VV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, int /* ECX */, a2, a1) { FCIMPL_PROLOG(funcname) #define FCIMPL2_VI(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a2, a1) { FCIMPL_PROLOG(funcname) #define FCIMPL2_IV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a1, a2) { FCIMPL_PROLOG(funcname) #define FCIMPL3(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL3_IIV(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL3_VII(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, a3, a2, a1) { FCIMPL_PROLOG(funcname) #define FCIMPL3_IVV(rettype, funcname, a1, a2, a3) rettype 
F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a1, a3, a2) { FCIMPL_PROLOG(funcname) #define FCIMPL3_IVI(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, a3, a1, a2) { FCIMPL_PROLOG(funcname) #define FCIMPL3_VVI(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a3, a2, a1) { FCIMPL_PROLOG(funcname) #define FCIMPL3_VVV(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, int /* ECX */, a3, a2, a1) { FCIMPL_PROLOG(funcname) #define FCIMPL4(rettype, funcname, a1, a2, a3, a4) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL5(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a5, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL6(rettype, funcname, a1, a2, a3, a4, a5, a6) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL7(rettype, funcname, a1, a2, a3, a4, a5, a6, a7) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL8(rettype, funcname, a1, a2, a3, a4, a5, a6, a7, a8) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL9(rettype, funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a9, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL10(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a10, a9, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL11(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a11, a10, a9, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL12(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, 
a12, a11, a10, a9, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL13(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a13, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL14(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a14, a13, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL5_IVI(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(int /* EAX */, a3, a1, a5, a4, a2) { FCIMPL_PROLOG(funcname) #define FCIMPL5_VII(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(int /* EAX */, a3, a2, a5, a4, a1) { FCIMPL_PROLOG(funcname) #else // SWIZZLE_REGARG_ORDER #define FCIMPL0(rettype, funcname) FCSIGCHECK(funcname, #rettype) \ rettype F_CALL_CONV funcname() { FCIMPL_PROLOG(funcname) #define FCIMPL1(rettype, funcname, a1) FCSIGCHECK(funcname, #rettype "," #a1) \ rettype F_CALL_CONV funcname(a1) { FCIMPL_PROLOG(funcname) #define FCIMPL1_V(rettype, funcname, a1) FCSIGCHECK(funcname, #rettype "," "V" #a1) \ rettype F_CALL_CONV funcname(a1) { FCIMPL_PROLOG(funcname) #define FCIMPL2(rettype, funcname, a1, a2) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2) \ rettype F_CALL_CONV funcname(a1, a2) { FCIMPL_PROLOG(funcname) #define FCIMPL2VA(rettype, funcname, a1, a2) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," "...") \ rettype F_CALL_VA_CONV funcname(a1, a2, ...) 
{ FCIMPL_PROLOG(funcname) #define FCIMPL2_VV(rettype, funcname, a1, a2) FCSIGCHECK(funcname, #rettype "," "V" #a1 "," "V" #a2) \ rettype F_CALL_CONV funcname(a2, a1) { FCIMPL_PROLOG(funcname) #define FCIMPL2_VI(rettype, funcname, a1, a2) FCSIGCHECK(funcname, #rettype "," "V" #a1 "," #a2) \ rettype F_CALL_CONV funcname(a2, a1) { FCIMPL_PROLOG(funcname) #define FCIMPL2_IV(rettype, funcname, a1, a2) FCSIGCHECK(funcname, #rettype "," #a1 "," "V" #a2) \ rettype F_CALL_CONV funcname(a1, a2) { FCIMPL_PROLOG(funcname) #define FCIMPL3(rettype, funcname, a1, a2, a3) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," #a3) \ rettype F_CALL_CONV funcname(a1, a2, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL3_IIV(rettype, funcname, a1, a2, a3) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," "V" #a3) \ rettype F_CALL_CONV funcname(a1, a2, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL3_VII(rettype, funcname, a1, a2, a3) FCSIGCHECK(funcname, #rettype "," "V" #a1 "," #a2 "," #a3) \ rettype F_CALL_CONV funcname(a2, a3, a1) { FCIMPL_PROLOG(funcname) #define FCIMPL3_IVV(rettype, funcname, a1, a2, a3) FCSIGCHECK(funcname, #rettype "," #a1 "," "V" #a2 "," "V" #a3) \ rettype F_CALL_CONV funcname(a1, a3, a2) { FCIMPL_PROLOG(funcname) #define FCIMPL3_IVI(rettype, funcname, a1, a2, a3) FCSIGCHECK(funcname, #rettype "," #a1 "," "V" #a2 "," #a3) \ rettype F_CALL_CONV funcname(a1, a3, a2) { FCIMPL_PROLOG(funcname) #define FCIMPL3_VVI(rettype, funcname, a1, a2, a3) FCSIGCHECK(funcname, #rettype "," "V" #a1 "," "V" #a2 "," #a3) \ rettype F_CALL_CONV funcname(a2, a1, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL3_VVV(rettype, funcname, a1, a2, a3) FCSIGCHECK(funcname, #rettype "," "V" #a1 "," "V" #a2 "," "V" #a3) \ rettype F_CALL_CONV funcname(a3, a2, a1) { FCIMPL_PROLOG(funcname) #define FCIMPL4(rettype, funcname, a1, a2, a3, a4) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," #a3 "," #a4) \ rettype F_CALL_CONV funcname(a1, a2, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL5(rettype, funcname, 
a1, a2, a3, a4, a5) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," #a3 "," #a4 "," #a5) \ rettype F_CALL_CONV funcname(a1, a2, a5, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL6(rettype, funcname, a1, a2, a3, a4, a5, a6) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," #a3 "," #a4 "," #a5 "," #a6) \ rettype F_CALL_CONV funcname(a1, a2, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL7(rettype, funcname, a1, a2, a3, a4, a5, a6, a7) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," #a3 "," #a4 "," #a5 "," #a6 "," #a7) \ rettype F_CALL_CONV funcname(a1, a2, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL8(rettype, funcname, a1, a2, a3, a4, a5, a6, a7, a8) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," #a3 "," #a4 "," #a5 "," #a6 "," #a7 "," #a8) \ rettype F_CALL_CONV funcname(a1, a2, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL9(rettype, funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," #a3 "," #a4 "," #a5 "," #a6 "," #a7 "," #a8 "," #a9) \ rettype F_CALL_CONV funcname(a1, a2, a9, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL10(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," #a3 "," #a4 "," #a5 "," #a6 "," #a7 "," #a8 "," #a9 "," #a10) \ rettype F_CALL_CONV funcname(a1, a2, a10, a9, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL11(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," #a3 "," #a4 "," #a5 "," #a6 "," #a7 "," #a8 "," #a9 "," #a10 "," #a11) \ rettype F_CALL_CONV funcname(a1, a2, a11, a10, a9, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL12(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," #a3 "," #a4 "," #a5 "," #a6 "," #a7 "," #a8 "," #a9 "," #a10 "," #a11 "," #a12) \ rettype F_CALL_CONV funcname(a1, a2, a12, a11, 
a10, a9, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL13(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," #a3 "," #a4 "," #a5 "," #a6 "," #a7 "," #a8 "," #a9 "," #a10 "," #a11 "," #a12 "," #a13) \ rettype F_CALL_CONV funcname(a1, a2, a13, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL14(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," #a3 "," #a4 "," #a5 "," #a6 "," #a7 "," #a8 "," #a9 "," #a10 "," #a11 "," #a12 "," #a13 "," #a14) \ rettype F_CALL_CONV funcname(a1, a2, a14, a13, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL5_IVI(rettype, funcname, a1, a2, a3, a4, a5) FCSIGCHECK(funcname, #rettype "," #a1 "," "V" #a2 "," #a3 "," #a4 "," #a5) \ rettype F_CALL_CONV funcname(a1, a3, a5, a4, a2) { FCIMPL_PROLOG(funcname) #define FCIMPL5_VII(rettype, funcname, a1, a2, a3, a4, a5) FCSIGCHECK(funcname, #rettype "," "V" #a1 "," #a2 "," #a3 "," #a4 "," #a5) \ rettype F_CALL_CONV funcname(a2, a3, a5, a4, a1) { FCIMPL_PROLOG(funcname) #endif // !SWIZZLE_REGARG_ORDER #else // SWIZZLE_STKARG_ORDER #define FCIMPL0(rettype, funcname) rettype funcname() { FCIMPL_PROLOG(funcname) #define FCIMPL1(rettype, funcname, a1) rettype funcname(a1) { FCIMPL_PROLOG(funcname) #define FCIMPL1_V(rettype, funcname, a1) rettype funcname(a1) { FCIMPL_PROLOG(funcname) #define FCIMPL2(rettype, funcname, a1, a2) rettype funcname(a1, a2) { FCIMPL_PROLOG(funcname) #define FCIMPL2VA(rettype, funcname, a1, a2) rettype funcname(a1, a2, ...) 
{ FCIMPL_PROLOG(funcname) #define FCIMPL2_VV(rettype, funcname, a1, a2) rettype funcname(a1, a2) { FCIMPL_PROLOG(funcname) #define FCIMPL2_VI(rettype, funcname, a1, a2) rettype funcname(a1, a2) { FCIMPL_PROLOG(funcname) #define FCIMPL2_IV(rettype, funcname, a1, a2) rettype funcname(a1, a2) { FCIMPL_PROLOG(funcname) #define FCIMPL3(rettype, funcname, a1, a2, a3) rettype funcname(a1, a2, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL3_IIV(rettype, funcname, a1, a2, a3) rettype funcname(a1, a2, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL3_IVV(rettype, funcname, a1, a2, a3) rettype funcname(a1, a2, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL3_VII(rettype, funcname, a1, a2, a3) rettype funcname(a1, a2, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL3_IVI(rettype, funcname, a1, a2, a3) rettype funcname(a1, a2, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL3_VVI(rettype, funcname, a1, a2, a3) rettype funcname(a1, a2, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL3_VVV(rettype, funcname, a1, a2, a3) rettype funcname(a1, a2, a3) { FCIMPL_PROLOG(funcname) #define FCIMPL4(rettype, funcname, a1, a2, a3, a4) rettype funcname(a1, a2, a3, a4) { FCIMPL_PROLOG(funcname) #define FCIMPL5(rettype, funcname, a1, a2, a3, a4, a5) rettype funcname(a1, a2, a3, a4, a5) { FCIMPL_PROLOG(funcname) #define FCIMPL6(rettype, funcname, a1, a2, a3, a4, a5, a6) rettype funcname(a1, a2, a3, a4, a5, a6) { FCIMPL_PROLOG(funcname) #define FCIMPL7(rettype, funcname, a1, a2, a3, a4, a5, a6, a7) rettype funcname(a1, a2, a3, a4, a5, a6, a7) { FCIMPL_PROLOG(funcname) #define FCIMPL8(rettype, funcname, a1, a2, a3, a4, a5, a6, a7, a8) rettype funcname(a1, a2, a3, a4, a5, a6, a7, a8) { FCIMPL_PROLOG(funcname) #define FCIMPL9(rettype, funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9) rettype funcname(a1, a2, a3, a4, a5, a6, a7, a8, a9) { FCIMPL_PROLOG(funcname) #define FCIMPL10(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) rettype funcname(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) { FCIMPL_PROLOG(funcname) 
#define FCIMPL11(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11) rettype funcname(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11) { FCIMPL_PROLOG(funcname) #define FCIMPL12(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12) rettype funcname(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12) { FCIMPL_PROLOG(funcname) #define FCIMPL13(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13) rettype funcname(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13) { FCIMPL_PROLOG(funcname) #define FCIMPL14(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14) rettype funcname(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14) { FCIMPL_PROLOG(funcname) #define FCIMPL5_IVI(rettype, funcname, a1, a2, a3, a4, a5) rettype funcname(a1, a2, a3, a4, a5) { FCIMPL_PROLOG(funcname) #define FCIMPL5_VII(rettype, funcname, a1, a2, a3, a4, a5) rettype funcname(a1, a2, a3, a4, a5) { FCIMPL_PROLOG(funcname) #endif // !SWIZZLE_STKARG_ORDER //============================================================================================== // Use this to terminte an FCIMPLEND. //============================================================================================== #define FCIMPL_EPILOG() FCALL_TRANSITION_END() #define FCIMPLEND FCIMPL_EPILOG(); } #define HCIMPL_PROLOG(funcname) LPVOID __me; __me = 0; FC_COMMON_PROLOG(funcname, HCallAssert) // HCIMPL macros are just like their FCIMPL counterparts, however // they do not remember the function they come from. Thus they will not // show up in a stack trace. 
This is what you want for JIT helpers and the like #ifdef SWIZZLE_STKARG_ORDER #ifdef SWIZZLE_REGARG_ORDER #define HCIMPL0(rettype, funcname) rettype F_CALL_CONV funcname() { HCIMPL_PROLOG(funcname) #define HCIMPL1(rettype, funcname, a1) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a1) { HCIMPL_PROLOG(funcname) #define HCIMPL1_RAW(rettype, funcname, a1) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a1) { #define HCIMPL1_V(rettype, funcname, a1) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, int /* ECX */, a1) { HCIMPL_PROLOG(funcname) #define HCIMPL2(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1) { HCIMPL_PROLOG(funcname) #define HCIMPL2_RAW(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1) { #define HCIMPL2_VV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, int /* ECX */, a2, a1) { HCIMPL_PROLOG(funcname) #define HCIMPL2_IV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a1, a2) { HCIMPL_PROLOG(funcname) #define HCIMPL3(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a3) { HCIMPL_PROLOG(funcname) #define HCIMPL3_RAW(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a3) { #define HCIMPL4(rettype, funcname, a1, a2, a3, a4) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a4, a3) { HCIMPL_PROLOG(funcname) #define HCIMPL5(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a5, a4, a3) { HCIMPL_PROLOG(funcname) #define HCCALL0(funcname) funcname() #define HCCALL1(funcname, a1) funcname(0, 0, a1) #define HCCALL1_V(funcname, a1) funcname(0, 0, 0, a1) #define HCCALL2(funcname, a1, a2) funcname(0, a2, a1) #define HCCALL3(funcname, a1, a2, a3) funcname(0, a2, a1, a3) #define HCCALL4(funcname, a1, a2, a3, a4) funcname(0, a2, a1, a4, a3) #define HCCALL5(funcname, a1, a2, a3, a4, a5) funcname(0, 
a2, a1, a5, a4, a3) #define HCCALL1_PTR(rettype, funcptr, a1) rettype (F_CALL_CONV * funcptr)(int /* EAX */, int /* EDX */, a1) #define HCCALL2_PTR(rettype, funcptr, a1, a2) rettype (F_CALL_CONV * funcptr)(int /* EAX */, a2, a1) #else // SWIZZLE_REGARG_ORDER #define HCIMPL0(rettype, funcname) rettype F_CALL_CONV funcname() { HCIMPL_PROLOG(funcname) #define HCIMPL1(rettype, funcname, a1) rettype F_CALL_CONV funcname(a1) { HCIMPL_PROLOG(funcname) #define HCIMPL1_RAW(rettype, funcname, a1) rettype F_CALL_CONV funcname(a1) { #define HCIMPL1_V(rettype, funcname, a1) rettype F_CALL_CONV funcname(a1) { HCIMPL_PROLOG(funcname) #define HCIMPL2(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2) { HCIMPL_PROLOG(funcname) #define HCIMPL2_RAW(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2) { #define HCIMPL2_VV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a2, a1) { HCIMPL_PROLOG(funcname) #define HCIMPL2_IV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2) { HCIMPL_PROLOG(funcname) #define HCIMPL3(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a2, a3) { HCIMPL_PROLOG(funcname) #define HCIMPL3_RAW(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a2, a3) { #define HCIMPL4(rettype, funcname, a1, a2, a3, a4) rettype F_CALL_CONV funcname(a1, a2, a4, a3) { HCIMPL_PROLOG(funcname) #define HCIMPL5(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(a1, a2, a5, a4, a3) { HCIMPL_PROLOG(funcname) #define HCCALL0(funcname) funcname() #define HCCALL1(funcname, a1) funcname(a1) #define HCCALL1_V(funcname, a1) funcname(a1) #define HCCALL2(funcname, a1, a2) funcname(a1, a2) #define HCCALL3(funcname, a1, a2, a3) funcname(a1, a2, a3) #define HCCALL4(funcname, a1, a2, a3, a4) funcname(a1, a2, a4, a3) #define HCCALL5(funcname, a1, a2, a3, a4, a5) funcname(a1, a2, a5, a4, a3) #define HCCALL1_PTR(rettype, funcptr, a1) rettype (F_CALL_CONV * (funcptr))(a1) #define HCCALL2_PTR(rettype, funcptr, a1, a2) 
rettype (F_CALL_CONV * (funcptr))(a1, a2) #endif // !SWIZZLE_REGARG_ORDER #else // SWIZZLE_STKARG_ORDER #define HCIMPL0(rettype, funcname) rettype F_CALL_CONV funcname() { HCIMPL_PROLOG(funcname) #define HCIMPL1(rettype, funcname, a1) rettype F_CALL_CONV funcname(a1) { HCIMPL_PROLOG(funcname) #define HCIMPL1_RAW(rettype, funcname, a1) rettype F_CALL_CONV funcname(a1) { #define HCIMPL1_V(rettype, funcname, a1) rettype F_CALL_CONV funcname(a1) { HCIMPL_PROLOG(funcname) #define HCIMPL2(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2) { HCIMPL_PROLOG(funcname) #define HCIMPL2_RAW(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2) { #define HCIMPL2_VV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2) { HCIMPL_PROLOG(funcname) #define HCIMPL2_IV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2) { HCIMPL_PROLOG(funcname) #define HCIMPL3(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a2, a3) { HCIMPL_PROLOG(funcname) #define HCIMPL3_RAW(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a2, a3) { #define HCIMPL4(rettype, funcname, a1, a2, a3, a4) rettype F_CALL_CONV funcname(a1, a2, a3, a4) { HCIMPL_PROLOG(funcname) #define HCIMPL5(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(a1, a2, a3, a4, a5) { HCIMPL_PROLOG(funcname) #define HCCALL0(funcname) funcname() #define HCCALL1(funcname, a1) funcname(a1) #define HCCALL1_V(funcname, a1) funcname(a1) #define HCCALL2(funcname, a1, a2) funcname(a1, a2) #define HCCALL3(funcname, a1, a2, a3) funcname(a1, a2, a3) #define HCCALL4(funcname, a1, a2, a3, a4) funcname(a1, a2, a3, a4) #define HCCALL5(funcname, a1, a2, a3, a4, a5) funcname(a1, a2, a3, a4, a5) #define HCCALL1_PTR(rettype, funcptr, a1) rettype (F_CALL_CONV * (funcptr))(a1) #define HCCALL2_PTR(rettype, funcptr, a1, a2) rettype (F_CALL_CONV * (funcptr))(a1, a2) #endif // !SWIZZLE_STKARG_ORDER #define HCIMPLEND_RAW } #define HCIMPLEND FCALL_TRANSITION_END(); } 
//============================================================================================== // Throws an exception from an FCall. See rexcep.h for a list of valid // exception codes. //============================================================================================== #define FCThrow(reKind) FCThrowEx(reKind, 0, 0, 0, 0) //============================================================================================== // This version lets you attach a message with inserts (similar to // COMPlusThrow()). //============================================================================================== #define FCThrowEx(reKind, resID, arg1, arg2, arg3) \ { \ while (NULL == \ __FCThrow(__me, reKind, resID, arg1, arg2, arg3)) {}; \ return 0; \ } //============================================================================================== // Like FCThrow but can be used for a VOID-returning FCall. The only // difference is in the "return" statement. //============================================================================================== #define FCThrowVoid(reKind) FCThrowExVoid(reKind, 0, 0, 0, 0) //============================================================================================== // This version lets you attach a message with inserts (similar to // COMPlusThrow()). //============================================================================================== #define FCThrowExVoid(reKind, resID, arg1, arg2, arg3) \ { \ while (NULL == \ __FCThrow(__me, reKind, resID, arg1, arg2, arg3)) {}; \ return; \ } // Use FCThrowRes to throw an exception with a localized error message from the // ResourceManager in managed code. 
#define FCThrowRes(reKind, resourceName) FCThrowArgumentEx(reKind, NULL, resourceName) #define FCThrowArgumentNull(argName) FCThrowArgumentEx(kArgumentNullException, argName, NULL) #define FCThrowArgumentOutOfRange(argName, message) FCThrowArgumentEx(kArgumentOutOfRangeException, argName, message) #define FCThrowArgument(argName, message) FCThrowArgumentEx(kArgumentException, argName, message) #define FCThrowArgumentEx(reKind, argName, resourceName) \ { \ while (NULL == \ __FCThrowArgument(__me, reKind, argName, resourceName)) {}; \ return 0; \ } // Use FCThrowRes to throw an exception with a localized error message from the // ResourceManager in managed code. #define FCThrowResVoid(reKind, resourceName) FCThrowArgumentVoidEx(reKind, NULL, resourceName) #define FCThrowArgumentNullVoid(argName) FCThrowArgumentVoidEx(kArgumentNullException, argName, NULL) #define FCThrowArgumentOutOfRangeVoid(argName, message) FCThrowArgumentVoidEx(kArgumentOutOfRangeException, argName, message) #define FCThrowArgumentVoid(argName, message) FCThrowArgumentVoidEx(kArgumentException, argName, message) #define FCThrowArgumentVoidEx(reKind, argName, resourceName) \ { \ while (NULL == \ __FCThrowArgument(__me, reKind, argName, resourceName)) {}; \ return; \ } // The x86 JIT calling convention expects returned small types (e.g. bool) to be // widened on return. The C/C++ calling convention does not guarantee returned // small types to be widened. The small types has to be artifically widened on return // to fit x86 JIT calling convention. Thus fcalls returning small types has to // use the FC_XXX_RET types to force C/C++ compiler to do the widening. // // The most common small return type of FCALLs is bool. The widening of bool is // especially tricky since the value has to be also normalized. FC_BOOL_RET and // FC_RETURN_BOOL macros are provided to make it fool-proof. 
FCALLs returning bool // should be implemented using following pattern: // FCIMPL0(FC_BOOL_RET, Foo) // the return type should be FC_BOOL_RET // BOOL ret; // // FC_RETURN_BOOL(ret); // return statements should be FC_RETURN_BOOL // FCIMPLEND // This rules are verified in binder.cpp if COMPlus_ConsistencyCheck is set. #ifdef _PREFAST_ // Use prefast build to ensure that functions returning FC_BOOL_RET // are using FC_RETURN_BOOL to return it. Missing FC_RETURN_BOOL will // result into type mismatch error in prefast builds. This will also // catch misuses of FC_BOOL_RET for other places (e.g. in FCALL parameters). typedef LPVOID FC_BOOL_RET; #define FC_RETURN_BOOL(x) do { return (LPVOID)!!(x); } while(0) #else #if defined(TARGET_X86) || defined(TARGET_AMD64) // The return value is artifically widened on x86 and amd64 typedef INT32 FC_BOOL_RET; #else typedef CLR_BOOL FC_BOOL_RET; #endif #define FC_RETURN_BOOL(x) do { return !!(x); } while(0) #endif #if defined(TARGET_X86) || defined(TARGET_AMD64) // The return value is artifically widened on x86 and amd64 typedef UINT32 FC_CHAR_RET; typedef INT32 FC_INT8_RET; typedef UINT32 FC_UINT8_RET; typedef INT32 FC_INT16_RET; typedef UINT32 FC_UINT16_RET; #else typedef CLR_CHAR FC_CHAR_RET; typedef INT8 FC_INT8_RET; typedef UINT8 FC_UINT8_RET; typedef INT16 FC_INT16_RET; typedef UINT16 FC_UINT16_RET; #endif // FC_TypedByRef should be used for TypedReferences in FCall signatures #define FC_TypedByRef TypedByRef #define FC_DECIMAL DECIMAL // The fcall entrypoints has to be at unique addresses. Use this helper macro to make // the code of the fcalls unique if you get assert in ecall.cpp that mentions it. // The parameter of the FCUnique macro is an arbitrary 32-bit random non-zero number. 
#define FCUnique(unique) { Volatile<int> u = (unique); while (u.LoadWithoutBarrier() == 0) { }; } // FCALL contracts come in two forms: // // Short form that should be used if the FCALL contract does not have any extras like preconditions, failure injection. Example: // // FCIMPL0(void, foo) // { // FCALL_CONTRACT; // ... // // Long form that should be used otherwise. Example: // // FCIMPL1(void, foo, void *p) // { // CONTRACTL { // FCALL_CHECK; // PRECONDITION(CheckPointer(p)); // } CONTRACTL_END; // ... // // FCALL_CHECK defines the actual contract conditions required for FCALLs // #define FCALL_CHECK \ THROWS; \ DISABLED(GC_TRIGGERS); /* FCALLS with HELPER frames have issues with GC_TRIGGERS */ \ MODE_COOPERATIVE; // // FCALL_CONTRACT should be the following shortcut: // // #define FCALL_CONTRACT CONTRACTL { FCALL_CHECK; } CONTRACTL_END; // // Since there is very little value in having runtime contracts in FCalls, FCALL_CONTRACT is defined as static contract only for performance reasons. // #define FCALL_CONTRACT \ STATIC_CONTRACT_THROWS; \ /* FCALLS are a special case contract wise, they are "NOTRIGGER, unless you setup a frame" */ \ STATIC_CONTRACT_GC_NOTRIGGER; \ STATIC_CONTRACT_MODE_COOPERATIVE #endif //__FCall_h__
1
dotnet/runtime
66,234
Remove compiler warning suppression
Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
AaronRobinsonMSFT
2022-03-05T03:45:49Z
2022-03-09T00:57:12Z
220e67755ae6323a01011b83070dff6f84b02519
853e494abd1c1e12d28a76829b4680af7f694afa
Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
./src/coreclr/vm/object.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // OBJECT.CPP // // Definitions of a Com+ Object // #include "common.h" #include "vars.hpp" #include "class.h" #include "object.h" #include "threads.h" #include "excep.h" #include "eeconfig.h" #include "gcheaputilities.h" #include "field.h" #include "argdestination.h" SVAL_IMPL(INT32, ArrayBase, s_arrayBoundsZero); // follow the necessary rules to get a new valid hashcode for an object DWORD Object::ComputeHashCode() { DWORD hashCode; // note that this algorithm now uses at most HASHCODE_BITS so that it will // fit into the objheader if the hashcode has to be moved back into the objheader // such as for an object that is being frozen do { // we use the high order bits in this case because they're more random hashCode = GetThread()->GetNewHashCode() >> (32-HASHCODE_BITS); } while (hashCode == 0); // need to enforce hashCode != 0 // verify that it really fits into HASHCODE_BITS _ASSERTE((hashCode & ((1<<HASHCODE_BITS)-1)) == hashCode); return hashCode; } #ifndef DACCESS_COMPILE INT32 Object::GetHashCodeEx() { CONTRACTL { MODE_COOPERATIVE; THROWS; GC_NOTRIGGER; } CONTRACTL_END // This loop exists because we're inspecting the header dword of the object // and it may change under us because of races with other threads. // On top of that, it may have the spin lock bit set, in which case we're // not supposed to change it. // In all of these case, we need to retry the operation. DWORD iter = 0; DWORD dwSwitchCount = 0; while (true) { DWORD bits = GetHeader()->GetBits(); if (bits & BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX) { if (bits & BIT_SBLK_IS_HASHCODE) { // Common case: the object already has a hash code return bits & MASK_HASHCODE; } else { // We have a sync block index. 
This means if we already have a hash code, // it is in the sync block, otherwise we generate a new one and store it there SyncBlock *psb = GetSyncBlock(); DWORD hashCode = psb->GetHashCode(); if (hashCode != 0) return hashCode; hashCode = ComputeHashCode(); return psb->SetHashCode(hashCode); } } else { // If a thread is holding the thin lock we need a syncblock if ((bits & (SBLK_MASK_LOCK_THREADID)) != 0) { GetSyncBlock(); // No need to replicate the above code dealing with sync blocks // here - in the next iteration of the loop, we'll realize // we have a syncblock, and we'll do the right thing. } else { // We want to change the header in this case, so we have to check the BIT_SBLK_SPIN_LOCK bit first if (bits & BIT_SBLK_SPIN_LOCK) { iter++; if ((iter % 1024) != 0 && g_SystemInfo.dwNumberOfProcessors > 1) { YieldProcessorNormalized(); // indicate to the processor that we are spinning } else { __SwitchToThread(0, ++dwSwitchCount); } continue; } DWORD hashCode = ComputeHashCode(); DWORD newBits = bits | BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX | BIT_SBLK_IS_HASHCODE | hashCode; if (GetHeader()->SetBits(newBits, bits) == bits) return hashCode; // Header changed under us - let's restart this whole thing. } } } } #endif // #ifndef DACCESS_COMPILE BOOL Object::ValidateObjectWithPossibleAV() { CANNOT_HAVE_CONTRACT; SUPPORTS_DAC; return GetGCSafeMethodTable()->ValidateWithPossibleAV(); } #ifndef DACCESS_COMPILE // There are cases where it is not possible to get a type handle during a GC. // If we can get the type handle, this method will return it. // Otherwise, the method will return NULL. TypeHandle Object::GetGCSafeTypeHandleIfPossible() const { CONTRACTL { NOTHROW; GC_NOTRIGGER; if(!IsGCThread()) { MODE_COOPERATIVE; } } CONTRACTL_END; // Although getting the type handle is unsafe and could cause recursive type lookups // in some cases, it's always safe and straightforward to get to the MethodTable. 
MethodTable * pMT = GetGCSafeMethodTable(); _ASSERTE(pMT != NULL); if (pMT == g_pFreeObjectMethodTable) { return NULL; } // Don't look at types that belong to an unloading AppDomain, or else // pObj->GetGCSafeTypeHandle() can AV. For example, we encountered this AV when pObj // was an array like this: // // MyValueType1<MyValueType2>[] myArray // // where MyValueType1<T> & MyValueType2 are defined in different assemblies. In such // a case, looking up the type handle for myArray requires looking in // MyValueType1<T>'s module's m_AssemblyRefByNameTable, which is garbage if its // AppDomain is unloading. // // Another AV was encountered in a similar case, // // MyRefType1<MyRefType2>[] myArray // // where MyRefType2's module was unloaded by the time the GC occurred. In at least // one case, the GC was caused by the AD unload itself (AppDomain::Unload -> // AppDomain::Exit -> GCInterface::AddMemoryPressure -> WKS::GCHeapUtilities::GarbageCollect). // // To protect against all scenarios, verify that // // * The MT of the object is not getting unloaded, OR // * In the case of arrays (potentially of arrays of arrays of arrays ...), the // MT of the innermost element is not getting unloaded. This then ensures the // MT of the original object (i.e., array) itself must not be getting // unloaded either, since the MTs of arrays and of their elements are // allocated on the same loader allocator. Module * pLoaderModule = pMT->GetLoaderModule(); // Don't look up types that are unloading due to Collectible Assemblies. Haven't been // able to find a case where we actually encounter objects like this that can cause // problems; however, it seems prudent to add this protection just in case. 
LoaderAllocator * pLoaderAllocator = pLoaderModule->GetLoaderAllocator(); _ASSERTE(pLoaderAllocator != NULL); if ((pLoaderAllocator->IsCollectible()) && (ObjectHandleIsNull(pLoaderAllocator->GetLoaderAllocatorObjectHandle()))) { return NULL; } // Ok, it should now be safe to get the type handle return GetGCSafeTypeHandle(); } /* static */ BOOL Object::SupportsInterface(OBJECTREF pObj, MethodTable* pInterfaceMT) { CONTRACTL { THROWS; GC_TRIGGERS; INJECT_FAULT(COMPlusThrowOM()); PRECONDITION(CheckPointer(pInterfaceMT)); PRECONDITION(pObj->GetMethodTable()->IsRestored_NoLogging()); PRECONDITION(pInterfaceMT->IsInterface()); } CONTRACTL_END BOOL bSupportsItf = FALSE; GCPROTECT_BEGIN(pObj) { // Make sure the interface method table has been restored. pInterfaceMT->CheckRestore(); // Check to see if the static class definition indicates we implement the interface. MethodTable * pMT = pObj->GetMethodTable(); if (pMT->CanCastToInterface(pInterfaceMT)) { bSupportsItf = TRUE; } #ifdef FEATURE_COMINTEROP else if (pMT->IsComObjectType()) { // If this is a COM object, the static class definition might not be complete so we need // to check if the COM object implements the interface. 
bSupportsItf = ComObject::SupportsInterface(pObj, pInterfaceMT); } #endif // FEATURE_COMINTEROP } GCPROTECT_END(); return bSupportsItf; } Assembly *AssemblyBaseObject::GetAssembly() { WRAPPER_NO_CONTRACT; return m_pAssembly->GetAssembly(); } STRINGREF AllocateString(SString sstr) { CONTRACTL { THROWS; GC_TRIGGERS; } CONTRACTL_END; COUNT_T length = sstr.GetCount(); // count of WCHARs excluding terminating NULL STRINGREF strObj = AllocateString(length); memcpyNoGCRefs(strObj->GetBuffer(), sstr.GetUnicode(), length*sizeof(WCHAR)); return strObj; } CHARARRAYREF AllocateCharArray(DWORD dwArrayLength) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_COOPERATIVE; } CONTRACTL_END; return (CHARARRAYREF)AllocatePrimitiveArray(ELEMENT_TYPE_CHAR, dwArrayLength); } void Object::ValidateHeap(BOOL bDeep) { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_FORBID_FAULT; #if defined (VERIFY_HEAP) //no need to verify next object's header in this case //since this is called in verify_heap, which will verfiy every object anyway Validate(bDeep, FALSE); #endif } void Object::SetOffsetObjectRef(DWORD dwOffset, size_t dwValue) { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_FORBID_FAULT; STATIC_CONTRACT_MODE_COOPERATIVE; OBJECTREF* location; OBJECTREF o; location = (OBJECTREF *) &GetData()[dwOffset]; o = ObjectToOBJECTREF(*(Object **) &dwValue); SetObjectReference( location, o ); } void SetObjectReferenceUnchecked(OBJECTREF *dst,OBJECTREF ref) { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_FORBID_FAULT; STATIC_CONTRACT_MODE_COOPERATIVE; STATIC_CONTRACT_CANNOT_TAKE_LOCK; // Assign value. We use casting to avoid going thru the overloaded // OBJECTREF= operator which in this case would trigger a false // write-barrier violation assert. 
VolatileStore((Object**)dst, OBJECTREFToObject(ref)); #ifdef _DEBUG Thread::ObjectRefAssign(dst); #endif ErectWriteBarrier(dst, ref); } void STDCALL CopyValueClassUnchecked(void* dest, void* src, MethodTable *pMT) { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_FORBID_FAULT; STATIC_CONTRACT_MODE_COOPERATIVE; _ASSERTE(!pMT->IsArray()); // bunch of assumptions about arrays wrong. if (pMT->ContainsPointers()) { memmoveGCRefs(dest, src, pMT->GetNumInstanceFieldBytes()); } else { switch (pMT->GetNumInstanceFieldBytes()) { case 1: *(UINT8*)dest = *(UINT8*)src; break; #ifndef ALIGN_ACCESS // we can hit an alignment fault if the value type has multiple // smaller fields. Example: if there are two I4 fields, the // value class can be aligned to 4-byte boundaries, yet the // NumInstanceFieldBytes is 8 case 2: *(UINT16*)dest = *(UINT16*)src; break; case 4: *(UINT32*)dest = *(UINT32*)src; break; case 8: *(UINT64*)dest = *(UINT64*)src; break; #endif // !ALIGN_ACCESS default: memcpyNoGCRefs(dest, src, pMT->GetNumInstanceFieldBytes()); break; } } } // Copy value class into the argument specified by the argDest. 
// The destOffset is nonzero when copying values into Nullable<T>, it is the offset // of the T value inside of the Nullable<T> void STDCALL CopyValueClassArgUnchecked(ArgDestination *argDest, void* src, MethodTable *pMT, int destOffset) { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_FORBID_FAULT; STATIC_CONTRACT_MODE_COOPERATIVE; #if defined(UNIX_AMD64_ABI) if (argDest->IsStructPassedInRegs()) { argDest->CopyStructToRegisters(src, pMT->GetNumInstanceFieldBytes(), destOffset); return; } #elif defined(TARGET_ARM64) if (argDest->IsHFA()) { argDest->CopyHFAStructToRegister(src, pMT->GetNumInstanceFieldBytes()); return; } #endif // UNIX_AMD64_ABI // destOffset is only valid for Nullable<T> passed in registers _ASSERTE(destOffset == 0); CopyValueClassUnchecked(argDest->GetDestinationAddress(), src, pMT); } // Initialize the value class argument to zeros void InitValueClassArg(ArgDestination *argDest, MethodTable *pMT) { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_FORBID_FAULT; STATIC_CONTRACT_MODE_COOPERATIVE; #if defined(UNIX_AMD64_ABI) if (argDest->IsStructPassedInRegs()) { argDest->ZeroStructInRegisters(pMT->GetNumInstanceFieldBytes()); return; } #endif InitValueClass(argDest->GetDestinationAddress(), pMT); } #if defined (VERIFY_HEAP) #include "dbginterface.h" // make the checking code goes as fast as possible! 
#if defined(_MSC_VER) #pragma optimize("tgy", on) #endif #define CREATE_CHECK_STRING(x) #x #define CHECK_AND_TEAR_DOWN(x) \ do{ \ if (!(x)) \ { \ _ASSERTE(!CREATE_CHECK_STRING(x)); \ EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE); \ } \ } while (0) VOID Object::Validate(BOOL bDeep, BOOL bVerifyNextHeader, BOOL bVerifySyncBlock) { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_FORBID_FAULT; STATIC_CONTRACT_MODE_COOPERATIVE; STATIC_CONTRACT_CANNOT_TAKE_LOCK; if (g_IBCLogger.InstrEnabled() && !GCStress<cfg_any>::IsEnabled()) { // If we are instrumenting for IBC (and GCStress is not enabled) // then skip these Object::Validate() as they slow down the // instrument phase by an order of magnitude return; } if (g_fEEShutDown & ShutDown_Phase2) { // During second phase of shutdown the code below is not guaranteed to work. return; } #ifdef _DEBUG { Thread *pThread = GetThreadNULLOk(); if (pThread != NULL && !(pThread->PreemptiveGCDisabled())) { // Debugger helper threads are special in that they take over for // what would normally be a nonEE thread (the RCThread). If an // EE thread is doing RCThread duty, then it should be treated // as such. // // There are some GC threads in the same kind of category. Note that // GetThread() sometimes returns them, if DLL_THREAD_ATTACH notifications // have run some managed code. if (!dbgOnly_IsSpecialEEThread() && !IsGCSpecialThread()) _ASSERTE(!"OBJECTREF being accessed while thread is in preemptive GC mode."); } } #endif { // ValidateInner can throw or fault on failure which violates contract. CONTRACT_VIOLATION(ThrowsViolation | FaultViolation); // using inner helper because of TRY and stack objects with destructors. 
ValidateInner(bDeep, bVerifyNextHeader, bVerifySyncBlock); } } VOID Object::ValidateInner(BOOL bDeep, BOOL bVerifyNextHeader, BOOL bVerifySyncBlock) { STATIC_CONTRACT_THROWS; // See CONTRACT_VIOLATION above STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_FAULT; // See CONTRACT_VIOLATION above STATIC_CONTRACT_MODE_COOPERATIVE; STATIC_CONTRACT_CANNOT_TAKE_LOCK; int lastTest = 0; EX_TRY { // in order to avoid contract violations in the EH code we'll allow AVs here, // they'll be handled in the catch block AVInRuntimeImplOkayHolder avOk; MethodTable *pMT = GetGCSafeMethodTable(); lastTest = 1; CHECK_AND_TEAR_DOWN(pMT && pMT->Validate()); lastTest = 2; bool noRangeChecks = (g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_NO_RANGE_CHECKS) == EEConfig::HEAPVERIFY_NO_RANGE_CHECKS; // noRangeChecks depends on initial values being FALSE BOOL bSmallObjectHeapPtr = FALSE, bLargeObjectHeapPtr = FALSE; if (!noRangeChecks) { bSmallObjectHeapPtr = GCHeapUtilities::GetGCHeap()->IsHeapPointer(this, true); if (!bSmallObjectHeapPtr) bLargeObjectHeapPtr = GCHeapUtilities::GetGCHeap()->IsHeapPointer(this); CHECK_AND_TEAR_DOWN(bSmallObjectHeapPtr || bLargeObjectHeapPtr); } lastTest = 3; if (bDeep) { CHECK_AND_TEAR_DOWN(GetHeader()->Validate(bVerifySyncBlock)); } lastTest = 4; if (bDeep && (g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_GC)) { GCHeapUtilities::GetGCHeap()->ValidateObjectMember(this); } lastTest = 5; // since bSmallObjectHeapPtr is initialized to FALSE // we skip checking noRangeChecks since if skipping // is enabled bSmallObjectHeapPtr will always be false. 
if (bSmallObjectHeapPtr) { CHECK_AND_TEAR_DOWN(!GCHeapUtilities::GetGCHeap()->IsLargeObject(this)); } lastTest = 6; lastTest = 7; _ASSERTE(GCHeapUtilities::IsGCHeapInitialized()); // try to validate next object's header if (bDeep && bVerifyNextHeader && GCHeapUtilities::GetGCHeap()->RuntimeStructuresValid() //NextObj could be very slow if concurrent GC is going on && !GCHeapUtilities::GetGCHeap ()->IsConcurrentGCInProgress ()) { Object * nextObj = GCHeapUtilities::GetGCHeap ()->NextObj (this); if ((nextObj != NULL) && (nextObj->GetGCSafeMethodTable() != g_pFreeObjectMethodTable)) { // we need a read barrier here - to make sure we read the object header _after_ // reading data that tells us that the object is eligible for verification // (also see: gc.cpp/a_fit_segment_end_p) VOLATILE_MEMORY_BARRIER(); CHECK_AND_TEAR_DOWN(nextObj->GetHeader()->Validate(FALSE)); } } lastTest = 8; #ifdef FEATURE_64BIT_ALIGNMENT if (pMT->RequiresAlign8()) { CHECK_AND_TEAR_DOWN((((size_t)this) & 0x7) == (pMT->IsValueType()? 4:0)); } lastTest = 9; #endif // FEATURE_64BIT_ALIGNMENT } EX_CATCH { STRESS_LOG3(LF_ASSERT, LL_ALWAYS, "Detected use of corrupted OBJECTREF: %p [MT=%p] (lastTest=%d)", this, lastTest > 0 ? (*(size_t*)this) : 0, lastTest); CHECK_AND_TEAR_DOWN(!"Detected use of a corrupted OBJECTREF. Possible GC hole."); } EX_END_CATCH(SwallowAllExceptions); } #endif // VERIFY_HEAP /*==================================NewString=================================== **Action: Creates a System.String object. 
**Returns: **Arguments: **Exceptions: ==============================================================================*/ STRINGREF StringObject::NewString(INT32 length) { CONTRACTL { GC_TRIGGERS; MODE_COOPERATIVE; PRECONDITION(length>=0); } CONTRACTL_END; STRINGREF pString; if (length<0) { return NULL; } else if (length == 0) { return GetEmptyString(); } else { pString = AllocateString(length); _ASSERTE(pString->GetBuffer()[length] == 0); return pString; } } /*==================================NewString=================================== **Action: Many years ago, VB didn't have the concept of a byte array, so enterprising ** users created one by allocating a BSTR with an odd length and using it to ** store bytes. A generation later, we're still stuck supporting this behavior. ** The way that we do this is to take advantage of the difference between the ** array length and the string length. The string length will always be the ** number of characters between the start of the string and the terminating 0. ** If we need an odd number of bytes, we'll take one wchar after the terminating 0. ** (e.g. at position StringLength+1). The high-order byte of this wchar is ** reserved for flags and the low-order byte is our odd byte. This function is ** used to allocate a string of that shape, but we don't actually mark the ** trailing byte as being in use yet. **Returns: A newly allocated string. Null if length is less than 0. **Arguments: length -- the length of the string to allocate ** bHasTrailByte -- whether the string also has a trailing byte. **Exceptions: OutOfMemoryException if AllocateString fails. 
==============================================================================*/
// Allocates a string that may carry a VB "trail byte" after the terminator
// (see the comment block above).  The trail byte itself is not set here.
STRINGREF StringObject::NewString(INT32 length, BOOL bHasTrailByte) {
    CONTRACTL {
        GC_TRIGGERS;
        MODE_COOPERATIVE;
        PRECONDITION(length>=0 && length != INT32_MAX);
    } CONTRACTL_END;

    STRINGREF pString;
    if (length<0 || length == INT32_MAX) {
        // INT32_MAX is rejected because length+1 (the trail-byte slot) would overflow.
        return NULL;
    }
    else if (length == 0) {
        return GetEmptyString();
    }
    else {
        pString = AllocateString(length);
        _ASSERTE(pString->GetBuffer()[length]==0);
        if (bHasTrailByte) {
            // The wchar after the terminator must also start out zeroed.
            _ASSERTE(pString->GetBuffer()[length+1]==0);
        }
    }

    return pString;
}

//========================================================================
// Creates a System.String object and initializes from
// the supplied null-terminated C string.
//
// Maps NULL to null. This function does *not* return null to indicate
// error situations: it throws an exception instead.
//========================================================================
STRINGREF StringObject::NewString(const WCHAR *pwsz)
{
    CONTRACTL {
        GC_TRIGGERS;
        MODE_COOPERATIVE;
    } CONTRACTL_END;

    if (!pwsz)
    {
        return NULL;
    }
    else
    {
        DWORD nch = (DWORD)wcslen(pwsz);
        if (nch==0) {
            return GetEmptyString();
        }

#if 0
        //
        // This assert is disabled because it is valid for us to get a
        // pointer from the gc heap here as long as it is pinned.  This
        // can happen when a string is marshalled to unmanaged by
        // pinning and then later put into a struct and that struct is
        // then marshalled to managed.
        //
        _ASSERTE(!GCHeapUtilities::GetGCHeap()->IsHeapPointer((BYTE *) pwsz) || !"pwsz can not point to GC Heap");
#endif // 0

        STRINGREF pString = AllocateString( nch );

        // memcpyNoGCRefs: the payload is raw WCHARs, no object references,
        // so no write barrier is needed.
        memcpyNoGCRefs(pString->GetBuffer(), pwsz, nch*sizeof(WCHAR));
        _ASSERTE(pString->GetBuffer()[nch] == 0);
        return pString;
    }
}

#if defined(_MSC_VER) && defined(TARGET_X86)
#pragma optimize("y", on)        // Small critical routines, don't put in EBP frame
#endif

// Creates a System.String from an explicit (pointer, length) pair.
// NULL maps to null; non-positive lengths map to the empty string.
STRINGREF StringObject::NewString(const WCHAR *pwsz, int length) {
    CONTRACTL {
        THROWS;
        GC_TRIGGERS;
        MODE_COOPERATIVE;
        PRECONDITION(length>=0);
    } CONTRACTL_END;

    if (!pwsz)
    {
        return NULL;
    }
    else if (length <= 0) {
        return GetEmptyString();
    }
    else {
#if 0
        //
        // This assert is disabled because it is valid for us to get a
        // pointer from the gc heap here as long as it is pinned.  This
        // can happen when a string is marshalled to unmanaged by
        // pinning and then later put into a struct and that struct is
        // then marshalled to managed.
        //
        _ASSERTE(!GCHeapUtilities::GetGCHeap()->IsHeapPointer((BYTE *) pwsz) || !"pwsz can not point to GC Heap");
#endif // 0

        STRINGREF pString = AllocateString(length);

        memcpyNoGCRefs(pString->GetBuffer(), pwsz, length*sizeof(WCHAR));
        _ASSERTE(pString->GetBuffer()[length] == 0);
        return pString;
    }
}

#if defined(_MSC_VER) && defined(TARGET_X86)
#pragma optimize("", on)        // Go back to command line default optimizations
#endif

// Creates a System.String from a null-terminated UTF-8 string.
// Throws ArgumentException on invalid UTF-8.
STRINGREF StringObject::NewString(LPCUTF8 psz)
{
    CONTRACTL {
        GC_TRIGGERS;
        MODE_COOPERATIVE;
        THROWS;
        PRECONDITION(CheckPointer(psz));
    } CONTRACTL_END;

    int length = (int)strlen(psz);
    if (length == 0) {
        return GetEmptyString();
    }
    CQuickBytes qb;
    // UTF-16 output never needs more code units than the UTF-8 byte count.
    WCHAR* pwsz = (WCHAR*) qb.AllocThrows((length) * sizeof(WCHAR));
    length = WszMultiByteToWideChar(CP_UTF8, 0, psz, length, pwsz, length);
    if (length == 0) {
        COMPlusThrow(kArgumentException, W("Arg_InvalidUTF8String"));
    }
    return NewString(pwsz, length);
}

// Creates a System.String from a counted (not necessarily terminated) UTF-8 buffer.
// NULL maps to null; cBytes == 0 maps to the empty string.
STRINGREF StringObject::NewString(LPCUTF8 psz, int cBytes)
{
    CONTRACTL {
        GC_TRIGGERS;
        MODE_COOPERATIVE;
        THROWS;
        PRECONDITION(CheckPointer(psz, NULL_OK));
    } CONTRACTL_END;

    if (!psz)
        return NULL;

    _ASSERTE(psz);
    _ASSERTE(cBytes >= 0);
    if (cBytes == 0) {
        return GetEmptyString();
    }
    int cWszBytes = 0;
    // Guard the cBytes * sizeof(WCHAR) multiplication against overflow.
    if (!ClrSafeInt<int>::multiply(cBytes, sizeof(WCHAR), cWszBytes))
        COMPlusThrowOM();
    CQuickBytes qb;
    WCHAR* pwsz = (WCHAR*) qb.AllocThrows(cWszBytes);
    int length = WszMultiByteToWideChar(CP_UTF8, 0, psz, cBytes, pwsz, cBytes);
    if (length == 0) {
        COMPlusThrow(kArgumentException, W("Arg_InvalidUTF8String"));
    }
    return NewString(pwsz, length);
}

//
//
// STATIC MEMBER VARIABLES
//
//
STRINGREF* StringObject::EmptyStringRefPtr=NULL;

//The special string helpers are used as flag bits for weird strings that have bytes
//after the terminating 0.  The only case where we use this right now is the VB BSTR as
//byte array which is described in MakeStringAsByteArrayFromBytes.
#define SPECIAL_STRING_VB_BYTE_ARRAY 0x100

// True if the flag wchar marks a VB byte-array string.
FORCEINLINE BOOL MARKS_VB_BYTE_ARRAY(WCHAR x)
{
    return static_cast<BOOL>(x & SPECIAL_STRING_VB_BYTE_ARRAY);
}

// Packs a trail byte together with the VB byte-array flag into one wchar.
FORCEINLINE WCHAR MAKE_VB_TRAIL_BYTE(BYTE x)
{
    return static_cast<WCHAR>(x) | SPECIAL_STRING_VB_BYTE_ARRAY;
}

// Extracts the trail byte (low 8 bits) from the flag wchar.
FORCEINLINE BYTE GET_VB_TRAIL_BYTE(WCHAR x)
{
    return static_cast<BYTE>(x & 0xFF);
}

/*==============================InitEmptyStringRefPtr============================
**Action:  Gets an empty string refptr, cache the result.
**Returns: The retrieved STRINGREF.
==============================================================================*/
// Lazily resolves and caches the shared empty-string object reference.
STRINGREF* StringObject::InitEmptyStringRefPtr() {
    CONTRACTL {
        THROWS;
        MODE_ANY;
        GC_TRIGGERS;
    } CONTRACTL_END;

    GCX_COOP();

    EEStringData data(0, W(""), TRUE);
    EmptyStringRefPtr = SystemDomain::System()->DefaultDomain()->GetLoaderAllocator()->GetStringObjRefPtrFromUnicodeString(&data);
    return EmptyStringRefPtr;
}

// ASCII-only case-insensitive comparison of a WCHAR string against an 8-bit string.
// Returns TRUE with *result set when the comparison could be completed; returns
// FALSE (without deciding) as soon as either side contains a char > 0x7F, since
// that would require locale-aware casing which this helper deliberately avoids.
//
// strAChars must be null-terminated, with an appropriate aLength
// strBChars must be null-terminated, with an appropriate bLength OR bLength == -1
// If bLength == -1, we stop on the first null character in strBChars
BOOL StringObject::CaseInsensitiveCompHelper(_In_reads_(aLength) WCHAR *strAChars, _In_z_ INT8 *strBChars, INT32 aLength, INT32 bLength, INT32 *result) {
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
        PRECONDITION(CheckPointer(strAChars));
        PRECONDITION(CheckPointer(strBChars));
        PRECONDITION(CheckPointer(result));
    } CONTRACTL_END;

    WCHAR *strAStart = strAChars;
    INT8 *strBStart = strBChars;
    unsigned charA;
    unsigned charB;

    for(;;) {
        charA = *strAChars;
        charB = (unsigned) *strBChars;

        //Case-insensitive comparison on chars greater than 0x7F
        //requires a locale-aware casing operation and we're not going there.
        if ((charA|charB)>0x7F) {
            *result = 0;
            return FALSE;
        }

        // uppercase both chars.
        // XOR with 0x20 maps 'a'..'z' to 'A'..'Z' in ASCII.
        if (charA>='a' && charA<='z') {
            charA ^= 0x20;
        }
        if (charB>='a' && charB<='z') {
            charB ^= 0x20;
        }

        //Return the (case-insensitive) difference between them.
        if (charA!=charB) {
            *result = (int)(charA-charB);
            return TRUE;
        }

        if (charA==0)   // both strings have null character
        {
            if (bLength == -1) {
                // B's length is its terminator: compare remaining length of A.
                *result = aLength - static_cast<INT32>(strAChars - strAStart);
                return TRUE;
            }
            if (strAChars==strAStart + aLength || strBChars==strBStart + bLength) {
                *result = aLength - bLength;
                return TRUE;
            }
            // else both embedded zeros: keep going.
        }

        // Next char
        strAChars++;
        strBChars++;
    }
}

/*============================InternalTrailByteCheck============================
**Action: Many years ago, VB didn't have the concept of a byte array, so enterprising
**        users created one by allocating a BSTR with an odd length and using it to
**        store bytes.  A generation later, we're still stuck supporting this behavior.
**        The way that we do this is stick the trail byte in the sync block
**        whenever we encounter such a situation.  Since we expect this to be a very corner case
**        accessing the sync block seems like a good enough solution
**
**Returns: True if <CODE>str</CODE> contains a VB trail byte, false otherwise.
**Arguments: str -- The string to be examined.
**Exceptions: None
==============================================================================*/
BOOL StringObject::HasTrailByte() {
    WRAPPER_NO_CONTRACT;

    SyncBlock * pSyncBlock = PassiveGetSyncBlock();
    if(pSyncBlock != NULL)
    {
        return pSyncBlock->HasCOMBstrTrailByte();
    }

    return FALSE;
}

/*=================================GetTrailByte=================================
**Action:  If <CODE>str</CODE> contains a vb trail byte, returns a copy of it.
**Returns: True if <CODE>str</CODE> contains a trail byte.  *bTrailByte is set to
**         the byte in question if <CODE>str</CODE> does have a trail byte, otherwise
**         it's set to 0.
**Arguments: str -- The string being examined.
**           bTrailByte -- An out param to hold the value of the trail byte.
**Exceptions: None.
==============================================================================*/
// Reads the VB trail byte out of the sync block, if one has been recorded.
BOOL StringObject::GetTrailByte(BYTE *bTrailByte) {
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
    } CONTRACTL_END;

    _ASSERTE(bTrailByte);
    *bTrailByte=0;

    BOOL retValue = HasTrailByte();

    if(retValue)
    {
        *bTrailByte = GET_VB_TRAIL_BYTE(GetHeader()->PassiveGetSyncBlock()->GetCOMBstrTrailByte());
    }

    return retValue;
}

/*=================================SetTrailByte=================================
**Action: Sets the trail byte in the sync block
**Returns: True.
**Arguments: str -- The string into which to set the trail byte.
**           bTrailByte -- The trail byte to be added to the string.
**Exceptions: None.
==============================================================================*/
BOOL StringObject::SetTrailByte(BYTE bTrailByte) {
    WRAPPER_NO_CONTRACT;

    GetHeader()->GetSyncBlock()->SetCOMBstrTrailByte(MAKE_VB_TRAIL_BYTE(bTrailByte));
    return TRUE;
}

#ifdef USE_CHECKED_OBJECTREFS

//-------------------------------------------------------------
// Default constructor, for non-initializing declarations:
//
//      OBJECTREF or;
//-------------------------------------------------------------
OBJECTREF::OBJECTREF()
{
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;
    STATIC_CONTRACT_FORBID_FAULT;

    // Poison the value so use-before-init is caught; register with the
    // per-thread OBJECTREF validity tracking.
    m_asObj = (Object*)POISONC;
    Thread::ObjectRefNew(this);
}

//-------------------------------------------------------------
// Copy constructor, for passing OBJECTREF's as function arguments.
//-------------------------------------------------------------
OBJECTREF::OBJECTREF(const OBJECTREF & objref)
{
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;
    STATIC_CONTRACT_MODE_COOPERATIVE;
    STATIC_CONTRACT_FORBID_FAULT;

    VALIDATEOBJECT(objref.m_asObj);

    // !!! If this assert is fired, there are two possibilities:
    // !!! 1.  You are doing a type cast, e.g.  *(OBJECTREF*)pObj
    // !!!     Instead, you should use ObjectToOBJECTREF(*(Object**)pObj),
    // !!!                          or ObjectToSTRINGREF(*(StringObject**)pObj)
    // !!! 2.  There is a real GC hole here.
    // !!! Either way you need to fix the code.
    _ASSERTE(Thread::IsObjRefValid(&objref));
    if ((objref.m_asObj != 0) &&
        ((IGCHeap*)GCHeapUtilities::GetGCHeap())->IsHeapPointer( (BYTE*)this ))
    {
        _ASSERTE(!"Write Barrier violation. Must use SetObjectReference() to assign OBJECTREF's into the GC heap!");
    }
    m_asObj = objref.m_asObj;

    if (m_asObj != 0) {
        ENABLESTRESSHEAP();
    }

    Thread::ObjectRefNew(this);
}

//-------------------------------------------------------------
// VolatileLoadWithoutBarrier constructor
//-------------------------------------------------------------
OBJECTREF::OBJECTREF(const OBJECTREF *pObjref, tagVolatileLoadWithoutBarrier tag)
{
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;
    STATIC_CONTRACT_MODE_COOPERATIVE;
    STATIC_CONTRACT_FORBID_FAULT;

    // Read the source exactly once, without a memory barrier.
    Object* objrefAsObj = VolatileLoadWithoutBarrier(&pObjref->m_asObj);
    VALIDATEOBJECT(objrefAsObj);

    // !!! If this assert is fired, there are two possibilities:
    // !!! 1.  You are doing a type cast, e.g.  *(OBJECTREF*)pObj
    // !!!     Instead, you should use ObjectToOBJECTREF(*(Object**)pObj),
    // !!!                          or ObjectToSTRINGREF(*(StringObject**)pObj)
    // !!! 2.  There is a real GC hole here.
    // !!! Either way you need to fix the code.
    _ASSERTE(Thread::IsObjRefValid(pObjref));
    if ((objrefAsObj != 0) &&
        ((IGCHeap*)GCHeapUtilities::GetGCHeap())->IsHeapPointer( (BYTE*)this ))
    {
        _ASSERTE(!"Write Barrier violation. Must use SetObjectReference() to assign OBJECTREF's into the GC heap!");
    }
    m_asObj = objrefAsObj;

    if (m_asObj != 0) {
        ENABLESTRESSHEAP();
    }

    Thread::ObjectRefNew(this);
}

//-------------------------------------------------------------
// To allow NULL to be used as an OBJECTREF.
//-------------------------------------------------------------
OBJECTREF::OBJECTREF(TADDR nul)
{
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;
    STATIC_CONTRACT_FORBID_FAULT;

    //_ASSERTE(nul == 0);
    m_asObj = (Object*)nul;
    if( m_asObj != NULL)
    {
        // REVISIT_TODO: fix this, why is this constructor being used for non-null object refs?
        STATIC_CONTRACT_VIOLATION(ModeViolation);

        VALIDATEOBJECT(m_asObj);
        ENABLESTRESSHEAP();
    }
    Thread::ObjectRefNew(this);
}

//-------------------------------------------------------------
// This is for the GC's use only. Non-GC code should never
// use the "Object" class directly. The unused "int" argument
// prevents C++ from using this to implicitly convert Object*'s
// to OBJECTREF.
//-------------------------------------------------------------
OBJECTREF::OBJECTREF(Object *pObject)
{
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;
    STATIC_CONTRACT_MODE_COOPERATIVE;
    STATIC_CONTRACT_FORBID_FAULT;

    DEBUG_ONLY_FUNCTION;

    if ((pObject != 0) &&
        ((IGCHeap*)GCHeapUtilities::GetGCHeap())->IsHeapPointer( (BYTE*)this ))
    {
        _ASSERTE(!"Write Barrier violation. Must use SetObjectReference() to assign OBJECTREF's into the GC heap!");
    }
    m_asObj = pObject;
    VALIDATEOBJECT(m_asObj);
    if (m_asObj != 0) {
        ENABLESTRESSHEAP();
    }
    Thread::ObjectRefNew(this);
}

// Forwards validation to the underlying object, if any.
void OBJECTREF::Validate(BOOL bDeep, BOOL bVerifyNextHeader, BOOL bVerifySyncBlock)
{
    LIMITED_METHOD_CONTRACT;
    if (m_asObj)
    {
        m_asObj->Validate(bDeep, bVerifyNextHeader, bVerifySyncBlock);
    }
}

//-------------------------------------------------------------
// Test against NULL.
//-------------------------------------------------------------
int OBJECTREF::operator!() const
{
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;
    STATIC_CONTRACT_FORBID_FAULT;

    // We don't do any validation here, as we want to allow zero comparison in preemptive mode
    return !m_asObj;
}

//-------------------------------------------------------------
// Compare two OBJECTREF's.
//-------------------------------------------------------------
int OBJECTREF::operator==(const OBJECTREF &objref) const
{
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;
    STATIC_CONTRACT_FORBID_FAULT;

    if (objref.m_asObj != NULL) // Allow comparison to zero in preemptive mode
    {
        // REVISIT_TODO: Weakening the contract system a little bit here. We should really
        // add a special NULLOBJECTREF which can be used for these situations and have
        // a separate code path for that with the correct contract protections.
        STATIC_CONTRACT_VIOLATION(ModeViolation);

        VALIDATEOBJECT(objref.m_asObj);

        // !!! If this assert is fired, there are two possibilities:
        // !!! 1.  You are doing a type cast, e.g.  *(OBJECTREF*)pObj
        // !!!     Instead, you should use ObjectToOBJECTREF(*(Object**)pObj),
        // !!!                          or ObjectToSTRINGREF(*(StringObject**)pObj)
        // !!! 2.  There is a real GC hole here.
        // !!! Either way you need to fix the code.
        _ASSERTE(Thread::IsObjRefValid(&objref));
        VALIDATEOBJECT(m_asObj);
        // If this assert fires, you probably did not protect
        // your OBJECTREF and a GC might have occurred.  To find
        // where the possible GC was, set a breakpoint in Thread::TriggersGC
        _ASSERTE(Thread::IsObjRefValid(this));

        if (m_asObj != 0 || objref.m_asObj != 0) {
            ENABLESTRESSHEAP();
        }
    }
    return m_asObj == objref.m_asObj;
}

//-------------------------------------------------------------
// Compare two OBJECTREF's.
//-------------------------------------------------------------
int OBJECTREF::operator!=(const OBJECTREF &objref) const
{
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;
    STATIC_CONTRACT_FORBID_FAULT;

    if (objref.m_asObj != NULL)  // Allow comparison to zero in preemptive mode
    {
        // REVISIT_TODO: Weakening the contract system a little bit here. We should really
        // add a special NULLOBJECTREF which can be used for these situations and have
        // a separate code path for that with the correct contract protections.
        STATIC_CONTRACT_VIOLATION(ModeViolation);

        VALIDATEOBJECT(objref.m_asObj);

        // !!! If this assert is fired, there are two possibilities:
        // !!! 1.  You are doing a type cast, e.g.  *(OBJECTREF*)pObj
        // !!!     Instead, you should use ObjectToOBJECTREF(*(Object**)pObj),
        // !!!                          or ObjectToSTRINGREF(*(StringObject**)pObj)
        // !!! 2.  There is a real GC hole here.
        // !!! Either way you need to fix the code.
        _ASSERTE(Thread::IsObjRefValid(&objref));
        VALIDATEOBJECT(m_asObj);
        // If this assert fires, you probably did not protect
        // your OBJECTREF and a GC might have occurred.  To find
        // where the possible GC was, set a breakpoint in Thread::TriggersGC
        _ASSERTE(Thread::IsObjRefValid(this));

        if (m_asObj != 0 || objref.m_asObj != 0) {
            ENABLESTRESSHEAP();
        }
    }

    return m_asObj != objref.m_asObj;
}

//-------------------------------------------------------------
// Forward method calls.
//-------------------------------------------------------------
Object* OBJECTREF::operator->()
{
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;
    STATIC_CONTRACT_FORBID_FAULT;

    VALIDATEOBJECT(m_asObj);

    // If this assert fires, you probably did not protect
    // your OBJECTREF and a GC might have occurred.  To find
    // where the possible GC was, set a breakpoint in Thread::TriggersGC
    _ASSERTE(Thread::IsObjRefValid(this));

    if (m_asObj != 0) {
        ENABLESTRESSHEAP();
    }

    // if you are using OBJECTREF directly,
    // you probably want an Object *
    return (Object *)m_asObj;
}

//-------------------------------------------------------------
// Forward method calls.
//-------------------------------------------------------------
const Object* OBJECTREF::operator->() const
{
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;
    STATIC_CONTRACT_FORBID_FAULT;

    VALIDATEOBJECT(m_asObj);

    // If this assert fires, you probably did not protect
    // your OBJECTREF and a GC might have occurred.  To find
    // where the possible GC was, set a breakpoint in Thread::TriggersGC
    _ASSERTE(Thread::IsObjRefValid(this));

    if (m_asObj != 0) {
        ENABLESTRESSHEAP();
    }

    // if you are using OBJECTREF directly,
    // you probably want an Object *
    return (Object *)m_asObj;
}

//-------------------------------------------------------------
// Assignment. We don't validate the destination so as not
// to break the sequence:
//
//      OBJECTREF or;
//      or = ...;
//-------------------------------------------------------------
OBJECTREF& OBJECTREF::operator=(const OBJECTREF &objref)
{
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;
    STATIC_CONTRACT_FORBID_FAULT;

    VALIDATEOBJECT(objref.m_asObj);

    // !!! If this assert is fired, there are two possibilities:
    // !!! 1.  You are doing a type cast, e.g.  *(OBJECTREF*)pObj
    // !!!     Instead, you should use ObjectToOBJECTREF(*(Object**)pObj),
    // !!!                          or ObjectToSTRINGREF(*(StringObject**)pObj)
    // !!! 2.  There is a real GC hole here.
    // !!! Either way you need to fix the code.
    _ASSERTE(Thread::IsObjRefValid(&objref));

    if ((objref.m_asObj != 0) &&
        ((IGCHeap*)GCHeapUtilities::GetGCHeap())->IsHeapPointer( (BYTE*)this ))
    {
        _ASSERTE(!"Write Barrier violation. Must use SetObjectReference() to assign OBJECTREF's into the GC heap!");
    }
    Thread::ObjectRefAssign(this);

    m_asObj = objref.m_asObj;

    if (m_asObj != 0) {
        ENABLESTRESSHEAP();
    }

    return *this;
}

//-------------------------------------------------------------
// Allows for the assignment of NULL to a OBJECTREF
//-------------------------------------------------------------
OBJECTREF& OBJECTREF::operator=(TADDR nul)
{
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;
    STATIC_CONTRACT_FORBID_FAULT;

    _ASSERTE(nul == 0);
    Thread::ObjectRefAssign(this);
    m_asObj = (Object*)nul;
    if (m_asObj != 0) {
        ENABLESTRESSHEAP();
    }
    return *this;
}

#endif  // DEBUG

#ifdef _DEBUG

// Debug-only memcpy wrapper that asserts when raw memcpy is used to write
// into the GC heap (which would bypass the card-table write barrier).
void* __cdecl GCSafeMemCpy(void * dest, const void * src, size_t len)
{
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;
    STATIC_CONTRACT_FORBID_FAULT;

    if (!(((*(BYTE**)&dest) <  g_lowest_address ) ||
          ((*(BYTE**)&dest) >= g_highest_address)))
    {
        Thread* pThread = GetThreadNULLOk();

        // GCHeapUtilities::IsHeapPointer has race when called in preemptive mode. It walks the list of segments
        // that can be modified by GC. Do the check below only if it is safe to do so.
        if (pThread != NULL && pThread->PreemptiveGCDisabled())
        {
            // Note there is memcpyNoGCRefs which will allow you to do a memcpy into the GC
            // heap if you really know you don't need to call the write barrier
            _ASSERTE(!GCHeapUtilities::GetGCHeap()->IsHeapPointer((BYTE *) dest) ||
                     !"using memcpy to copy into the GC heap, use CopyValueClass");
        }
    }
    return memcpyNoGCRefs(dest, src, len);
}
#endif // _DEBUG

// This function clears a piece of memory in a GC safe way.  It makes the guarantee
// that it will clear memory in at least pointer sized chunks whenever possible.
// Unaligned memory at the beginning and remaining bytes at the end are written bytewise.
// We must make this guarantee whenever we clear memory in the GC heap that could contain
// object references.  The GC or other user threads can read object references at any time,
// clearing them bytewise can result in a read on another thread getting incorrect data.
void __fastcall ZeroMemoryInGCHeap(void* mem, size_t size)
{
    WRAPPER_NO_CONTRACT;
    BYTE* memBytes = (BYTE*) mem;
    BYTE* endBytes = &memBytes[size];

    // handle unaligned bytes at the beginning
    while (!IS_ALIGNED(memBytes, sizeof(PTR_PTR_VOID)) && memBytes < endBytes)
        *memBytes++ = 0;

    // now write pointer sized pieces
    // volatile ensures that this doesn't get optimized back into a memset call
    size_t nPtrs = (endBytes - memBytes) / sizeof(PTR_PTR_VOID);
    PTR_VOID volatile * memPtr = (PTR_PTR_VOID) memBytes;
    for (size_t i = 0; i < nPtrs; i++)
        *memPtr++ = 0;

    // handle remaining bytes at the end
    memBytes = (BYTE*) memPtr;
    while (memBytes < endBytes)
        *memBytes++ = 0;
}

// Appends [begin, end) elements to the stack trace array, growing as needed.
// The size is published only after the data copy (MemoryBarrier) so readers
// never observe a size covering uninitialized elements.
void StackTraceArray::Append(StackTraceElement const * begin, StackTraceElement const * end)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_COOPERATIVE;
        PRECONDITION(IsProtectedByGCFrame((OBJECTREF*)this));
    }
    CONTRACTL_END;

    // ensure that only one thread can write to the array
    EnsureThreadAffinity();

    size_t newsize = Size() + (end - begin);
    Grow(newsize);
    memcpyNoGCRefs(GetData() + Size(), begin, (end - begin) * sizeof(StackTraceElement));
    MemoryBarrier();  // prevent the newsize from being reordered with the array copy
    SetSize(newsize);

#if defined(_DEBUG)
    CheckState();
#endif
}

// Debug-only consistency check: the array must belong to the current thread
// and every element must have a method desc.
void StackTraceArray::CheckState() const
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_COOPERATIVE;
    }
    CONTRACTL_END;

    if (!m_array)
        return;

    assert(GetObjectThread() == GetThreadNULLOk());

    size_t size = Size();
    StackTraceElement const * p;
    p = GetData();
    for (size_t i = 0; i < size; ++i)
        assert(p[i].pFunc != NULL);
}

// Ensures capacity for grow_size elements, reallocating (capacity doubling)
// and copying header + data when needed.  First call allocates and affinitizes
// the array to the current thread.
void StackTraceArray::Grow(size_t grow_size)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_COOPERATIVE;
        INJECT_FAULT(ThrowOutOfMemory(););
        PRECONDITION(IsProtectedByGCFrame((OBJECTREF*)this));
    }
    CONTRACTL_END;

    size_t raw_size = grow_size * sizeof(StackTraceElement) + sizeof(ArrayHeader);

    if (!m_array)
    {
        SetArray(I1ARRAYREF(AllocatePrimitiveArray(ELEMENT_TYPE_I1, static_cast<DWORD>(raw_size))));
        SetSize(0);
        SetObjectThread();
    }
    else
    {
        if (Capacity() >= raw_size)
            return;

        // allocate a new array, copy the data
        size_t new_capacity = Max(Capacity() * 2, raw_size);

        _ASSERTE(new_capacity >= grow_size * sizeof(StackTraceElement) + sizeof(ArrayHeader));

        I1ARRAYREF newarr = (I1ARRAYREF) AllocatePrimitiveArray(ELEMENT_TYPE_I1, static_cast<DWORD>(new_capacity));
        memcpyNoGCRefs(newarr->GetDirectPointerToNonObjectElements(),
                       GetRaw(),
                       Size() * sizeof(StackTraceElement) + sizeof(ArrayHeader));

        SetArray(newarr);
    }
}

// If a thread other than the creator is about to mutate the array, replace
// our reference with a private deep copy to avoid cross-thread races.
void StackTraceArray::EnsureThreadAffinity()
{
    WRAPPER_NO_CONTRACT;

    if (!m_array)
        return;

    if (GetObjectThread() != GetThreadNULLOk())
    {
        // object is being changed by a thread different from the one which created it
        // make a copy of the array to prevent a race condition when two different threads try to change it
        StackTraceArray copy;
        GCPROTECT_BEGIN(copy);
        copy.CopyFrom(*this);
        this->Swap(copy);
        GCPROTECT_END();
    }
}

#ifdef _MSC_VER
#pragma warning(disable: 4267)
#endif

// Deep copies the stack trace array
void StackTraceArray::CopyFrom(StackTraceArray const & src)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_COOPERATIVE;
        INJECT_FAULT(ThrowOutOfMemory(););
        PRECONDITION(IsProtectedByGCFrame((OBJECTREF*)this));
        PRECONDITION(IsProtectedByGCFrame((OBJECTREF*)&src));
    }
    CONTRACTL_END;

    m_array = (I1ARRAYREF) AllocatePrimitiveArray(ELEMENT_TYPE_I1, static_cast<DWORD>(src.Capacity()));

    Volatile<size_t> size = src.Size();
    memcpyNoGCRefs(GetRaw(), src.GetRaw(), size * sizeof(StackTraceElement) + sizeof(ArrayHeader));

    SetSize(size);  // set size to the exact value which was used when we copied the data
                    // another thread might have changed it at the time of copying
    SetObjectThread();  // affinitize the newly created array with the current thread
}

#ifdef _MSC_VER
#pragma warning(default: 4267)
#endif

#ifdef _DEBUG
//===============================================================================
// Code that ensures that our unmanaged version of Nullable is consistent with
// the managed version Nullable<T> for all T.

void Nullable::CheckFieldOffsets(TypeHandle nullableType)
{
    LIMITED_METHOD_CONTRACT;

/***
        // The non-instantiated method tables like List<T> that are used
        // by reflection and verification do not have correct field offsets
        // but we never make instances of these anyway.
    if (nullableMT->ContainsGenericVariables())
        return;
***/

    MethodTable* nullableMT = nullableType.GetMethodTable();

    // ensure that the managed version of the table is the same as the
    // unmanaged.  Note that we can't do this in corelib.h because this
    // class is generic and field layout depends on the instantiation.

    _ASSERTE(nullableMT->GetNumInstanceFields() == 2);
    FieldDesc* field = nullableMT->GetApproxFieldDescListRaw();

    _ASSERTE(strcmp(field->GetDebugName(), "hasValue") == 0);
//     _ASSERTE(field->GetOffset() == offsetof(Nullable, hasValue));
    field++;

    _ASSERTE(strcmp(field->GetDebugName(), "value") == 0);
//     _ASSERTE(field->GetOffset() == offsetof(Nullable, value));
}
#endif

//===============================================================================
// Returns true if nullableMT is Nullable<T> for T is equivalent to paramMT

BOOL Nullable::IsNullableForTypeHelper(MethodTable* nullableMT, MethodTable* paramMT)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_ANY;
    }
    CONTRACTL_END;
    if (!nullableMT->IsNullable())
        return FALSE;

    // we require the parameter types to be equivalent
    return TypeHandle(paramMT).IsEquivalentTo(nullableMT->GetInstantiation()[0]);
}

//===============================================================================
// Returns true if nullableMT is Nullable<T> for T == paramMT
// (exact match only; cannot trigger a GC, so no type equivalence checks)

BOOL Nullable::IsNullableForTypeHelperNoGC(MethodTable* nullableMT, MethodTable* paramMT)
{
    LIMITED_METHOD_CONTRACT;
    if (!nullableMT->IsNullable())
        return FALSE;

    // we require an exact match of the parameter types
    return TypeHandle(paramMT) == nullableMT->GetInstantiation()[0];
}

//===============================================================================
// Returns the address of the embedded 'hasValue' bool (always at offset 0).
CLR_BOOL* Nullable::HasValueAddr(MethodTable* nullableMT) {

    LIMITED_METHOD_CONTRACT;

    _ASSERTE(strcmp(nullableMT->GetApproxFieldDescListRaw()[0].GetDebugName(), "hasValue") == 0);
    _ASSERTE(nullableMT->GetApproxFieldDescListRaw()[0].GetOffset() == 0);
    return (CLR_BOOL*) this;
}

//===============================================================================
// Returns the address of the embedded 'value' payload (offset taken from the MT).
void* Nullable::ValueAddr(MethodTable* nullableMT) {

    LIMITED_METHOD_CONTRACT;

    _ASSERTE(strcmp(nullableMT->GetApproxFieldDescListRaw()[1].GetDebugName(), "value") == 0);
    return (((BYTE*) this) + nullableMT->GetApproxFieldDescListRaw()[1].GetOffset());
}

//===============================================================================
// Special Logic to box a nullable<T> as a boxed<T>
// A Nullable<T> with hasValue == false boxes to NULL; otherwise the payload
// is boxed as a plain T.

OBJECTREF Nullable::Box(void* srcPtr, MethodTable* nullableMT)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_COOPERATIVE;
    }
    CONTRACTL_END;

    FAULT_NOT_FATAL();      // FIX_NOW: why do we need this?

    Nullable* src = (Nullable*) srcPtr;

    _ASSERTE(IsNullableType(nullableMT));
    // We better have a concrete instantiation, or our field offset asserts are not useful
    _ASSERTE(!nullableMT->ContainsGenericVariables());

    if (!*src->HasValueAddr(nullableMT))
        return NULL;

    OBJECTREF obj = 0;
    // srcPtr may be interior to a GC object; protect it across the allocation.
    GCPROTECT_BEGININTERIOR (src);
    MethodTable* argMT = nullableMT->GetInstantiation()[0].AsMethodTable();
    obj = argMT->Allocate();
    CopyValueClass(obj->UnBox(), src->ValueAddr(nullableMT), argMT);
    GCPROTECT_END ();

    return obj;
}

//===============================================================================
// Special Logic to unbox a boxed T as a nullable<T>
// NULL unboxes to hasValue == false; otherwise the payload is copied in and
// hasValue is set.  Honors type equivalence.  Returns FALSE on type mismatch.

BOOL Nullable::UnBox(void* destPtr, OBJECTREF boxedVal, MethodTable* destMT)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_COOPERATIVE;
    }
    CONTRACTL_END;
    Nullable* dest = (Nullable*) destPtr;
    BOOL fRet = TRUE;

    // We should only get here if we are unboxing a T as a Nullable<T>
    _ASSERTE(IsNullableType(destMT));

    // We better have a concrete instantiation, or our field offset asserts are not useful
    _ASSERTE(!destMT->ContainsGenericVariables());

    if (boxedVal == NULL)
    {
        // Logically we are doing *dest->HasValueAddr(destMT) = false;
        // We zero out the whole structure because it may contain GC references
        // and these need to be initialized to zero.   (could optimize in the non-GC case)
        InitValueClass(destPtr, destMT);
        fRet = TRUE;
    }
    else
    {
        GCPROTECT_BEGIN(boxedVal);
        if (!IsNullableForType(destMT, boxedVal->GetMethodTable()))
        {
            // For safety's sake, also allow true nullables to be unboxed normally.
            // This should not happen normally, but we want to be robust
            if (destMT->IsEquivalentTo(boxedVal->GetMethodTable()))
            {
                CopyValueClass(dest, boxedVal->GetData(), destMT);
                fRet = TRUE;
            }
            else
            {
                fRet = FALSE;
            }
        }
        else
        {
            *dest->HasValueAddr(destMT) = true;
            CopyValueClass(dest->ValueAddr(destMT), boxedVal->UnBox(), boxedVal->GetMethodTable());
            fRet = TRUE;
        }
        GCPROTECT_END();
    }
    return fRet;
}

//===============================================================================
// Special Logic to unbox a boxed T as a nullable<T>
// Does not handle type equivalence (may conservatively return FALSE)

BOOL Nullable::UnBoxNoGC(void* destPtr, OBJECTREF boxedVal, MethodTable* destMT)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_COOPERATIVE;
    }
    CONTRACTL_END;
    Nullable* dest = (Nullable*) destPtr;

    // We should only get here if we are unboxing a T as a Nullable<T>
    _ASSERTE(IsNullableType(destMT));

    // We better have a concrete instantiation, or our field offset asserts are not useful
    _ASSERTE(!destMT->ContainsGenericVariables());

    if (boxedVal == NULL)
    {
        // Logically we are doing *dest->HasValueAddr(destMT) = false;
        // We zero out the whole structure because it may contain GC references
        // and these need to be initialized to zero.   (could optimize in the non-GC case)
        InitValueClass(destPtr, destMT);
    }
    else
    {
        if (!IsNullableForTypeNoGC(destMT, boxedVal->GetMethodTable()))
        {
            // For safety's sake, also allow true nullables to be unboxed normally.
            // This should not happen normally, but we want to be robust
            if (destMT == boxedVal->GetMethodTable())
            {
                CopyValueClass(dest, boxedVal->GetData(), destMT);
                return TRUE;
            }
            return FALSE;
        }

        *dest->HasValueAddr(destMT) = true;
        CopyValueClass(dest->ValueAddr(destMT), boxedVal->UnBox(), boxedVal->GetMethodTable());
    }
    return TRUE;
}

//===============================================================================
// Special Logic to unbox a boxed T as a nullable<T> into an argument
// specified by the argDest.
// Does not handle type equivalence (may conservatively return FALSE)

BOOL Nullable::UnBoxIntoArgNoGC(ArgDestination *argDest, OBJECTREF boxedVal, MethodTable* destMT)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_COOPERATIVE;
    }
    CONTRACTL_END;

#if defined(UNIX_AMD64_ABI)
    // When the struct is split across registers, the hasValue flag and the
    // payload may land in different destinations; handle that here.
    if (argDest->IsStructPassedInRegs())
    {
        // We should only get here if we are unboxing a T as a Nullable<T>
        _ASSERTE(IsNullableType(destMT));

        // We better have a concrete instantiation, or our field offset asserts are not useful
        _ASSERTE(!destMT->ContainsGenericVariables());

        if (boxedVal == NULL)
        {
            // Logically we are doing *dest->HasValueAddr(destMT) = false;
            // We zero out the whole structure because it may contain GC references
            // and these need to be initialized to zero.   (could optimize in the non-GC case)
            InitValueClassArg(argDest, destMT);
        }
        else
        {
            if (!IsNullableForTypeNoGC(destMT, boxedVal->GetMethodTable()))
            {
                // For safety's sake, also allow true nullables to be unboxed normally.
                // This should not happen normally, but we want to be robust
                if (destMT == boxedVal->GetMethodTable())
                {
                    CopyValueClassArg(argDest, boxedVal->GetData(), destMT, 0);
                    return TRUE;
                }
                return FALSE;
            }

            Nullable* dest = (Nullable*)argDest->GetStructGenRegDestinationAddress();
            *dest->HasValueAddr(destMT) = true;
            int destOffset = (BYTE*)dest->ValueAddr(destMT) - (BYTE*)dest;
            CopyValueClassArg(argDest, boxedVal->UnBox(), boxedVal->GetMethodTable(), destOffset);
        }
        return TRUE;
    }

#endif // UNIX_AMD64_ABI

    return UnBoxNoGC(argDest->GetDestinationAddress(), boxedVal, destMT);
}

//===============================================================================
// Special Logic to unbox a boxed T as a nullable<T>
// Does not do any type checks.
void Nullable::UnBoxNoCheck(void* destPtr, OBJECTREF boxedVal, MethodTable* destMT)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_COOPERATIVE;
    }
    CONTRACTL_END;
    Nullable* dest = (Nullable*) destPtr;

    // We should only get here if we are unboxing a T as a Nullable<T>
    _ASSERTE(IsNullableType(destMT));

    // We better have a concrete instantiation, or our field offset asserts are not useful
    _ASSERTE(!destMT->ContainsGenericVariables());

    if (boxedVal == NULL)
    {
        // Logically we are doing *dest->HasValueAddr(destMT) = false;
        // We zero out the whole structure because it may contain GC references
        // and these need to be initialized to zero. (could optimize in the non-GC case)
        InitValueClass(destPtr, destMT);
    }
    else
    {
        if (IsNullableType(boxedVal->GetMethodTable()))
        {
            // For safety's sake, also allow true nullables to be unboxed normally.
            // This should not happen normally, but we want to be robust
            CopyValueClass(dest, boxedVal->GetData(), destMT);
        }
        // NOTE(review): there is no early return above, so after copying a true
        // Nullable<T> the code below still re-writes hasValue and the value field
        // from the boxed payload — confirm this fall-through is intended.

        *dest->HasValueAddr(destMT) = true;
        CopyValueClass(dest->ValueAddr(destMT), boxedVal->UnBox(), boxedVal->GetMethodTable());
    }
}

//===============================================================================
// a boxed Nullable<T> should either be null or a boxed T, but sometimes it is
// useful to have a 'true' boxed Nullable<T> (that is it has two fields). This
// function returns a 'normalized' version of this pointer.

OBJECTREF Nullable::NormalizeBox(OBJECTREF obj)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_COOPERATIVE;
    }
    CONTRACTL_END;

    if (obj != NULL)
    {
        MethodTable* retMT = obj->GetMethodTable();
        if (Nullable::IsNullableType(retMT))
            obj = Nullable::Box(obj->GetData(), retMT);
    }
    return obj;
}

// Associates the managed Thread object with its native Thread. May only be
// called once per managed Thread (NULL -> non-NULL transition).
void ThreadBaseObject::SetInternal(Thread *it)
{
    WRAPPER_NO_CONTRACT;

    // only allow a transition from NULL to non-NULL
    _ASSERTE((m_InternalThread == NULL) && (it != NULL));
    m_InternalThread = it;

    // Now the native Thread will only be destroyed after the managed Thread is collected.
    // Tell the GC that the managed Thread actually represents much more memory.
    GCInterface::AddMemoryPressure(sizeof(Thread));
}

// Severs the link to the native Thread and removes the matching memory
// pressure added by SetInternal.
void ThreadBaseObject::ClearInternal()
{
    WRAPPER_NO_CONTRACT;

    _ASSERTE(m_InternalThread != NULL);
    m_InternalThread = NULL;
    GCInterface::RemoveMemoryPressure(sizeof(Thread));
}

#endif // #ifndef DACCESS_COMPILE

StackTraceElement const & StackTraceArray::operator[](size_t index) const
{
    WRAPPER_NO_CONTRACT;
    return GetData()[index];
}

StackTraceElement & StackTraceArray::operator[](size_t index)
{
    WRAPPER_NO_CONTRACT;
    return GetData()[index];
}

#if !defined(DACCESS_COMPILE)

// Define the lock used to access stacktrace from an exception object
SpinLock g_StackTraceArrayLock;

// Stores the serialized stack trace and its associated dynamic-method keep-alive
// array into the exception object, under g_StackTraceArrayLock so readers see a
// consistent pair.
void ExceptionObject::SetStackTrace(I1ARRAYREF stackTrace, PTRARRAYREF dynamicMethodArray)
{
    CONTRACTL
    {
        GC_NOTRIGGER;
        NOTHROW;
        MODE_COOPERATIVE;
    }
    CONTRACTL_END;

#ifdef STRESS_LOG
    if (StressLog::StressLogOn(~0u, 0))
    {
        StressLog::CreateThreadStressLog();
    }
#endif

    SpinLock::AcquireLock(&g_StackTraceArrayLock);

    SetObjectReference((OBJECTREF*)&_stackTrace, (OBJECTREF)stackTrace);
    SetObjectReference((OBJECTREF*)&_dynamicMethods, (OBJECTREF)dynamicMethodArray);

    SpinLock::ReleaseLock(&g_StackTraceArrayLock);
}
#endif // !defined(DACCESS_COMPILE)

// Reads the stack trace (and optionally the dynamic-method array) out of the
// exception object; takes the same lock as SetStackTrace except in DAC builds.
void ExceptionObject::GetStackTrace(StackTraceArray & stackTrace, PTRARRAYREF * outDynamicMethodArray /*= NULL*/) const
{
    CONTRACTL
    {
        GC_NOTRIGGER;
        NOTHROW;
        MODE_COOPERATIVE;
    }
    CONTRACTL_END;

#if !defined(DACCESS_COMPILE)
    SpinLock::AcquireLock(&g_StackTraceArrayLock);
#endif // !defined(DACCESS_COMPILE)

    StackTraceArray temp(_stackTrace);
    stackTrace.Swap(temp);

    if (outDynamicMethodArray != NULL)
    {
        *outDynamicMethodArray = _dynamicMethods;
    }

#if !defined(DACCESS_COMPILE)
    SpinLock::ReleaseLock(&g_StackTraceArrayLock);
#endif // !defined(DACCESS_COMPILE)
}

// The tracked LoaderAllocator is alive as long as the dependent handle's
// primary object has not been collected.
bool LAHashDependentHashTrackerObject::IsLoaderAllocatorLive()
{
    return (ObjectFromHandle(_dependentHandle) != NULL);
}

// Fetches the primary (LoaderAllocator object) and, when it is still alive,
// the secondary (GC heap hash) of the dependent handle. Either out-parameter
// may be NULL to skip it.
void LAHashDependentHashTrackerObject::GetDependentAndLoaderAllocator(OBJECTREF *pLoaderAllocatorRef, GCHEAPHASHOBJECTREF *pGCHeapHash)
{
    OBJECTREF primary = ObjectFromHandle(_dependentHandle);
    if (pLoaderAllocatorRef != NULL)
        *pLoaderAllocatorRef = primary;

    IGCHandleManager *mgr = GCHandleUtilities::GetGCHandleManager();

    // Secondary is tracked only if primary is non-null
    if (pGCHeapHash != NULL)
        *pGCHeapHash = (GCHEAPHASHOBJECTREF)(OBJECTREF)((primary != NULL) ? mgr->GetDependentHandleSecondary(_dependentHandle) : NULL);
}
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// OBJECT.CPP
//
// Definitions of a Com+ Object
//

#include "common.h"

#include "vars.hpp"
#include "class.h"
#include "object.h"
#include "threads.h"
#include "excep.h"
#include "eeconfig.h"
#include "gcheaputilities.h"
#include "field.h"
#include "argdestination.h"

SVAL_IMPL(INT32, ArrayBase, s_arrayBoundsZero);

// follow the necessary rules to get a new valid hashcode for an object
DWORD Object::ComputeHashCode()
{
    DWORD hashCode;

    // note that this algorithm now uses at most HASHCODE_BITS so that it will
    // fit into the objheader if the hashcode has to be moved back into the objheader
    // such as for an object that is being frozen
    do
    {
        // we use the high order bits in this case because they're more random
        hashCode = GetThread()->GetNewHashCode() >> (32-HASHCODE_BITS);
    }
    while (hashCode == 0);   // need to enforce hashCode != 0

    // verify that it really fits into HASHCODE_BITS
    _ASSERTE((hashCode & ((1<<HASHCODE_BITS)-1)) == hashCode);

    return hashCode;
}

#ifndef DACCESS_COMPILE

// Returns the object's identity hash code, generating and publishing one on
// first use. The hash lives either directly in the object header (common case)
// or in the object's sync block when the header is occupied by a syncblk index.
INT32 Object::GetHashCodeEx()
{
    CONTRACTL
    {
        MODE_COOPERATIVE;
        THROWS;
        GC_NOTRIGGER;
    }
    CONTRACTL_END

    // This loop exists because we're inspecting the header dword of the object
    // and it may change under us because of races with other threads.
    // On top of that, it may have the spin lock bit set, in which case we're
    // not supposed to change it.
    // In all of these case, we need to retry the operation.
    DWORD iter = 0;
    DWORD dwSwitchCount = 0;

    while (true)
    {
        DWORD bits = GetHeader()->GetBits();

        if (bits & BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX)
        {
            if (bits & BIT_SBLK_IS_HASHCODE)
            {
                // Common case: the object already has a hash code
                return bits & MASK_HASHCODE;
            }
            else
            {
                // We have a sync block index. This means if we already have a hash code,
                // it is in the sync block, otherwise we generate a new one and store it there
                SyncBlock *psb = GetSyncBlock();
                DWORD hashCode = psb->GetHashCode();
                if (hashCode != 0)
                    return  hashCode;

                hashCode = ComputeHashCode();

                return psb->SetHashCode(hashCode);
            }
        }
        else
        {
            // If a thread is holding the thin lock we need a syncblock
            if ((bits & (SBLK_MASK_LOCK_THREADID)) != 0)
            {
                GetSyncBlock();
                // No need to replicate the above code dealing with sync blocks
                // here - in the next iteration of the loop, we'll realize
                // we have a syncblock, and we'll do the right thing.
            }
            else
            {
                // We want to change the header in this case, so we have to check the BIT_SBLK_SPIN_LOCK bit first
                if (bits & BIT_SBLK_SPIN_LOCK)
                {
                    iter++;
                    if ((iter % 1024) != 0 && g_SystemInfo.dwNumberOfProcessors > 1)
                    {
                        YieldProcessorNormalized(); // indicate to the processor that we are spinning
                    }
                    else
                    {
                        __SwitchToThread(0, ++dwSwitchCount);
                    }
                    continue;
                }

                DWORD hashCode = ComputeHashCode();

                DWORD newBits = bits | BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX | BIT_SBLK_IS_HASHCODE | hashCode;

                // Publish via interlocked compare-exchange on the header bits.
                if (GetHeader()->SetBits(newBits, bits) == bits)
                    return hashCode;
                // Header changed under us - let's restart this whole thing.
            }
        }
    }
}
#endif // #ifndef DACCESS_COMPILE

BOOL Object::ValidateObjectWithPossibleAV()
{
    CANNOT_HAVE_CONTRACT;
    SUPPORTS_DAC;

    return GetGCSafeMethodTable()->ValidateWithPossibleAV();
}

#ifndef DACCESS_COMPILE

// There are cases where it is not possible to get a type handle during a GC.
// If we can get the type handle, this method will return it.
// Otherwise, the method will return NULL.
TypeHandle Object::GetGCSafeTypeHandleIfPossible() const
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        if(!IsGCThread()) { MODE_COOPERATIVE; }
    }
    CONTRACTL_END;

    // Although getting the type handle is unsafe and could cause recursive type lookups
    // in some cases, it's always safe and straightforward to get to the MethodTable.
    MethodTable * pMT = GetGCSafeMethodTable();
    _ASSERTE(pMT != NULL);

    if (pMT == g_pFreeObjectMethodTable)
    {
        return NULL;
    }

    // Don't look at types that belong to an unloading AppDomain, or else
    // pObj->GetGCSafeTypeHandle() can AV. For example, we encountered this AV when pObj
    // was an array like this:
    //
    //     MyValueType1<MyValueType2>[] myArray
    //
    // where MyValueType1<T> & MyValueType2 are defined in different assemblies. In such
    // a case, looking up the type handle for myArray requires looking in
    // MyValueType1<T>'s module's m_AssemblyRefByNameTable, which is garbage if its
    // AppDomain is unloading.
    //
    // Another AV was encountered in a similar case,
    //
    //     MyRefType1<MyRefType2>[] myArray
    //
    // where MyRefType2's module was unloaded by the time the GC occurred. In at least
    // one case, the GC was caused by the AD unload itself (AppDomain::Unload ->
    // AppDomain::Exit -> GCInterface::AddMemoryPressure -> WKS::GCHeapUtilities::GarbageCollect).
    //
    // To protect against all scenarios, verify that
    //
    //     * The MT of the object is not getting unloaded, OR
    //     * In the case of arrays (potentially of arrays of arrays of arrays ...), the
    //         MT of the innermost element is not getting unloaded. This then ensures the
    //         MT of the original object (i.e., array) itself must not be getting
    //         unloaded either, since the MTs of arrays and of their elements are
    //         allocated on the same loader allocator.
    Module * pLoaderModule = pMT->GetLoaderModule();

    // Don't look up types that are unloading due to Collectible Assemblies. Haven't been
    // able to find a case where we actually encounter objects like this that can cause
    // problems; however, it seems prudent to add this protection just in case.
    LoaderAllocator * pLoaderAllocator = pLoaderModule->GetLoaderAllocator();
    _ASSERTE(pLoaderAllocator != NULL);

    if ((pLoaderAllocator->IsCollectible()) &&
        (ObjectHandleIsNull(pLoaderAllocator->GetLoaderAllocatorObjectHandle())))
    {
        return NULL;
    }

    // Ok, it should now be safe to get the type handle
    return GetGCSafeTypeHandle();
}

/* static */ BOOL Object::SupportsInterface(OBJECTREF pObj, MethodTable* pInterfaceMT)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        INJECT_FAULT(COMPlusThrowOM());
        PRECONDITION(CheckPointer(pInterfaceMT));
        PRECONDITION(pObj->GetMethodTable()->IsRestored_NoLogging());
        PRECONDITION(pInterfaceMT->IsInterface());
    }
    CONTRACTL_END

    BOOL bSupportsItf = FALSE;

    GCPROTECT_BEGIN(pObj)
    {
        // Make sure the interface method table has been restored.
        pInterfaceMT->CheckRestore();

        // Check to see if the static class definition indicates we implement the interface.
        MethodTable * pMT = pObj->GetMethodTable();
        if (pMT->CanCastToInterface(pInterfaceMT))
        {
            bSupportsItf = TRUE;
        }
#ifdef FEATURE_COMINTEROP
        else if (pMT->IsComObjectType())
        {
            // If this is a COM object, the static class definition might not be complete so we need
            // to check if the COM object implements the interface.
            bSupportsItf = ComObject::SupportsInterface(pObj, pInterfaceMT);
        }
#endif // FEATURE_COMINTEROP
    }
    GCPROTECT_END();

    return bSupportsItf;
}

Assembly *AssemblyBaseObject::GetAssembly()
{
    WRAPPER_NO_CONTRACT;
    return m_pAssembly->GetAssembly();
}

// Allocates a managed String initialized from the given SString's contents.
STRINGREF AllocateString(SString sstr)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
    }
    CONTRACTL_END;

    COUNT_T length = sstr.GetCount(); // count of WCHARs excluding terminating NULL
    STRINGREF strObj = AllocateString(length);

    memcpyNoGCRefs(strObj->GetBuffer(), sstr.GetUnicode(), length*sizeof(WCHAR));

    return strObj;
}

CHARARRAYREF AllocateCharArray(DWORD dwArrayLength)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_COOPERATIVE;
    }
    CONTRACTL_END;
    return (CHARARRAYREF)AllocatePrimitiveArray(ELEMENT_TYPE_CHAR, dwArrayLength);
}

void Object::ValidateHeap(BOOL bDeep)
{
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;
    STATIC_CONTRACT_FORBID_FAULT;

#if defined (VERIFY_HEAP)
    //no need to verify next object's header in this case
    //since this is called in verify_heap, which will verify every object anyway
    Validate(bDeep, FALSE);
#endif
}

// Stores an object reference into this object's instance data at the given
// byte offset, with the proper write barrier.
void Object::SetOffsetObjectRef(DWORD dwOffset, size_t dwValue)
{
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;
    STATIC_CONTRACT_FORBID_FAULT;
    STATIC_CONTRACT_MODE_COOPERATIVE;

    OBJECTREF* location;
    OBJECTREF  o;

    location = (OBJECTREF *) &GetData()[dwOffset];
    o        = ObjectToOBJECTREF(*(Object **) &dwValue);

    SetObjectReference( location, o );
}

// Raw object reference store: volatile write followed by the card-table
// write barrier. No checked-OBJECTREF validation is performed here.
void SetObjectReferenceUnchecked(OBJECTREF *dst,OBJECTREF ref)
{
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;
    STATIC_CONTRACT_FORBID_FAULT;
    STATIC_CONTRACT_MODE_COOPERATIVE;
    STATIC_CONTRACT_CANNOT_TAKE_LOCK;

    // Assign value. We use casting to avoid going thru the overloaded
    // OBJECTREF= operator which in this case would trigger a false
    // write-barrier violation assert.
    VolatileStore((Object**)dst, OBJECTREFToObject(ref));
#ifdef _DEBUG
    Thread::ObjectRefAssign(dst);
#endif
    ErectWriteBarrier(dst, ref);
}

// Copies a value class instance; uses a GC-ref-aware move when the type
// contains object references, otherwise a plain (size-specialized) copy.
void STDCALL CopyValueClassUnchecked(void* dest, void* src, MethodTable *pMT)
{
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;
    STATIC_CONTRACT_FORBID_FAULT;
    STATIC_CONTRACT_MODE_COOPERATIVE;

    _ASSERTE(!pMT->IsArray());  // bunch of assumptions about arrays wrong.

    if (pMT->ContainsPointers())
    {
        memmoveGCRefs(dest, src, pMT->GetNumInstanceFieldBytes());
    }
    else
    {
        switch (pMT->GetNumInstanceFieldBytes())
        {
        case 1:
            *(UINT8*)dest = *(UINT8*)src;
            break;
#ifndef ALIGN_ACCESS
        // we can hit an alignment fault if the value type has multiple
        // smaller fields. Example: if there are two I4 fields, the
        // value class can be aligned to 4-byte boundaries, yet the
        // NumInstanceFieldBytes is 8
        case 2:
            *(UINT16*)dest = *(UINT16*)src;
            break;
        case 4:
            *(UINT32*)dest = *(UINT32*)src;
            break;
        case 8:
            *(UINT64*)dest = *(UINT64*)src;
            break;
#endif // !ALIGN_ACCESS
        default:
            memcpyNoGCRefs(dest, src, pMT->GetNumInstanceFieldBytes());
            break;
        }
    }
}

// Copy value class into the argument specified by the argDest.
// The destOffset is nonzero when copying values into Nullable<T>, it is the offset // of the T value inside of the Nullable<T> void STDCALL CopyValueClassArgUnchecked(ArgDestination *argDest, void* src, MethodTable *pMT, int destOffset) { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_FORBID_FAULT; STATIC_CONTRACT_MODE_COOPERATIVE; #if defined(UNIX_AMD64_ABI) if (argDest->IsStructPassedInRegs()) { argDest->CopyStructToRegisters(src, pMT->GetNumInstanceFieldBytes(), destOffset); return; } #elif defined(TARGET_ARM64) if (argDest->IsHFA()) { argDest->CopyHFAStructToRegister(src, pMT->GetNumInstanceFieldBytes()); return; } #endif // UNIX_AMD64_ABI // destOffset is only valid for Nullable<T> passed in registers _ASSERTE(destOffset == 0); CopyValueClassUnchecked(argDest->GetDestinationAddress(), src, pMT); } // Initialize the value class argument to zeros void InitValueClassArg(ArgDestination *argDest, MethodTable *pMT) { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_FORBID_FAULT; STATIC_CONTRACT_MODE_COOPERATIVE; #if defined(UNIX_AMD64_ABI) if (argDest->IsStructPassedInRegs()) { argDest->ZeroStructInRegisters(pMT->GetNumInstanceFieldBytes()); return; } #endif InitValueClass(argDest->GetDestinationAddress(), pMT); } #if defined (VERIFY_HEAP) #include "dbginterface.h" // make the checking code goes as fast as possible! 
#if defined(_MSC_VER)
#pragma optimize("tgy", on)
#endif

#define CREATE_CHECK_STRING(x) #x
#define CHECK_AND_TEAR_DOWN(x)                                  \
    do{                                                         \
        if (!(x))                                               \
        {                                                       \
            _ASSERTE(!CREATE_CHECK_STRING(x));                  \
            EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE); \
        }                                                       \
    } while (0)

// Entry point for heap-verification of a single object. Filters out cases
// where validation is unsafe or too slow, then delegates to ValidateInner.
VOID Object::Validate(BOOL bDeep, BOOL bVerifyNextHeader, BOOL bVerifySyncBlock)
{
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;
    STATIC_CONTRACT_FORBID_FAULT;
    STATIC_CONTRACT_MODE_COOPERATIVE;
    STATIC_CONTRACT_CANNOT_TAKE_LOCK;

    if (g_IBCLogger.InstrEnabled() && !GCStress<cfg_any>::IsEnabled())
    {
        // If we are instrumenting for IBC (and GCStress is not enabled)
        // then skip these Object::Validate() as they slow down the
        // instrument phase by an order of magnitude
        return;
    }

    if (g_fEEShutDown & ShutDown_Phase2)
    {
        // During second phase of shutdown the code below is not guaranteed to work.
        return;
    }

#ifdef _DEBUG
    {
        Thread *pThread = GetThreadNULLOk();

        if (pThread != NULL && !(pThread->PreemptiveGCDisabled()))
        {
            // Debugger helper threads are special in that they take over for
            // what would normally be a nonEE thread (the RCThread). If an
            // EE thread is doing RCThread duty, then it should be treated
            // as such.
            //
            // There are some GC threads in the same kind of category. Note that
            // GetThread() sometimes returns them, if DLL_THREAD_ATTACH notifications
            // have run some managed code.
            if (!dbgOnly_IsSpecialEEThread() && !IsGCSpecialThread())
                _ASSERTE(!"OBJECTREF being accessed while thread is in preemptive GC mode.");
        }
    }
#endif

    { // ValidateInner can throw or fault on failure which violates contract.
        CONTRACT_VIOLATION(ThrowsViolation | FaultViolation);

        // using inner helper because of TRY and stack objects with destructors.
        ValidateInner(bDeep, bVerifyNextHeader, bVerifySyncBlock);
    }
}

// Runs the individual validation checks; lastTest records how far we got so
// the catch block can report which check was in flight when corruption (or an
// AV) was detected.
VOID Object::ValidateInner(BOOL bDeep, BOOL bVerifyNextHeader, BOOL bVerifySyncBlock)
{
    STATIC_CONTRACT_THROWS;         // See CONTRACT_VIOLATION above
    STATIC_CONTRACT_GC_NOTRIGGER;
    STATIC_CONTRACT_FAULT;          // See CONTRACT_VIOLATION above
    STATIC_CONTRACT_MODE_COOPERATIVE;
    STATIC_CONTRACT_CANNOT_TAKE_LOCK;

    int lastTest = 0;

    EX_TRY
    {
        // in order to avoid contract violations in the EH code we'll allow AVs here,
        // they'll be handled in the catch block
        AVInRuntimeImplOkayHolder avOk;

        MethodTable *pMT = GetGCSafeMethodTable();

        lastTest = 1;

        CHECK_AND_TEAR_DOWN(pMT && pMT->Validate());
        lastTest = 2;

        bool noRangeChecks =
            (g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_NO_RANGE_CHECKS) == EEConfig::HEAPVERIFY_NO_RANGE_CHECKS;

        // noRangeChecks depends on initial values being FALSE
        BOOL bSmallObjectHeapPtr = FALSE, bLargeObjectHeapPtr = FALSE;
        if (!noRangeChecks)
        {
            bSmallObjectHeapPtr = GCHeapUtilities::GetGCHeap()->IsHeapPointer(this, true);
            if (!bSmallObjectHeapPtr)
                bLargeObjectHeapPtr = GCHeapUtilities::GetGCHeap()->IsHeapPointer(this);

            CHECK_AND_TEAR_DOWN(bSmallObjectHeapPtr || bLargeObjectHeapPtr);
        }

        lastTest = 3;

        if (bDeep)
        {
            CHECK_AND_TEAR_DOWN(GetHeader()->Validate(bVerifySyncBlock));
        }

        lastTest = 4;

        if (bDeep && (g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_GC))
        {
            GCHeapUtilities::GetGCHeap()->ValidateObjectMember(this);
        }

        lastTest = 5;

        // since bSmallObjectHeapPtr is initialized to FALSE
        // we skip checking noRangeChecks since if skipping
        // is enabled bSmallObjectHeapPtr will always be false.
        if (bSmallObjectHeapPtr)
        {
            CHECK_AND_TEAR_DOWN(!GCHeapUtilities::GetGCHeap()->IsLargeObject(this));
        }

        lastTest = 6;

        // NOTE(review): no check sits between steps 6 and 7 — presumably a
        // validation step was removed here; the numbering is kept stable.
        lastTest = 7;

        _ASSERTE(GCHeapUtilities::IsGCHeapInitialized());
        // try to validate next object's header
        if (bDeep
            && bVerifyNextHeader
            && GCHeapUtilities::GetGCHeap()->RuntimeStructuresValid()
            //NextObj could be very slow if concurrent GC is going on
            && !GCHeapUtilities::GetGCHeap ()->IsConcurrentGCInProgress ())
        {
            Object * nextObj = GCHeapUtilities::GetGCHeap ()->NextObj (this);
            if ((nextObj != NULL) &&
                (nextObj->GetGCSafeMethodTable() != g_pFreeObjectMethodTable))
            {
                // we need a read barrier here - to make sure we read the object header _after_
                // reading data that tells us that the object is eligible for verification
                // (also see: gc.cpp/a_fit_segment_end_p)
                VOLATILE_MEMORY_BARRIER();
                CHECK_AND_TEAR_DOWN(nextObj->GetHeader()->Validate(FALSE));
            }
        }

        lastTest = 8;

#ifdef FEATURE_64BIT_ALIGNMENT
        if (pMT->RequiresAlign8())
        {
            CHECK_AND_TEAR_DOWN((((size_t)this) & 0x7) == (pMT->IsValueType()? 4:0));
        }
        lastTest = 9;
#endif // FEATURE_64BIT_ALIGNMENT

    }
    EX_CATCH
    {
        STRESS_LOG3(LF_ASSERT, LL_ALWAYS, "Detected use of corrupted OBJECTREF: %p [MT=%p] (lastTest=%d)", this, lastTest > 0 ? (*(size_t*)this) : 0, lastTest);
        CHECK_AND_TEAR_DOWN(!"Detected use of a corrupted OBJECTREF. Possible GC hole.");
    }
    EX_END_CATCH(SwallowAllExceptions);
}

#endif   // VERIFY_HEAP

/*==================================NewString===================================
**Action:  Creates a System.String object.
**Returns:
**Arguments:
**Exceptions:
==============================================================================*/
STRINGREF StringObject::NewString(INT32 length)
{
    CONTRACTL
    {
        GC_TRIGGERS;
        MODE_COOPERATIVE;
        PRECONDITION(length>=0);
    }
    CONTRACTL_END;

    STRINGREF pString;

    if (length<0)
    {
        return NULL;
    }
    else if (length == 0)
    {
        return GetEmptyString();
    }
    else
    {
        pString = AllocateString(length);
        _ASSERTE(pString->GetBuffer()[length] == 0);

        return pString;
    }
}

/*==================================NewString===================================
**Action: Many years ago, VB didn't have the concept of a byte array, so enterprising
**        users created one by allocating a BSTR with an odd length and using it to
**        store bytes.  A generation later, we're still stuck supporting this behavior.
**        The way that we do this is to take advantage of the difference between the
**        array length and the string length.  The string length will always be the
**        number of characters between the start of the string and the terminating 0.
**        If we need an odd number of bytes, we'll take one wchar after the terminating 0.
**        (e.g. at position StringLength+1).  The high-order byte of this wchar is
**        reserved for flags and the low-order byte is our odd byte. This function is
**        used to allocate a string of that shape, but we don't actually mark the
**        trailing byte as being in use yet.
**Returns: A newly allocated string.  Null if length is less than 0.
**Arguments: length -- the length of the string to allocate
**           bHasTrailByte -- whether the string also has a trailing byte.
**Exceptions: OutOfMemoryException if AllocateString fails.
==============================================================================*/
STRINGREF StringObject::NewString(INT32 length, BOOL bHasTrailByte)
{
    CONTRACTL
    {
        GC_TRIGGERS;
        MODE_COOPERATIVE;
        PRECONDITION(length>=0 && length != INT32_MAX);
    }
    CONTRACTL_END;

    STRINGREF pString;
    if (length<0 || length == INT32_MAX)
    {
        return NULL;
    }
    else if (length == 0)
    {
        return GetEmptyString();
    }
    else
    {
        pString = AllocateString(length);
        _ASSERTE(pString->GetBuffer()[length]==0);
        if (bHasTrailByte)
        {
            // The extra wchar reserved for the VB trail byte must start out zero.
            _ASSERTE(pString->GetBuffer()[length+1]==0);
        }
    }

    return pString;
}

//========================================================================
// Creates a System.String object and initializes from
// the supplied null-terminated C string.
//
// Maps NULL to null. This function does *not* return null to indicate
// error situations: it throws an exception instead.
//========================================================================
STRINGREF StringObject::NewString(const WCHAR *pwsz)
{
    CONTRACTL
    {
        GC_TRIGGERS;
        MODE_COOPERATIVE;
    } CONTRACTL_END;

    if (!pwsz)
    {
        return NULL;
    }
    else
    {
        DWORD nch = (DWORD)wcslen(pwsz);
        if (nch==0)
        {
            return GetEmptyString();
        }

#if 0
        //
        // This assert is disabled because it is valid for us to get a
        // pointer from the gc heap here as long as it is pinned.  This
        // can happen when a string is marshalled to unmanaged by
        // pinning and then later put into a struct and that struct is
        // then marshalled to managed.
        //
        _ASSERTE(!GCHeapUtilities::GetGCHeap()->IsHeapPointer((BYTE *) pwsz) ||
                 !"pwsz can not point to GC Heap");
#endif // 0
        STRINGREF pString = AllocateString( nch );

        memcpyNoGCRefs(pString->GetBuffer(), pwsz, nch*sizeof(WCHAR));
        _ASSERTE(pString->GetBuffer()[nch] == 0);
        return pString;
    }
}

#if defined(_MSC_VER) && defined(TARGET_X86)
#pragma optimize("y", on)        // Small critical routines, don't put in EBP frame
#endif

// As above, but takes an explicit character count instead of relying on a
// null terminator.
STRINGREF StringObject::NewString(const WCHAR *pwsz, int length)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_COOPERATIVE;
        PRECONDITION(length>=0);
    } CONTRACTL_END;

    if (!pwsz)
    {
        return NULL;
    }
    else if (length <= 0)
    {
        return GetEmptyString();
    }
    else
    {
#if 0
        //
        // This assert is disabled because it is valid for us to get a
        // pointer from the gc heap here as long as it is pinned.  This
        // can happen when a string is marshalled to unmanaged by
        // pinning and then later put into a struct and that struct is
        // then marshalled to managed.
        //
        _ASSERTE(!GCHeapUtilities::GetGCHeap()->IsHeapPointer((BYTE *) pwsz) ||
                 !"pwsz can not point to GC Heap");
#endif // 0
        STRINGREF pString = AllocateString(length);

        memcpyNoGCRefs(pString->GetBuffer(), pwsz, length*sizeof(WCHAR));
        _ASSERTE(pString->GetBuffer()[length] == 0);
        return pString;
    }
}

#if defined(_MSC_VER) && defined(TARGET_X86)
#pragma optimize("", on)        // Go back to command line default optimizations
#endif

// Creates a System.String from a null-terminated UTF-8 string; throws on
// invalid UTF-8.
STRINGREF StringObject::NewString(LPCUTF8 psz)
{
    CONTRACTL
    {
        GC_TRIGGERS;
        MODE_COOPERATIVE;
        THROWS;
        PRECONDITION(CheckPointer(psz));
    } CONTRACTL_END;

    int length = (int)strlen(psz);
    if (length == 0)
    {
        return GetEmptyString();
    }
    CQuickBytes qb;
    WCHAR* pwsz = (WCHAR*) qb.AllocThrows((length) * sizeof(WCHAR));
    length = WszMultiByteToWideChar(CP_UTF8, 0, psz, length, pwsz, length);
    if (length == 0)
    {
        COMPlusThrow(kArgumentException, W("Arg_InvalidUTF8String"));
    }
    return NewString(pwsz, length);
}

// Creates a System.String from a counted UTF-8 buffer (not necessarily
// null-terminated); throws on invalid UTF-8 or size overflow.
STRINGREF StringObject::NewString(LPCUTF8 psz, int cBytes)
{
    CONTRACTL
    {
        GC_TRIGGERS;
        MODE_COOPERATIVE;
        THROWS;
        PRECONDITION(CheckPointer(psz, NULL_OK));
    } CONTRACTL_END;

    if (!psz)
        return NULL;

    _ASSERTE(psz);
    _ASSERTE(cBytes >= 0);
    if (cBytes == 0)
    {
        return GetEmptyString();
    }
    int cWszBytes = 0;
    if (!ClrSafeInt<int>::multiply(cBytes, sizeof(WCHAR), cWszBytes))
        COMPlusThrowOM();
    CQuickBytes qb;
    WCHAR* pwsz = (WCHAR*) qb.AllocThrows(cWszBytes);
    int length = WszMultiByteToWideChar(CP_UTF8, 0, psz, cBytes, pwsz, cBytes);
    if (length == 0)
    {
        COMPlusThrow(kArgumentException, W("Arg_InvalidUTF8String"));
    }
    return NewString(pwsz, length);
}

//
//
// STATIC MEMBER VARIABLES
//
//
STRINGREF* StringObject::EmptyStringRefPtr=NULL;

//The special string helpers are used as flag bits for weird strings that have bytes
//after the terminating 0.  The only case where we use this right now is the VB BSTR as
//byte array which is described in MakeStringAsByteArrayFromBytes.
#define SPECIAL_STRING_VB_BYTE_ARRAY 0x100

FORCEINLINE BOOL MARKS_VB_BYTE_ARRAY(WCHAR x)
{
    return static_cast<BOOL>(x & SPECIAL_STRING_VB_BYTE_ARRAY);
}

FORCEINLINE WCHAR MAKE_VB_TRAIL_BYTE(BYTE x)
{
    return static_cast<WCHAR>(x) | SPECIAL_STRING_VB_BYTE_ARRAY;
}

FORCEINLINE BYTE GET_VB_TRAIL_BYTE(WCHAR x)
{
    return static_cast<BYTE>(x & 0xFF);
}

/*==============================InitEmptyStringRefPtr============================
**Action:  Gets an empty string refptr, cache the result.
**Returns: The retrieved STRINGREF.
==============================================================================*/
STRINGREF* StringObject::InitEmptyStringRefPtr() {
    CONTRACTL {
        THROWS;
        MODE_ANY;
        GC_TRIGGERS;
    } CONTRACTL_END;

    GCX_COOP();

    // NOTE(review): the cached pointer is written without synchronization;
    // presumably concurrent callers always publish the same interned-string
    // pointer, making the race benign — confirm.
    EEStringData data(0, W(""), TRUE);
    EmptyStringRefPtr = SystemDomain::System()->DefaultDomain()->GetLoaderAllocator()->GetStringObjRefPtrFromUnicodeString(&data);
    return EmptyStringRefPtr;
}

// Compares an ASCII-range UTF-16 string against an 8-bit string, ignoring
// case. Returns FALSE (and *result == 0) if any character is above 0x7F,
// because that would require locale-aware casing; otherwise TRUE with the
// ordinal difference stored in *result.
// strAChars must be null-terminated, with an appropriate aLength
// strBChars must be null-terminated, with an appropriate bLength OR bLength == -1
// If bLength == -1, we stop on the first null character in strBChars
BOOL StringObject::CaseInsensitiveCompHelper(_In_reads_(aLength) WCHAR *strAChars, _In_z_ INT8 *strBChars, INT32 aLength, INT32 bLength, INT32 *result) {
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
        PRECONDITION(CheckPointer(strAChars));
        PRECONDITION(CheckPointer(strBChars));
        PRECONDITION(CheckPointer(result));
    } CONTRACTL_END;

    WCHAR *strAStart = strAChars;
    INT8  *strBStart = strBChars;
    unsigned charA;
    unsigned charB;

    for(;;)
    {
        charA = *strAChars;
        charB = (unsigned) *strBChars;

        //Case-insensitive comparison on chars greater than 0x7F
        //requires a locale-aware casing operation and we're not going there.
        if ((charA|charB)>0x7F) {
            *result = 0;
            return FALSE;
        }

        // uppercase both chars.
        if (charA>='a' && charA<='z') {
            charA ^= 0x20;
        }
        if (charB>='a' && charB<='z') {
            charB ^= 0x20;
        }

        //Return the (case-insensitive) difference between them.
        if (charA!=charB) {
            *result = (int)(charA-charB);
            return TRUE;
        }

        if (charA==0)   // both strings have null character
        {
            if (bLength == -1) {
                *result = aLength - static_cast<INT32>(strAChars - strAStart);
                return TRUE;
            }
            if (strAChars==strAStart + aLength || strBChars==strBStart + bLength) {
                *result = aLength - bLength;
                return TRUE;
            }
            // else both embedded zeros
        }

        // Next char
        strAChars++;
        strBChars++;
    }
}

/*============================InternalTrailByteCheck============================
**Action: Many years ago, VB didn't have the concept of a byte array, so enterprising
**        users created one by allocating a BSTR with an odd length and using it to
**        store bytes.  A generation later, we're still stuck supporting this behavior.
**        The way that we do this is stick the trail byte in the sync block
**        whenever we encounter such a situation. Since we expect this to be a very corner case
**        accessing the sync block seems like a good enough solution
**
**Returns: True if <CODE>str</CODE> contains a VB trail byte, false otherwise.
**Arguments: str -- The string to be examined.
**Exceptions: None
==============================================================================*/
BOOL StringObject::HasTrailByte() {
    WRAPPER_NO_CONTRACT;

    SyncBlock * pSyncBlock = PassiveGetSyncBlock();
    if(pSyncBlock != NULL)
    {
        return pSyncBlock->HasCOMBstrTrailByte();
    }

    return FALSE;
}

/*=================================GetTrailByte=================================
**Action: If <CODE>str</CODE> contains a vb trail byte, returns a copy of it.
**Returns: True if <CODE>str</CODE> contains a trail byte.  *bTrailByte is set to
**         the byte in question if <CODE>str</CODE> does have a trail byte, otherwise
**         it's set to 0.
**Arguments: str -- The string being examined.
**           bTrailByte -- An out param to hold the value of the trail byte.
**Exceptions: None.
==============================================================================*/
BOOL StringObject::GetTrailByte(BYTE *bTrailByte) {
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
    }
    CONTRACTL_END;
    _ASSERTE(bTrailByte);
    *bTrailByte=0;

    BOOL retValue = HasTrailByte();

    if(retValue)
    {
        *bTrailByte = GET_VB_TRAIL_BYTE(GetHeader()->PassiveGetSyncBlock()->GetCOMBstrTrailByte());
    }

    return retValue;
}

/*=================================SetTrailByte=================================
**Action: Sets the trail byte in the sync block
**Returns: True.
**Arguments: str -- The string into which to set the trail byte.
**           bTrailByte -- The trail byte to be added to the string.
**Exceptions: None.
==============================================================================*/
BOOL StringObject::SetTrailByte(BYTE bTrailByte) {
    WRAPPER_NO_CONTRACT;

    GetHeader()->GetSyncBlock()->SetCOMBstrTrailByte(MAKE_VB_TRAIL_BYTE(bTrailByte));
    return TRUE;
}

#ifdef USE_CHECKED_OBJECTREFS

//-------------------------------------------------------------
// Default constructor, for non-initializing declarations:
//
//      OBJECTREF or;
//-------------------------------------------------------------
OBJECTREF::OBJECTREF()
{
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;
    STATIC_CONTRACT_FORBID_FAULT;

    // Poison uninitialized refs so accidental use is caught in debug builds.
    m_asObj = (Object*)POISONC;
    Thread::ObjectRefNew(this);
}

//-------------------------------------------------------------
// Copy constructor, for passing OBJECTREF's as function arguments.
//-------------------------------------------------------------
OBJECTREF::OBJECTREF(const OBJECTREF & objref)
{
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;
    STATIC_CONTRACT_MODE_COOPERATIVE;
    STATIC_CONTRACT_FORBID_FAULT;

    VALIDATEOBJECT(objref.m_asObj);

    // !!! If this assert is fired, there are two possibilities:
    // !!! 1.  You are doing a type cast, e.g.  *(OBJECTREF*)pObj
    // !!!     Instead, you should use ObjectToOBJECTREF(*(Object**)pObj),
    // !!!     or ObjectToSTRINGREF(*(StringObject**)pObj)
    // !!! 2.
    // !!!     There is a real GC hole here.
    // !!! Either way you need to fix the code.
    _ASSERTE(Thread::IsObjRefValid(&objref));
    if ((objref.m_asObj != 0) &&
        ((IGCHeap*)GCHeapUtilities::GetGCHeap())->IsHeapPointer( (BYTE*)this ))
    {
        _ASSERTE(!"Write Barrier violation. Must use SetObjectReference() to assign OBJECTREF's into the GC heap!");
    }
    m_asObj = objref.m_asObj;

    if (m_asObj != 0) {
        ENABLESTRESSHEAP();
    }

    Thread::ObjectRefNew(this);
}

//-------------------------------------------------------------
// VolatileLoadWithoutBarrier constructor
//-------------------------------------------------------------
OBJECTREF::OBJECTREF(const OBJECTREF *pObjref, tagVolatileLoadWithoutBarrier tag)
{
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;
    STATIC_CONTRACT_MODE_COOPERATIVE;
    STATIC_CONTRACT_FORBID_FAULT;

    // Read the source ref exactly once, without a memory barrier.
    Object* objrefAsObj = VolatileLoadWithoutBarrier(&pObjref->m_asObj);
    VALIDATEOBJECT(objrefAsObj);

    // !!! If this assert is fired, there are two possibilities:
    // !!! 1.  You are doing a type cast, e.g.  *(OBJECTREF*)pObj
    // !!!     Instead, you should use ObjectToOBJECTREF(*(Object**)pObj),
    // !!!     or ObjectToSTRINGREF(*(StringObject**)pObj)
    // !!! 2.  There is a real GC hole here.
    // !!! Either way you need to fix the code.
    _ASSERTE(Thread::IsObjRefValid(pObjref));
    if ((objrefAsObj != 0) &&
        ((IGCHeap*)GCHeapUtilities::GetGCHeap())->IsHeapPointer( (BYTE*)this ))
    {
        _ASSERTE(!"Write Barrier violation. Must use SetObjectReference() to assign OBJECTREF's into the GC heap!");
    }
    m_asObj = objrefAsObj;

    if (m_asObj != 0) {
        ENABLESTRESSHEAP();
    }

    Thread::ObjectRefNew(this);
}

//-------------------------------------------------------------
// To allow NULL to be used as an OBJECTREF.
//-------------------------------------------------------------
OBJECTREF::OBJECTREF(TADDR nul)
{
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;
    STATIC_CONTRACT_FORBID_FAULT;

    //_ASSERTE(nul == 0);
    m_asObj = (Object*)nul;
    if( m_asObj != NULL)
    {
        // REVISIT_TODO: fix this, why is this constructor being used for non-null object refs?
        STATIC_CONTRACT_VIOLATION(ModeViolation);

        VALIDATEOBJECT(m_asObj);
        ENABLESTRESSHEAP();
    }
    Thread::ObjectRefNew(this);
}

//-------------------------------------------------------------
// This is for the GC's use only. Non-GC code should never
// use the "Object" class directly. The unused "int" argument
// prevents C++ from using this to implicitly convert Object*'s
// to OBJECTREF.
//-------------------------------------------------------------
OBJECTREF::OBJECTREF(Object *pObject)
{
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;
    STATIC_CONTRACT_MODE_COOPERATIVE;
    STATIC_CONTRACT_FORBID_FAULT;

    DEBUG_ONLY_FUNCTION;

    if ((pObject != 0) &&
        ((IGCHeap*)GCHeapUtilities::GetGCHeap())->IsHeapPointer( (BYTE*)this ))
    {
        _ASSERTE(!"Write Barrier violation. Must use SetObjectReference() to assign OBJECTREF's into the GC heap!");
    }
    m_asObj = pObject;
    VALIDATEOBJECT(m_asObj);
    if (m_asObj != 0) {
        ENABLESTRESSHEAP();
    }
    Thread::ObjectRefNew(this);
}

void OBJECTREF::Validate(BOOL bDeep, BOOL bVerifyNextHeader, BOOL bVerifySyncBlock)
{
    LIMITED_METHOD_CONTRACT;
    if (m_asObj)
    {
        m_asObj->Validate(bDeep, bVerifyNextHeader, bVerifySyncBlock);
    }
}

//-------------------------------------------------------------
// Test against NULL.
//-------------------------------------------------------------
int OBJECTREF::operator!() const
{
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;
    STATIC_CONTRACT_FORBID_FAULT;

    // We don't do any validation here, as we want to allow zero comparison in preemptive mode
    return !m_asObj;
}

//-------------------------------------------------------------
// Compare two OBJECTREF's.
//-------------------------------------------------------------
int OBJECTREF::operator==(const OBJECTREF &objref) const
{
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;
    STATIC_CONTRACT_FORBID_FAULT;

    if (objref.m_asObj != NULL) // Allow comparison to zero in preemptive mode
    {
        // REVISIT_TODO: Weakening the contract system a little bit here. We should really
        // add a special NULLOBJECTREF which can be used for these situations and have
        // a separate code path for that with the correct contract protections.
        STATIC_CONTRACT_VIOLATION(ModeViolation);

        VALIDATEOBJECT(objref.m_asObj);

        // !!! If this assert is fired, there are two possibilities:
        // !!! 1.  You are doing a type cast, e.g.  *(OBJECTREF*)pObj
        // !!!     Instead, you should use ObjectToOBJECTREF(*(Object**)pObj),
        // !!!     or ObjectToSTRINGREF(*(StringObject**)pObj)
        // !!! 2.  There is a real GC hole here.
        // !!! Either way you need to fix the code.
        _ASSERTE(Thread::IsObjRefValid(&objref));
        VALIDATEOBJECT(m_asObj);
        // If this assert fires, you probably did not protect
        // your OBJECTREF and a GC might have occurred.  To
        // where the possible GC was, set a breakpoint in Thread::TriggersGC
        _ASSERTE(Thread::IsObjRefValid(this));

        if (m_asObj != 0 || objref.m_asObj != 0) {
            ENABLESTRESSHEAP();
        }
    }
    return m_asObj == objref.m_asObj;
}

//-------------------------------------------------------------
// Compare two OBJECTREF's.
//-------------------------------------------------------------
int OBJECTREF::operator!=(const OBJECTREF &objref) const
{
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;
    STATIC_CONTRACT_FORBID_FAULT;

    if (objref.m_asObj != NULL) // Allow comparison to zero in preemptive mode
    {
        // REVISIT_TODO: Weakening the contract system a little bit here. We should really
        // add a special NULLOBJECTREF which can be used for these situations and have
        // a separate code path for that with the correct contract protections.
        STATIC_CONTRACT_VIOLATION(ModeViolation);

        VALIDATEOBJECT(objref.m_asObj);

        // !!! If this assert is fired, there are two possibilities:
        // !!! 1.  You are doing a type cast, e.g.  *(OBJECTREF*)pObj
        // !!!     Instead, you should use ObjectToOBJECTREF(*(Object**)pObj),
        // !!!     or ObjectToSTRINGREF(*(StringObject**)pObj)
        // !!! 2.  There is a real GC hole here.
        // !!! Either way you need to fix the code.
        _ASSERTE(Thread::IsObjRefValid(&objref));
        VALIDATEOBJECT(m_asObj);
        // If this assert fires, you probably did not protect
        // your OBJECTREF and a GC might have occurred.  To
        // where the possible GC was, set a breakpoint in Thread::TriggersGC
        _ASSERTE(Thread::IsObjRefValid(this));

        if (m_asObj != 0 || objref.m_asObj != 0) {
            ENABLESTRESSHEAP();
        }
    }
    return m_asObj != objref.m_asObj;
}

//-------------------------------------------------------------
// Forward method calls.
//-------------------------------------------------------------
Object* OBJECTREF::operator->()
{
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;
    STATIC_CONTRACT_FORBID_FAULT;

    VALIDATEOBJECT(m_asObj);

    // If this assert fires, you probably did not protect
    // your OBJECTREF and a GC might have occurred.  To
    // where the possible GC was, set a breakpoint in Thread::TriggersGC
    _ASSERTE(Thread::IsObjRefValid(this));

    if (m_asObj != 0) {
        ENABLESTRESSHEAP();
    }

    // if you are using OBJECTREF directly,
    // you probably want an Object *
    return (Object *)m_asObj;
}

//-------------------------------------------------------------
// Forward method calls.
//-------------------------------------------------------------
const Object* OBJECTREF::operator->() const
{
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;
    STATIC_CONTRACT_FORBID_FAULT;

    VALIDATEOBJECT(m_asObj);

    // If this assert fires, you probably did not protect
    // your OBJECTREF and a GC might have occurred.
To // where the possible GC was, set a breakpoint in Thread::TriggersGC _ASSERTE(Thread::IsObjRefValid(this)); if (m_asObj != 0) { ENABLESTRESSHEAP(); } // if you are using OBJECTREF directly, // you probably want an Object * return (Object *)m_asObj; } //------------------------------------------------------------- // Assignment. We don't validate the destination so as not // to break the sequence: // // OBJECTREF or; // or = ...; //------------------------------------------------------------- OBJECTREF& OBJECTREF::operator=(const OBJECTREF &objref) { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_FORBID_FAULT; VALIDATEOBJECT(objref.m_asObj); // !!! If this assert is fired, there are two possibilities: // !!! 1. You are doing a type cast, e.g. *(OBJECTREF*)pObj // !!! Instead, you should use ObjectToOBJECTREF(*(Object**)pObj), // !!! or ObjectToSTRINGREF(*(StringObject**)pObj) // !!! 2. There is a real GC hole here. // !!! Either way you need to fix the code. _ASSERTE(Thread::IsObjRefValid(&objref)); if ((objref.m_asObj != 0) && ((IGCHeap*)GCHeapUtilities::GetGCHeap())->IsHeapPointer( (BYTE*)this )) { _ASSERTE(!"Write Barrier violation. 
Must use SetObjectReference() to assign OBJECTREF's into the GC heap!"); } Thread::ObjectRefAssign(this); m_asObj = objref.m_asObj; if (m_asObj != 0) { ENABLESTRESSHEAP(); } return *this; } //------------------------------------------------------------- // Allows for the assignment of NULL to a OBJECTREF //------------------------------------------------------------- OBJECTREF& OBJECTREF::operator=(TADDR nul) { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_FORBID_FAULT; _ASSERTE(nul == 0); Thread::ObjectRefAssign(this); m_asObj = (Object*)nul; if (m_asObj != 0) { ENABLESTRESSHEAP(); } return *this; } #endif // DEBUG #ifdef _DEBUG void* __cdecl GCSafeMemCpy(void * dest, const void * src, size_t len) { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_FORBID_FAULT; if (!(((*(BYTE**)&dest) < g_lowest_address ) || ((*(BYTE**)&dest) >= g_highest_address))) { Thread* pThread = GetThreadNULLOk(); // GCHeapUtilities::IsHeapPointer has race when called in preemptive mode. It walks the list of segments // that can be modified by GC. Do the check below only if it is safe to do so. if (pThread != NULL && pThread->PreemptiveGCDisabled()) { // Note there is memcpyNoGCRefs which will allow you to do a memcpy into the GC // heap if you really know you don't need to call the write barrier _ASSERTE(!GCHeapUtilities::GetGCHeap()->IsHeapPointer((BYTE *) dest) || !"using memcpy to copy into the GC heap, use CopyValueClass"); } } return memcpyNoGCRefs(dest, src, len); } #endif // _DEBUG // This function clears a piece of memory in a GC safe way. It makes the guarantee // that it will clear memory in at least pointer sized chunks whenever possible. // Unaligned memory at the beginning and remaining bytes at the end are written bytewise. // We must make this guarantee whenever we clear memory in the GC heap that could contain // object references. 
The GC or other user threads can read object references at any time, // clearing them bytewise can result in a read on another thread getting incorrect data. void __fastcall ZeroMemoryInGCHeap(void* mem, size_t size) { WRAPPER_NO_CONTRACT; BYTE* memBytes = (BYTE*) mem; BYTE* endBytes = &memBytes[size]; // handle unaligned bytes at the beginning while (!IS_ALIGNED(memBytes, sizeof(PTR_PTR_VOID)) && memBytes < endBytes) *memBytes++ = 0; // now write pointer sized pieces // volatile ensures that this doesn't get optimized back into a memset call size_t nPtrs = (endBytes - memBytes) / sizeof(PTR_PTR_VOID); PTR_VOID volatile * memPtr = (PTR_PTR_VOID) memBytes; for (size_t i = 0; i < nPtrs; i++) *memPtr++ = 0; // handle remaining bytes at the end memBytes = (BYTE*) memPtr; while (memBytes < endBytes) *memBytes++ = 0; } void StackTraceArray::Append(StackTraceElement const * begin, StackTraceElement const * end) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_COOPERATIVE; PRECONDITION(IsProtectedByGCFrame((OBJECTREF*)this)); } CONTRACTL_END; // ensure that only one thread can write to the array EnsureThreadAffinity(); size_t newsize = Size() + (end - begin); Grow(newsize); memcpyNoGCRefs(GetData() + Size(), begin, (end - begin) * sizeof(StackTraceElement)); MemoryBarrier(); // prevent the newsize from being reordered with the array copy SetSize(newsize); #if defined(_DEBUG) CheckState(); #endif } void StackTraceArray::CheckState() const { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_COOPERATIVE; } CONTRACTL_END; if (!m_array) return; assert(GetObjectThread() == GetThreadNULLOk()); size_t size = Size(); StackTraceElement const * p; p = GetData(); for (size_t i = 0; i < size; ++i) assert(p[i].pFunc != NULL); } void StackTraceArray::Grow(size_t grow_size) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_COOPERATIVE; INJECT_FAULT(ThrowOutOfMemory();); PRECONDITION(IsProtectedByGCFrame((OBJECTREF*)this)); } CONTRACTL_END; size_t raw_size = grow_size * sizeof(StackTraceElement) + sizeof(ArrayHeader); 
if (!m_array) { SetArray(I1ARRAYREF(AllocatePrimitiveArray(ELEMENT_TYPE_I1, static_cast<DWORD>(raw_size)))); SetSize(0); SetObjectThread(); } else { if (Capacity() >= raw_size) return; // allocate a new array, copy the data size_t new_capacity = Max(Capacity() * 2, raw_size); _ASSERTE(new_capacity >= grow_size * sizeof(StackTraceElement) + sizeof(ArrayHeader)); I1ARRAYREF newarr = (I1ARRAYREF) AllocatePrimitiveArray(ELEMENT_TYPE_I1, static_cast<DWORD>(new_capacity)); memcpyNoGCRefs(newarr->GetDirectPointerToNonObjectElements(), GetRaw(), Size() * sizeof(StackTraceElement) + sizeof(ArrayHeader)); SetArray(newarr); } } void StackTraceArray::EnsureThreadAffinity() { WRAPPER_NO_CONTRACT; if (!m_array) return; if (GetObjectThread() != GetThreadNULLOk()) { // object is being changed by a thread different from the one which created it // make a copy of the array to prevent a race condition when two different threads try to change it StackTraceArray copy; GCPROTECT_BEGIN(copy); copy.CopyFrom(*this); this->Swap(copy); GCPROTECT_END(); } } // Deep copies the stack trace array void StackTraceArray::CopyFrom(StackTraceArray const & src) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_COOPERATIVE; INJECT_FAULT(ThrowOutOfMemory();); PRECONDITION(IsProtectedByGCFrame((OBJECTREF*)this)); PRECONDITION(IsProtectedByGCFrame((OBJECTREF*)&src)); } CONTRACTL_END; m_array = (I1ARRAYREF) AllocatePrimitiveArray(ELEMENT_TYPE_I1, static_cast<DWORD>(src.Capacity())); Volatile<size_t> size = src.Size(); memcpyNoGCRefs(GetRaw(), src.GetRaw(), size * sizeof(StackTraceElement) + sizeof(ArrayHeader)); SetSize(size); // set size to the exact value which was used when we copied the data // another thread might have changed it at the time of copying SetObjectThread(); // affinitize the newly created array with the current thread } #ifdef _DEBUG //=============================================================================== // Code that insures that our unmanaged version of Nullable is consistant with // 
the managed version Nullable<T> for all T. void Nullable::CheckFieldOffsets(TypeHandle nullableType) { LIMITED_METHOD_CONTRACT; /*** // The non-instantiated method tables like List<T> that are used // by reflection and verification do not have correct field offsets // but we never make instances of these anyway. if (nullableMT->ContainsGenericVariables()) return; ***/ MethodTable* nullableMT = nullableType.GetMethodTable(); // insure that the managed version of the table is the same as the // unmanaged. Note that we can't do this in corelib.h because this // class is generic and field layout depends on the instantiation. _ASSERTE(nullableMT->GetNumInstanceFields() == 2); FieldDesc* field = nullableMT->GetApproxFieldDescListRaw(); _ASSERTE(strcmp(field->GetDebugName(), "hasValue") == 0); // _ASSERTE(field->GetOffset() == offsetof(Nullable, hasValue)); field++; _ASSERTE(strcmp(field->GetDebugName(), "value") == 0); // _ASSERTE(field->GetOffset() == offsetof(Nullable, value)); } #endif //=============================================================================== // Returns true if nullableMT is Nullable<T> for T is equivalent to paramMT BOOL Nullable::IsNullableForTypeHelper(MethodTable* nullableMT, MethodTable* paramMT) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_ANY; } CONTRACTL_END; if (!nullableMT->IsNullable()) return FALSE; // we require the parameter types to be equivalent return TypeHandle(paramMT).IsEquivalentTo(nullableMT->GetInstantiation()[0]); } //=============================================================================== // Returns true if nullableMT is Nullable<T> for T == paramMT BOOL Nullable::IsNullableForTypeHelperNoGC(MethodTable* nullableMT, MethodTable* paramMT) { LIMITED_METHOD_CONTRACT; if (!nullableMT->IsNullable()) return FALSE; // we require an exact match of the parameter types return TypeHandle(paramMT) == nullableMT->GetInstantiation()[0]; } //=============================================================================== CLR_BOOL* 
Nullable::HasValueAddr(MethodTable* nullableMT) { LIMITED_METHOD_CONTRACT; _ASSERTE(strcmp(nullableMT->GetApproxFieldDescListRaw()[0].GetDebugName(), "hasValue") == 0); _ASSERTE(nullableMT->GetApproxFieldDescListRaw()[0].GetOffset() == 0); return (CLR_BOOL*) this; } //=============================================================================== void* Nullable::ValueAddr(MethodTable* nullableMT) { LIMITED_METHOD_CONTRACT; _ASSERTE(strcmp(nullableMT->GetApproxFieldDescListRaw()[1].GetDebugName(), "value") == 0); return (((BYTE*) this) + nullableMT->GetApproxFieldDescListRaw()[1].GetOffset()); } //=============================================================================== // Special Logic to box a nullable<T> as a boxed<T> OBJECTREF Nullable::Box(void* srcPtr, MethodTable* nullableMT) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_COOPERATIVE; } CONTRACTL_END; FAULT_NOT_FATAL(); // FIX_NOW: why do we need this? Nullable* src = (Nullable*) srcPtr; _ASSERTE(IsNullableType(nullableMT)); // We better have a concrete instantiation, or our field offset asserts are not useful _ASSERTE(!nullableMT->ContainsGenericVariables()); if (!*src->HasValueAddr(nullableMT)) return NULL; OBJECTREF obj = 0; GCPROTECT_BEGININTERIOR (src); MethodTable* argMT = nullableMT->GetInstantiation()[0].AsMethodTable(); obj = argMT->Allocate(); CopyValueClass(obj->UnBox(), src->ValueAddr(nullableMT), argMT); GCPROTECT_END (); return obj; } //=============================================================================== // Special Logic to unbox a boxed T as a nullable<T> BOOL Nullable::UnBox(void* destPtr, OBJECTREF boxedVal, MethodTable* destMT) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_COOPERATIVE; } CONTRACTL_END; Nullable* dest = (Nullable*) destPtr; BOOL fRet = TRUE; // We should only get here if we are unboxing a T as a Nullable<T> _ASSERTE(IsNullableType(destMT)); // We better have a concrete instantiation, or our field offset asserts are not useful _ASSERTE(!destMT->ContainsGenericVariables()); 
if (boxedVal == NULL) { // Logically we are doing *dest->HasValueAddr(destMT) = false; // We zero out the whole structure becasue it may contain GC references // and these need to be initialized to zero. (could optimize in the non-GC case) InitValueClass(destPtr, destMT); fRet = TRUE; } else { GCPROTECT_BEGIN(boxedVal); if (!IsNullableForType(destMT, boxedVal->GetMethodTable())) { // For safety's sake, also allow true nullables to be unboxed normally. // This should not happen normally, but we want to be robust if (destMT->IsEquivalentTo(boxedVal->GetMethodTable())) { CopyValueClass(dest, boxedVal->GetData(), destMT); fRet = TRUE; } else { fRet = FALSE; } } else { *dest->HasValueAddr(destMT) = true; CopyValueClass(dest->ValueAddr(destMT), boxedVal->UnBox(), boxedVal->GetMethodTable()); fRet = TRUE; } GCPROTECT_END(); } return fRet; } //=============================================================================== // Special Logic to unbox a boxed T as a nullable<T> // Does not handle type equivalence (may conservatively return FALSE) BOOL Nullable::UnBoxNoGC(void* destPtr, OBJECTREF boxedVal, MethodTable* destMT) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_COOPERATIVE; } CONTRACTL_END; Nullable* dest = (Nullable*) destPtr; // We should only get here if we are unboxing a T as a Nullable<T> _ASSERTE(IsNullableType(destMT)); // We better have a concrete instantiation, or our field offset asserts are not useful _ASSERTE(!destMT->ContainsGenericVariables()); if (boxedVal == NULL) { // Logically we are doing *dest->HasValueAddr(destMT) = false; // We zero out the whole structure becasue it may contain GC references // and these need to be initialized to zero. (could optimize in the non-GC case) InitValueClass(destPtr, destMT); } else { if (!IsNullableForTypeNoGC(destMT, boxedVal->GetMethodTable())) { // For safety's sake, also allow true nullables to be unboxed normally. 
// This should not happen normally, but we want to be robust if (destMT == boxedVal->GetMethodTable()) { CopyValueClass(dest, boxedVal->GetData(), destMT); return TRUE; } return FALSE; } *dest->HasValueAddr(destMT) = true; CopyValueClass(dest->ValueAddr(destMT), boxedVal->UnBox(), boxedVal->GetMethodTable()); } return TRUE; } //=============================================================================== // Special Logic to unbox a boxed T as a nullable<T> into an argument // specified by the argDest. // Does not handle type equivalence (may conservatively return FALSE) BOOL Nullable::UnBoxIntoArgNoGC(ArgDestination *argDest, OBJECTREF boxedVal, MethodTable* destMT) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_COOPERATIVE; } CONTRACTL_END; #if defined(UNIX_AMD64_ABI) if (argDest->IsStructPassedInRegs()) { // We should only get here if we are unboxing a T as a Nullable<T> _ASSERTE(IsNullableType(destMT)); // We better have a concrete instantiation, or our field offset asserts are not useful _ASSERTE(!destMT->ContainsGenericVariables()); if (boxedVal == NULL) { // Logically we are doing *dest->HasValueAddr(destMT) = false; // We zero out the whole structure becasue it may contain GC references // and these need to be initialized to zero. (could optimize in the non-GC case) InitValueClassArg(argDest, destMT); } else { if (!IsNullableForTypeNoGC(destMT, boxedVal->GetMethodTable())) { // For safety's sake, also allow true nullables to be unboxed normally. 
// This should not happen normally, but we want to be robust if (destMT == boxedVal->GetMethodTable()) { CopyValueClassArg(argDest, boxedVal->GetData(), destMT, 0); return TRUE; } return FALSE; } Nullable* dest = (Nullable*)argDest->GetStructGenRegDestinationAddress(); *dest->HasValueAddr(destMT) = true; int destOffset = (BYTE*)dest->ValueAddr(destMT) - (BYTE*)dest; CopyValueClassArg(argDest, boxedVal->UnBox(), boxedVal->GetMethodTable(), destOffset); } return TRUE; } #endif // UNIX_AMD64_ABI return UnBoxNoGC(argDest->GetDestinationAddress(), boxedVal, destMT); } //=============================================================================== // Special Logic to unbox a boxed T as a nullable<T> // Does not do any type checks. void Nullable::UnBoxNoCheck(void* destPtr, OBJECTREF boxedVal, MethodTable* destMT) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_COOPERATIVE; } CONTRACTL_END; Nullable* dest = (Nullable*) destPtr; // We should only get here if we are unboxing a T as a Nullable<T> _ASSERTE(IsNullableType(destMT)); // We better have a concrete instantiation, or our field offset asserts are not useful _ASSERTE(!destMT->ContainsGenericVariables()); if (boxedVal == NULL) { // Logically we are doing *dest->HasValueAddr(destMT) = false; // We zero out the whole structure becasue it may contain GC references // and these need to be initialized to zero. (could optimize in the non-GC case) InitValueClass(destPtr, destMT); } else { if (IsNullableType(boxedVal->GetMethodTable())) { // For safety's sake, also allow true nullables to be unboxed normally. 
// This should not happen normally, but we want to be robust CopyValueClass(dest, boxedVal->GetData(), destMT); } *dest->HasValueAddr(destMT) = true; CopyValueClass(dest->ValueAddr(destMT), boxedVal->UnBox(), boxedVal->GetMethodTable()); } } //=============================================================================== // a boxed Nullable<T> should either be null or a boxed T, but sometimes it is // useful to have a 'true' boxed Nullable<T> (that is it has two fields). This // function returns a 'normalized' version of this pointer. OBJECTREF Nullable::NormalizeBox(OBJECTREF obj) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_COOPERATIVE; } CONTRACTL_END; if (obj != NULL) { MethodTable* retMT = obj->GetMethodTable(); if (Nullable::IsNullableType(retMT)) obj = Nullable::Box(obj->GetData(), retMT); } return obj; } void ThreadBaseObject::SetInternal(Thread *it) { WRAPPER_NO_CONTRACT; // only allow a transition from NULL to non-NULL _ASSERTE((m_InternalThread == NULL) && (it != NULL)); m_InternalThread = it; // Now the native Thread will only be destroyed after the managed Thread is collected. // Tell the GC that the managed Thread actually represents much more memory. 
GCInterface::AddMemoryPressure(sizeof(Thread)); } void ThreadBaseObject::ClearInternal() { WRAPPER_NO_CONTRACT; _ASSERTE(m_InternalThread != NULL); m_InternalThread = NULL; GCInterface::RemoveMemoryPressure(sizeof(Thread)); } #endif // #ifndef DACCESS_COMPILE StackTraceElement const & StackTraceArray::operator[](size_t index) const { WRAPPER_NO_CONTRACT; return GetData()[index]; } StackTraceElement & StackTraceArray::operator[](size_t index) { WRAPPER_NO_CONTRACT; return GetData()[index]; } #if !defined(DACCESS_COMPILE) // Define the lock used to access stacktrace from an exception object SpinLock g_StackTraceArrayLock; void ExceptionObject::SetStackTrace(I1ARRAYREF stackTrace, PTRARRAYREF dynamicMethodArray) { CONTRACTL { GC_NOTRIGGER; NOTHROW; MODE_COOPERATIVE; } CONTRACTL_END; #ifdef STRESS_LOG if (StressLog::StressLogOn(~0u, 0)) { StressLog::CreateThreadStressLog(); } #endif SpinLock::AcquireLock(&g_StackTraceArrayLock); SetObjectReference((OBJECTREF*)&_stackTrace, (OBJECTREF)stackTrace); SetObjectReference((OBJECTREF*)&_dynamicMethods, (OBJECTREF)dynamicMethodArray); SpinLock::ReleaseLock(&g_StackTraceArrayLock); } #endif // !defined(DACCESS_COMPILE) void ExceptionObject::GetStackTrace(StackTraceArray & stackTrace, PTRARRAYREF * outDynamicMethodArray /*= NULL*/) const { CONTRACTL { GC_NOTRIGGER; NOTHROW; MODE_COOPERATIVE; } CONTRACTL_END; #if !defined(DACCESS_COMPILE) SpinLock::AcquireLock(&g_StackTraceArrayLock); #endif // !defined(DACCESS_COMPILE) StackTraceArray temp(_stackTrace); stackTrace.Swap(temp); if (outDynamicMethodArray != NULL) { *outDynamicMethodArray = _dynamicMethods; } #if !defined(DACCESS_COMPILE) SpinLock::ReleaseLock(&g_StackTraceArrayLock); #endif // !defined(DACCESS_COMPILE) } bool LAHashDependentHashTrackerObject::IsLoaderAllocatorLive() { return (ObjectFromHandle(_dependentHandle) != NULL); } void LAHashDependentHashTrackerObject::GetDependentAndLoaderAllocator(OBJECTREF *pLoaderAllocatorRef, GCHEAPHASHOBJECTREF *pGCHeapHash) { 
OBJECTREF primary = ObjectFromHandle(_dependentHandle); if (pLoaderAllocatorRef != NULL) *pLoaderAllocatorRef = primary; IGCHandleManager *mgr = GCHandleUtilities::GetGCHandleManager(); // Secondary is tracked only if primary is non-null if (pGCHeapHash != NULL) *pGCHeapHash = (GCHEAPHASHOBJECTREF)(OBJECTREF)((primary != NULL) ? mgr->GetDependentHandleSecondary(_dependentHandle) : NULL); }
1
dotnet/runtime
66,234
Remove compiler warning suppression
Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
AaronRobinsonMSFT
2022-03-05T03:45:49Z
2022-03-09T00:57:12Z
220e67755ae6323a01011b83070dff6f84b02519
853e494abd1c1e12d28a76829b4680af7f694afa
Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
./src/coreclr/vm/reflectioninvocation.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // #include "common.h" #include "reflectioninvocation.h" #include "invokeutil.h" #include "object.h" #include "class.h" #include "method.hpp" #include "typehandle.h" #include "field.h" #include "eeconfig.h" #include "vars.hpp" #include "jitinterface.h" #include "contractimpl.h" #include "virtualcallstub.h" #include "comdelegate.h" #include "generics.h" #ifdef FEATURE_COMINTEROP #include "interoputil.h" #include "runtimecallablewrapper.h" #endif #include "dbginterface.h" #include "argdestination.h" /**************************************************************************/ /* if the type handle 'th' is a byref to a nullable type, return the type handle to the nullable type in the byref. Otherwise return the null type handle */ static TypeHandle NullableTypeOfByref(TypeHandle th) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; if (th.GetVerifierCorElementType() != ELEMENT_TYPE_BYREF) return TypeHandle(); TypeHandle subType = th.AsTypeDesc()->GetTypeParam(); if (!Nullable::IsNullableType(subType)) return TypeHandle(); return subType; } static void TryCallMethodWorker(MethodDescCallSite* pMethodCallSite, ARG_SLOT* args, Frame* pDebuggerCatchFrame) { // Use static contracts b/c we have SEH. STATIC_CONTRACT_THROWS; STATIC_CONTRACT_GC_TRIGGERS; STATIC_CONTRACT_MODE_ANY; struct Param: public NotifyOfCHFFilterWrapperParam { MethodDescCallSite * pMethodCallSite; ARG_SLOT* args; } param; param.pFrame = pDebuggerCatchFrame; param.pMethodCallSite = pMethodCallSite; param.args = args; PAL_TRY(Param *, pParam, &param) { pParam->pMethodCallSite->CallWithValueTypes(pParam->args); } PAL_EXCEPT_FILTER(NotifyOfCHFFilterWrapper) { // Should never reach here b/c handler should always continue search. 
_ASSERTE(false); } PAL_ENDTRY } // Warning: This method has subtle differences from CallDescrWorkerReflectionWrapper // In particular that one captures watson bucket data and corrupting exception severity, // then transfers that data to the newly produced TargetInvocationException. This one // doesn't take those same steps. // static void TryCallMethod(MethodDescCallSite* pMethodCallSite, ARG_SLOT* args, bool wrapExceptions) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_COOPERATIVE; } CONTRACTL_END; if (wrapExceptions) { OBJECTREF ppException = NULL; GCPROTECT_BEGIN(ppException); // The sole purpose of having this frame is to tell the debugger that we have a catch handler here // which may swallow managed exceptions. The debugger needs this in order to send a // CatchHandlerFound (CHF) notification. FrameWithCookie<DebuggerU2MCatchHandlerFrame> catchFrame; EX_TRY{ TryCallMethodWorker(pMethodCallSite, args, &catchFrame); } EX_CATCH{ ppException = GET_THROWABLE(); _ASSERTE(ppException); } EX_END_CATCH(RethrowTransientExceptions) catchFrame.Pop(); // It is important to re-throw outside the catch block because re-throwing will invoke // the jitter and managed code and will cause us to use more than the backout stack limit. 
if (ppException != NULL) { // If we get here we need to throw an TargetInvocationException OBJECTREF except = InvokeUtil::CreateTargetExcept(&ppException); COMPlusThrow(except); } GCPROTECT_END(); } else { pMethodCallSite->CallWithValueTypes(args); } } FCIMPL5(Object*, RuntimeFieldHandle::GetValue, ReflectFieldObject *pFieldUNSAFE, Object *instanceUNSAFE, ReflectClassBaseObject *pFieldTypeUNSAFE, ReflectClassBaseObject *pDeclaringTypeUNSAFE, CLR_BOOL *pDomainInitialized) { CONTRACTL { FCALL_CHECK; } CONTRACTL_END; struct _gc { OBJECTREF target; REFLECTCLASSBASEREF pFieldType; REFLECTCLASSBASEREF pDeclaringType; REFLECTFIELDREF refField; }gc; gc.target = ObjectToOBJECTREF(instanceUNSAFE); gc.pFieldType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pFieldTypeUNSAFE); gc.pDeclaringType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pDeclaringTypeUNSAFE); gc.refField = (REFLECTFIELDREF)ObjectToOBJECTREF(pFieldUNSAFE); if ((gc.pFieldType == NULL) || (gc.refField == NULL)) FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle")); TypeHandle fieldType = gc.pFieldType->GetType(); TypeHandle declaringType = (gc.pDeclaringType != NULL) ? gc.pDeclaringType->GetType() : TypeHandle(); Assembly *pAssem; if (declaringType.IsNull()) { // global field pAssem = gc.refField->GetField()->GetModule()->GetAssembly(); } else { pAssem = declaringType.GetAssembly(); } OBJECTREF rv = NULL; // not protected HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc); // There can be no GC after this until the Object is returned. 
rv = InvokeUtil::GetFieldValue(gc.refField->GetField(), fieldType, &gc.target, declaringType, pDomainInitialized); HELPER_METHOD_FRAME_END(); return OBJECTREFToObject(rv); } FCIMPLEND FCIMPL2(FC_BOOL_RET, ReflectionInvocation::CanValueSpecialCast, ReflectClassBaseObject *pValueTypeUNSAFE, ReflectClassBaseObject *pTargetTypeUNSAFE) { CONTRACTL { FCALL_CHECK; PRECONDITION(CheckPointer(pValueTypeUNSAFE)); PRECONDITION(CheckPointer(pTargetTypeUNSAFE)); } CONTRACTL_END; REFLECTCLASSBASEREF refValueType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pValueTypeUNSAFE); REFLECTCLASSBASEREF refTargetType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pTargetTypeUNSAFE); TypeHandle valueType = refValueType->GetType(); TypeHandle targetType = refTargetType->GetType(); // we are here only if the target type is a primitive, an enum or a pointer CorElementType targetCorElement = targetType.GetVerifierCorElementType(); BOOL ret = TRUE; HELPER_METHOD_FRAME_BEGIN_RET_2(refValueType, refTargetType); // the field type is a pointer if (targetCorElement == ELEMENT_TYPE_PTR || targetCorElement == ELEMENT_TYPE_FNPTR) { // the object must be an IntPtr or a System.Reflection.Pointer if (valueType == TypeHandle(CoreLibBinder::GetClass(CLASS__INTPTR))) { // // it's an IntPtr, it's good. } // // it's a System.Reflection.Pointer object // void* assigns to any pointer. Otherwise the type of the pointer must match else if (!InvokeUtil::IsVoidPtr(targetType)) { if (!valueType.CanCastTo(targetType)) ret = FALSE; } } else { // the field type is an enum or a primitive. To have any chance of assignement the object type must // be an enum or primitive as well. // So get the internal cor element and that must be the same or widen CorElementType valueCorElement = valueType.GetVerifierCorElementType(); if (InvokeUtil::IsPrimitiveType(valueCorElement)) ret = (InvokeUtil::CanPrimitiveWiden(targetCorElement, valueCorElement)) ? 
TRUE : FALSE; else ret = FALSE; } HELPER_METHOD_FRAME_END(); FC_RETURN_BOOL(ret); } FCIMPLEND FCIMPL3(Object*, ReflectionInvocation::AllocateValueType, ReflectClassBaseObject *pTargetTypeUNSAFE, Object *valueUNSAFE, CLR_BOOL fForceTypeChange) { CONTRACTL { FCALL_CHECK; PRECONDITION(CheckPointer(pTargetTypeUNSAFE)); PRECONDITION(CheckPointer(valueUNSAFE, NULL_OK)); } CONTRACTL_END; struct _gc { REFLECTCLASSBASEREF refTargetType; OBJECTREF value; OBJECTREF obj; }gc; gc.value = ObjectToOBJECTREF(valueUNSAFE); gc.obj = gc.value; gc.refTargetType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pTargetTypeUNSAFE); TypeHandle targetType = gc.refTargetType->GetType(); HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc); CorElementType targetElementType = targetType.GetSignatureCorElementType(); if (InvokeUtil::IsPrimitiveType(targetElementType) || targetElementType == ELEMENT_TYPE_VALUETYPE) { MethodTable* allocMT = targetType.AsMethodTable(); if (allocMT->IsByRefLike()) { COMPlusThrow(kNotSupportedException, W("NotSupported_ByRefLike")); } if (gc.value != NULL) { // ignore the type of the incoming box if fForceTypeChange is set // and the target type is not nullable if (!fForceTypeChange || Nullable::IsNullableType(targetType)) allocMT = gc.value->GetMethodTable(); } // for null Nullable<T> we don't want a default value being created. // just allow the null value to be passed, as it will be converted to // a true nullable if (!(gc.value == NULL && Nullable::IsNullableType(targetType))) { // boxed value type are 'read-only' in the sence that you can't // only the implementor of the value type can expose mutators. // To insure byrefs don't mutate value classes in place, we make // a copy (and if we were not given one, we create a null value type // instance. 
gc.obj = allocMT->Allocate(); if (gc.value != NULL) CopyValueClass(gc.obj->UnBox(), gc.value->UnBox(), allocMT); } } HELPER_METHOD_FRAME_END(); return OBJECTREFToObject(gc.obj); } FCIMPLEND FCIMPL7(void, RuntimeFieldHandle::SetValue, ReflectFieldObject *pFieldUNSAFE, Object *targetUNSAFE, Object *valueUNSAFE, ReflectClassBaseObject *pFieldTypeUNSAFE, DWORD attr, ReflectClassBaseObject *pDeclaringTypeUNSAFE, CLR_BOOL *pDomainInitialized) { CONTRACTL { FCALL_CHECK; } CONTRACTL_END; struct _gc { OBJECTREF target; OBJECTREF value; REFLECTCLASSBASEREF fieldType; REFLECTCLASSBASEREF declaringType; REFLECTFIELDREF refField; } gc; gc.target = ObjectToOBJECTREF(targetUNSAFE); gc.value = ObjectToOBJECTREF(valueUNSAFE); gc.fieldType= (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pFieldTypeUNSAFE); gc.declaringType= (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pDeclaringTypeUNSAFE); gc.refField = (REFLECTFIELDREF)ObjectToOBJECTREF(pFieldUNSAFE); if ((gc.fieldType == NULL) || (gc.refField == NULL)) FCThrowResVoid(kArgumentNullException, W("Arg_InvalidHandle")); TypeHandle fieldType = gc.fieldType->GetType(); TypeHandle declaringType = gc.declaringType != NULL ? 
gc.declaringType->GetType() : TypeHandle(); Assembly *pAssem; if (declaringType.IsNull()) { // global field pAssem = gc.refField->GetField()->GetModule()->GetAssembly(); } else { pAssem = declaringType.GetAssembly(); } FC_GC_POLL_NOT_NEEDED(); FieldDesc* pFieldDesc = gc.refField->GetField(); HELPER_METHOD_FRAME_BEGIN_PROTECT(gc); InvokeUtil::SetValidField(fieldType.GetVerifierCorElementType(), fieldType, pFieldDesc, &gc.target, &gc.value, declaringType, pDomainInitialized); HELPER_METHOD_FRAME_END(); } FCIMPLEND extern "C" void QCALLTYPE RuntimeTypeHandle_CreateInstanceForAnotherGenericParameter( QCall::TypeHandle pTypeHandle, TypeHandle* pInstArray, INT32 cInstArray, QCall::ObjectHandleOnStack pInstantiatedObject ) { CONTRACTL{ QCALL_CHECK; PRECONDITION(!pTypeHandle.AsTypeHandle().IsNull()); PRECONDITION(cInstArray >= 0); PRECONDITION(cInstArray == 0 || pInstArray != NULL); } CONTRACTL_END; TypeHandle genericType = pTypeHandle.AsTypeHandle(); BEGIN_QCALL; _ASSERTE (genericType.HasInstantiation()); TypeHandle instantiatedType = ((TypeHandle)genericType.GetCanonicalMethodTable()).Instantiate(Instantiation(pInstArray, (DWORD)cInstArray)); // Get the type information associated with refThis MethodTable* pVMT = instantiatedType.GetMethodTable(); _ASSERTE (pVMT != 0 && !instantiatedType.IsTypeDesc()); _ASSERTE( !pVMT->IsAbstract() ||! 
instantiatedType.ContainsGenericVariables()); _ASSERTE(!pVMT->IsByRefLike() && pVMT->HasDefaultConstructor()); // We've got the class, lets allocate it and call the constructor // Nullables don't take this path, if they do we need special logic to make an instance _ASSERTE(!Nullable::IsNullableType(instantiatedType)); { GCX_COOP(); OBJECTREF newObj = instantiatedType.GetMethodTable()->Allocate(); GCPROTECT_BEGIN(newObj); CallDefaultConstructor(newObj); GCPROTECT_END(); pInstantiatedObject.Set(newObj); } END_QCALL; } NOINLINE FC_BOOL_RET IsInstanceOfTypeHelper(OBJECTREF obj, REFLECTCLASSBASEREF refType) { FCALL_CONTRACT; BOOL canCast = false; FC_INNER_PROLOG(RuntimeTypeHandle::IsInstanceOfType); HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_2(Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2, obj, refType); canCast = ObjIsInstanceOf(OBJECTREFToObject(obj), refType->GetType()); HELPER_METHOD_FRAME_END(); FC_RETURN_BOOL(canCast); } FCIMPL2(FC_BOOL_RET, RuntimeTypeHandle::IsInstanceOfType, ReflectClassBaseObject* pTypeUNSAFE, Object *objectUNSAFE) { FCALL_CONTRACT; OBJECTREF obj = ObjectToOBJECTREF(objectUNSAFE); REFLECTCLASSBASEREF refType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pTypeUNSAFE); // Null is not instance of anything in reflection world if (obj == NULL) FC_RETURN_BOOL(false); if (refType == NULL) FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle")); switch (ObjIsInstanceOfCached(objectUNSAFE, refType->GetType())) { case TypeHandle::CanCast: FC_RETURN_BOOL(true); case TypeHandle::CannotCast: FC_RETURN_BOOL(false); default: // fall through to the slow helper break; } FC_INNER_RETURN(FC_BOOL_RET, IsInstanceOfTypeHelper(obj, refType)); } FCIMPLEND /****************************************************************************/ /* boxed Nullable<T> are represented as a boxed T, so there is no unboxed Nullable<T> inside to point at by reference. 
Because of this a byref parameters of type Nullable<T> are copied out of the boxed instance (to a place on the stack), before the call is made (and this copy is pointed at). After the call returns, this copy must be copied back to the original argument array. ByRefToNullable, is a simple linked list that remembers what copy-backs are needed */ struct ByRefToNullable { unsigned argNum; // The argument number for this byrefNullable argument void* data; // The data to copy back to the ByRefNullable. This points to the stack TypeHandle type; // The type of Nullable for this argument ByRefToNullable* next; // list of these ByRefToNullable(unsigned aArgNum, void* aData, TypeHandle aType, ByRefToNullable* aNext) { argNum = aArgNum; data = aData; type = aType; next = aNext; } }; static void CallDescrWorkerReflectionWrapper(CallDescrData * pCallDescrData, Frame * pFrame) { // Use static contracts b/c we have SEH. STATIC_CONTRACT_THROWS; STATIC_CONTRACT_GC_TRIGGERS; STATIC_CONTRACT_MODE_ANY; struct Param: public NotifyOfCHFFilterWrapperParam { CallDescrData * pCallDescrData; } param; param.pFrame = pFrame; param.pCallDescrData = pCallDescrData; PAL_TRY(Param *, pParam, &param) { CallDescrWorkerWithHandler(pParam->pCallDescrData); } PAL_EXCEPT_FILTER(ReflectionInvocationExceptionFilter) { // Should never reach here b/c handler should always continue search. _ASSERTE(false); } PAL_ENDTRY } // CallDescrWorkerReflectionWrapper static OBJECTREF InvokeArrayConstructor(TypeHandle th, Span<OBJECTREF>* objs, int argCnt) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_COOPERATIVE; } CONTRACTL_END; // Validate the argCnt an the Rank. Also allow nested SZARRAY's. _ASSERTE(argCnt == (int) th.GetRank() || argCnt == (int) th.GetRank() * 2 || th.GetInternalCorElementType() == ELEMENT_TYPE_SZARRAY); // Validate all of the parameters. 
These all typed as integers int allocSize = 0; if (!ClrSafeInt<int>::multiply(sizeof(INT32), argCnt, allocSize)) COMPlusThrow(kArgumentException, IDS_EE_SIGTOOCOMPLEX); INT32* indexes = (INT32*) _alloca((size_t)allocSize); ZeroMemory(indexes, allocSize); for (DWORD i=0; i<(DWORD)argCnt; i++) { if (!objs->GetAt(i)) COMPlusThrowArgumentException(W("parameters"), W("Arg_NullIndex")); MethodTable* pMT = objs->GetAt(i)->GetMethodTable(); CorElementType oType = TypeHandle(pMT).GetVerifierCorElementType(); if (!InvokeUtil::IsPrimitiveType(oType) || !InvokeUtil::CanPrimitiveWiden(ELEMENT_TYPE_I4,oType)) COMPlusThrow(kArgumentException,W("Arg_PrimWiden")); ARG_SLOT value; InvokeUtil::CreatePrimitiveValue(ELEMENT_TYPE_I4, oType, objs->GetAt(i), &value); memcpyNoGCRefs(indexes + i, ArgSlotEndianessFixup(&value, sizeof(INT32)), sizeof(INT32)); } return AllocateArrayEx(th, indexes, argCnt); } static BOOL IsActivationNeededForMethodInvoke(MethodDesc * pMD) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_COOPERATIVE; } CONTRACTL_END; // The activation for non-generic instance methods is covered by non-null "this pointer" if (!pMD->IsStatic() && !pMD->HasMethodInstantiation() && !pMD->IsInterface()) return FALSE; // We need to activate the instance at least once pMD->EnsureActive(); return FALSE; } class ArgIteratorBaseForMethodInvoke { protected: SIGNATURENATIVEREF * m_ppNativeSig; bool m_fHasThis; FORCEINLINE CorElementType GetReturnType(TypeHandle * pthValueType) { WRAPPER_NO_CONTRACT; return (*pthValueType = (*m_ppNativeSig)->GetReturnTypeHandle()).GetInternalCorElementType(); } FORCEINLINE CorElementType GetNextArgumentType(DWORD iArg, TypeHandle * pthValueType) { WRAPPER_NO_CONTRACT; return (*pthValueType = (*m_ppNativeSig)->GetArgumentAt(iArg)).GetInternalCorElementType(); } FORCEINLINE void Reset() { LIMITED_METHOD_CONTRACT; } FORCEINLINE BOOL IsRegPassedStruct(MethodTable* pMT) { return pMT->IsRegPassedStruct(); } public: BOOL HasThis() { LIMITED_METHOD_CONTRACT; return 
m_fHasThis; } BOOL HasParamType() { LIMITED_METHOD_CONTRACT; // param type methods are not supported for reflection invoke, so HasParamType is always false for them return FALSE; } BOOL IsVarArg() { LIMITED_METHOD_CONTRACT; // vararg methods are not supported for reflection invoke, so IsVarArg is always false for them return FALSE; } DWORD NumFixedArgs() { LIMITED_METHOD_CONTRACT; return (*m_ppNativeSig)->NumFixedArgs(); } #ifdef FEATURE_INTERPRETER BYTE CallConv() { LIMITED_METHOD_CONTRACT; return IMAGE_CEE_CS_CALLCONV_DEFAULT; } #endif // FEATURE_INTERPRETER }; class ArgIteratorForMethodInvoke : public ArgIteratorTemplate<ArgIteratorBaseForMethodInvoke> { public: ArgIteratorForMethodInvoke(SIGNATURENATIVEREF * ppNativeSig, BOOL fCtorOfVariableSizedObject) { m_ppNativeSig = ppNativeSig; m_fHasThis = (*m_ppNativeSig)->HasThis() && !fCtorOfVariableSizedObject; DWORD dwFlags = (*m_ppNativeSig)->GetArgIteratorFlags(); // Use the cached values if they are available if (dwFlags & SIZE_OF_ARG_STACK_COMPUTED) { m_dwFlags = dwFlags; m_nSizeOfArgStack = (*m_ppNativeSig)->GetSizeOfArgStack(); return; } // // Compute flags and stack argument size, and cache them for next invocation // ForceSigWalk(); if (IsActivationNeededForMethodInvoke((*m_ppNativeSig)->GetMethod())) { m_dwFlags |= METHOD_INVOKE_NEEDS_ACTIVATION; } (*m_ppNativeSig)->SetSizeOfArgStack(m_nSizeOfArgStack); _ASSERTE((*m_ppNativeSig)->GetSizeOfArgStack() == m_nSizeOfArgStack); // This has to be last (*m_ppNativeSig)->SetArgIteratorFlags(m_dwFlags); _ASSERTE((*m_ppNativeSig)->GetArgIteratorFlags() == m_dwFlags); } BOOL IsActivationNeeded() { LIMITED_METHOD_CONTRACT; return (m_dwFlags & METHOD_INVOKE_NEEDS_ACTIVATION) != 0; } }; void DECLSPEC_NORETURN ThrowInvokeMethodException(MethodDesc * pMethod, OBJECTREF targetException) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_COOPERATIVE; } CONTRACTL_END; GCPROTECT_BEGIN(targetException); #if defined(_DEBUG) && !defined(TARGET_UNIX) if (IsWatsonEnabled()) { if 
(!CLRException::IsPreallocatedExceptionObject(targetException)) { // If the exception is not preallocated, we should be having the // watson buckets in the throwable already. if(!((EXCEPTIONREF)targetException)->AreWatsonBucketsPresent()) { // If an exception is raised by the VM (e.g. type load exception by the JIT) and it comes // across the reflection invocation boundary before CLR's personality routine for managed // code has been invoked, then no buckets would be available for us at this point. // // Since we cannot assert this, better log it for diagnosis if required. LOG((LF_EH, LL_INFO100, "InvokeImpl - No watson buckets available - regular exception likely raised within VM and not seen by managed code.\n")); } } else { // Exception is preallocated. PTR_EHWatsonBucketTracker pUEWatsonBucketTracker = GetThread()->GetExceptionState()->GetUEWatsonBucketTracker(); if ((IsThrowableThreadAbortException(targetException) && pUEWatsonBucketTracker->CapturedForThreadAbort())|| (pUEWatsonBucketTracker->CapturedAtReflectionInvocation())) { // ReflectionInvocationExceptionFilter would have captured // the watson bucket details for preallocated exceptions // in the UE watson bucket tracker. if(pUEWatsonBucketTracker->RetrieveWatsonBuckets() == NULL) { // See comment above LOG((LF_EH, LL_INFO100, "InvokeImpl - No watson buckets available - preallocated exception likely raised within VM and not seen by managed code.\n")); } } } } #endif // _DEBUG && !TARGET_UNIX OBJECTREF except = InvokeUtil::CreateTargetExcept(&targetException); #ifndef TARGET_UNIX if (IsWatsonEnabled()) { struct { OBJECTREF oExcept; } gcTIE; ZeroMemory(&gcTIE, sizeof(gcTIE)); GCPROTECT_BEGIN(gcTIE); gcTIE.oExcept = except; _ASSERTE(!CLRException::IsPreallocatedExceptionObject(gcTIE.oExcept)); // If the original exception was preallocated, then copy over the captured // watson buckets to the TargetInvocationException object, if available. 
// // We dont need to do this if the original exception was not preallocated // since it already contains the watson buckets inside the object. if (CLRException::IsPreallocatedExceptionObject(targetException)) { PTR_EHWatsonBucketTracker pUEWatsonBucketTracker = GetThread()->GetExceptionState()->GetUEWatsonBucketTracker(); BOOL fCopyWatsonBuckets = TRUE; PTR_VOID pBuckets = pUEWatsonBucketTracker->RetrieveWatsonBuckets(); if (pBuckets != NULL) { // Copy the buckets to the exception object CopyWatsonBucketsToThrowable(pBuckets, gcTIE.oExcept); // Confirm that they are present. _ASSERTE(((EXCEPTIONREF)gcTIE.oExcept)->AreWatsonBucketsPresent()); } // Clear the UE watson bucket tracker since the bucketing // details are now in the TargetInvocationException object. pUEWatsonBucketTracker->ClearWatsonBucketDetails(); } // update "except" incase the reference to the object // was updated by the GC except = gcTIE.oExcept; GCPROTECT_END(); } #endif // !TARGET_UNIX // Since the original exception is inner of target invocation exception, // when TIE is seen to be raised for the first time, we will end up // using the inner exception buckets automatically. // Since VM is throwing the exception, we set it to use the same corruption severity // that the original exception came in with from reflection invocation. 
COMPlusThrow(except); GCPROTECT_END(); } FCIMPL5(Object*, RuntimeMethodHandle::InvokeMethod, Object *target, Span<OBJECTREF>* objs, SignatureNative* pSigUNSAFE, CLR_BOOL fConstructor, CLR_BOOL fWrapExceptions) { FCALL_CONTRACT; struct { OBJECTREF target; SIGNATURENATIVEREF pSig; OBJECTREF retVal; } gc; gc.target = ObjectToOBJECTREF(target); gc.pSig = (SIGNATURENATIVEREF)pSigUNSAFE; gc.retVal = NULL; MethodDesc* pMeth = gc.pSig->GetMethod(); TypeHandle ownerType = gc.pSig->GetDeclaringType(); HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc); if (ownerType.IsSharedByGenericInstantiations()) COMPlusThrow(kNotSupportedException, W("NotSupported_Type")); #ifdef _DEBUG if (g_pConfig->ShouldInvokeHalt(pMeth)) { _ASSERTE(!"InvokeHalt"); } #endif BOOL fCtorOfVariableSizedObject = FALSE; if (fConstructor) { // If we are invoking a constructor on an array then we must // handle this specially. if (ownerType.IsArray()) { gc.retVal = InvokeArrayConstructor(ownerType, objs, gc.pSig->NumFixedArgs()); goto Done; } // Variable sized objects, like String instances, allocate themselves // so they are a special case. 
MethodTable * pMT = ownerType.AsMethodTable(); fCtorOfVariableSizedObject = pMT->HasComponentSize(); if (!fCtorOfVariableSizedObject) gc.retVal = pMT->Allocate(); } { ArgIteratorForMethodInvoke argit(&gc.pSig, fCtorOfVariableSizedObject); if (argit.IsActivationNeeded()) pMeth->EnsureActive(); CONSISTENCY_CHECK(pMeth->CheckActivated()); UINT nStackBytes = argit.SizeOfFrameArgumentArray(); // Note that SizeOfFrameArgumentArray does overflow checks with sufficient margin to prevent overflows here SIZE_T nAllocaSize = TransitionBlock::GetNegSpaceSize() + sizeof(TransitionBlock) + nStackBytes; Thread * pThread = GET_THREAD(); LPBYTE pAlloc = (LPBYTE)_alloca(nAllocaSize); LPBYTE pTransitionBlock = pAlloc + TransitionBlock::GetNegSpaceSize(); CallDescrData callDescrData; callDescrData.pSrc = pTransitionBlock + sizeof(TransitionBlock); _ASSERTE((nStackBytes % TARGET_POINTER_SIZE) == 0); callDescrData.numStackSlots = nStackBytes / TARGET_POINTER_SIZE; #ifdef CALLDESCR_ARGREGS callDescrData.pArgumentRegisters = (ArgumentRegisters*)(pTransitionBlock + TransitionBlock::GetOffsetOfArgumentRegisters()); #endif #ifdef CALLDESCR_RETBUFFARGREG callDescrData.pRetBuffArg = (UINT64*)(pTransitionBlock + TransitionBlock::GetOffsetOfRetBuffArgReg()); #endif #ifdef CALLDESCR_FPARGREGS callDescrData.pFloatArgumentRegisters = NULL; #endif #ifdef CALLDESCR_REGTYPEMAP callDescrData.dwRegTypeMap = 0; #endif callDescrData.fpReturnSize = argit.GetFPReturnSize(); // This is duplicated logic from MethodDesc::GetCallTarget PCODE pTarget; if (pMeth->IsVtableMethod()) { pTarget = pMeth->GetSingleCallableAddrOfVirtualizedCode(&gc.target, ownerType); } else { pTarget = pMeth->GetSingleCallableAddrOfCode(); } callDescrData.pTarget = pTarget; // Build the arguments on the stack GCStress<cfg_any>::MaybeTrigger(); FrameWithCookie<ProtectValueClassFrame> *pProtectValueClassFrame = NULL; ValueClassInfo *pValueClasses = NULL; ByRefToNullable* byRefToNullables = NULL; // if we have the magic Value Class 
return, we need to allocate that class // and place a pointer to it on the stack. BOOL hasRefReturnAndNeedsBoxing = FALSE; // Indicates that the method has a BYREF return type and the target type needs to be copied into a preallocated boxed object. TypeHandle retTH = gc.pSig->GetReturnTypeHandle(); TypeHandle refReturnTargetTH; // Valid only if retType == ELEMENT_TYPE_BYREF. Caches the TypeHandle of the byref target. BOOL fHasRetBuffArg = argit.HasRetBuffArg(); CorElementType retType = retTH.GetSignatureCorElementType(); BOOL hasValueTypeReturn = retTH.IsValueType() && retType != ELEMENT_TYPE_VOID; _ASSERTE(hasValueTypeReturn || !fHasRetBuffArg); // only valuetypes are returned via a return buffer. if (hasValueTypeReturn) { gc.retVal = retTH.GetMethodTable()->Allocate(); } else if (retType == ELEMENT_TYPE_BYREF) { refReturnTargetTH = retTH.AsTypeDesc()->GetTypeParam(); // If the target of the byref is a value type, we need to preallocate a boxed object to hold the managed return value. if (refReturnTargetTH.IsValueType()) { _ASSERTE(refReturnTargetTH.GetSignatureCorElementType() != ELEMENT_TYPE_VOID); // Managed Reflection layer has a bouncer for "ref void" returns. hasRefReturnAndNeedsBoxing = TRUE; gc.retVal = refReturnTargetTH.GetMethodTable()->Allocate(); } } // Copy "this" pointer if (!pMeth->IsStatic() && !fCtorOfVariableSizedObject) { PVOID pThisPtr; if (fConstructor) { // Copy "this" pointer: only unbox if type is value type and method is not unboxing stub if (ownerType.IsValueType() && !pMeth->IsUnboxingStub()) { // Note that we create a true boxed nullabe<T> and then convert it to a T below pThisPtr = gc.retVal->GetData(); } else pThisPtr = OBJECTREFToObject(gc.retVal); } else if (!pMeth->GetMethodTable()->IsValueType()) pThisPtr = OBJECTREFToObject(gc.target); else { if (pMeth->IsUnboxingStub()) pThisPtr = OBJECTREFToObject(gc.target); else { // Create a true boxed Nullable<T> and use that as the 'this' pointer. 
// since what is passed in is just a boxed T MethodTable* pMT = pMeth->GetMethodTable(); if (Nullable::IsNullableType(pMT)) { OBJECTREF bufferObj = pMT->Allocate(); void* buffer = bufferObj->GetData(); Nullable::UnBox(buffer, gc.target, pMT); pThisPtr = buffer; } else pThisPtr = gc.target->UnBox(); } } *((LPVOID*) (pTransitionBlock + argit.GetThisOffset())) = pThisPtr; } // NO GC AFTER THIS POINT. The object references in the method frame are not protected. // // We have already copied "this" pointer so we do not want GC to happen even sooner. Unfortunately, // we may allocate in the process of copying this pointer that makes it hard to express using contracts. // // If an exception occurs a gc may happen but we are going to dump the stack anyway and we do // not need to protect anything. { BEGINFORBIDGC(); #ifdef _DEBUG GCForbidLoaderUseHolder forbidLoaderUse; #endif // Take care of any return arguments if (fHasRetBuffArg) { PVOID pRetBuff = gc.retVal->GetData(); *((LPVOID*) (pTransitionBlock + argit.GetRetBuffArgOffset())) = pRetBuff; } // copy args UINT nNumArgs = gc.pSig->NumFixedArgs(); for (UINT i = 0 ; i < nNumArgs; i++) { TypeHandle th = gc.pSig->GetArgumentAt(i); int ofs = argit.GetNextOffset(); _ASSERTE(ofs != TransitionBlock::InvalidOffset); #ifdef CALLDESCR_REGTYPEMAP FillInRegTypeMap(ofs, argit.GetArgType(), (BYTE *)&callDescrData.dwRegTypeMap); #endif #ifdef CALLDESCR_FPARGREGS // Under CALLDESCR_FPARGREGS -ve offsets indicate arguments in floating point registers. If we have at // least one such argument we point the call worker at the floating point area of the frame (we leave // it null otherwise since the worker can perform a useful optimization if it knows no floating point // registers need to be set up). 
if (TransitionBlock::HasFloatRegister(ofs, argit.GetArgLocDescForStructInRegs()) && (callDescrData.pFloatArgumentRegisters == NULL)) { callDescrData.pFloatArgumentRegisters = (FloatArgumentRegisters*) (pTransitionBlock + TransitionBlock::GetOffsetOfFloatArgumentRegisters()); } #endif UINT structSize = argit.GetArgSize(); bool needsStackCopy = false; // A boxed Nullable<T> is represented as boxed T. So to pass a Nullable<T> by reference, // we have to create a Nullable<T> on stack, copy the T into it, then pass it to the callee and // after returning from the call, copy the T out of the Nullable<T> back to the boxed T. TypeHandle nullableType = NullableTypeOfByref(th); if (!nullableType.IsNull()) { th = nullableType; structSize = th.GetSize(); needsStackCopy = true; } #ifdef ENREGISTERED_PARAMTYPE_MAXSIZE else if (argit.IsArgPassedByRef()) { needsStackCopy = true; } #endif ArgDestination argDest(pTransitionBlock, ofs, argit.GetArgLocDescForStructInRegs()); if(needsStackCopy) { MethodTable * pMT = th.GetMethodTable(); _ASSERTE(pMT && pMT->IsValueType()); PVOID pArgDst = argDest.GetDestinationAddress(); PVOID pStackCopy = _alloca(structSize); *(PVOID *)pArgDst = pStackCopy; pArgDst = pStackCopy; if (!nullableType.IsNull()) { byRefToNullables = new(_alloca(sizeof(ByRefToNullable))) ByRefToNullable(i, pStackCopy, nullableType, byRefToNullables); } // save the info into ValueClassInfo if (pMT->ContainsPointers()) { pValueClasses = new (_alloca(sizeof(ValueClassInfo))) ValueClassInfo(pStackCopy, pMT, pValueClasses); } // We need a new ArgDestination that points to the stack copy argDest = ArgDestination(pStackCopy, 0, NULL); } InvokeUtil::CopyArg(th, &objs->GetAt(i), &argDest); } ENDFORBIDGC(); } if (pValueClasses != NULL) { pProtectValueClassFrame = new (_alloca (sizeof (FrameWithCookie<ProtectValueClassFrame>))) FrameWithCookie<ProtectValueClassFrame>(pThread, pValueClasses); } // Call the method bool fExceptionThrown = false; if (fWrapExceptions) { // The sole purpose 
of having this frame is to tell the debugger that we have a catch handler here // which may swallow managed exceptions. The debugger needs this in order to send a // CatchHandlerFound (CHF) notification. FrameWithCookie<DebuggerU2MCatchHandlerFrame> catchFrame(pThread); EX_TRY_THREAD(pThread) { CallDescrWorkerReflectionWrapper(&callDescrData, &catchFrame); } EX_CATCH{ // Rethrow transient exceptions for constructors for backward compatibility if (fConstructor && GET_EXCEPTION()->IsTransient()) { EX_RETHROW; } // Abuse retval to store the exception object gc.retVal = GET_THROWABLE(); _ASSERTE(gc.retVal); fExceptionThrown = true; } EX_END_CATCH(SwallowAllExceptions); catchFrame.Pop(pThread); } else { CallDescrWorkerWithHandler(&callDescrData); } // Now that we are safely out of the catch block, we can create and raise the // TargetInvocationException. if (fExceptionThrown) { ThrowInvokeMethodException(pMeth, gc.retVal); } // It is still illegal to do a GC here. The return type might have/contain GC pointers. if (fConstructor) { // We have a special case for Strings...The object is returned... if (fCtorOfVariableSizedObject) { PVOID pReturnValue = &callDescrData.returnValue; gc.retVal = *(OBJECTREF *)pReturnValue; } // If it is a Nullable<T>, box it using Nullable<T> conventions. // TODO: this double allocates on constructions which is wasteful gc.retVal = Nullable::NormalizeBox(gc.retVal); } else if (hasValueTypeReturn || hasRefReturnAndNeedsBoxing) { _ASSERTE(gc.retVal != NULL); if (hasRefReturnAndNeedsBoxing) { // Method has BYREF return and the target type is one that needs boxing. We need to copy into the boxed object we have allocated for this purpose. 
LPVOID pReturnedReference = *(LPVOID*)&callDescrData.returnValue; if (pReturnedReference == NULL) { COMPlusThrow(kNullReferenceException, W("NullReference_InvokeNullRefReturned")); } CopyValueClass(gc.retVal->GetData(), pReturnedReference, gc.retVal->GetMethodTable()); } // if the structure is returned by value, then we need to copy in the boxed object // we have allocated for this purpose. else if (!fHasRetBuffArg) { CopyValueClass(gc.retVal->GetData(), &callDescrData.returnValue, gc.retVal->GetMethodTable()); } // From here on out, it is OK to have GCs since the return object (which may have had // GC pointers has been put into a GC object and thus protected. // TODO this creates two objects which is inefficient // If the return type is a Nullable<T> box it into the correct form gc.retVal = Nullable::NormalizeBox(gc.retVal); } else if (retType == ELEMENT_TYPE_BYREF) { // WARNING: pReturnedReference is an unprotected inner reference so we must not trigger a GC until the referenced value has been safely captured. LPVOID pReturnedReference = *(LPVOID*)&callDescrData.returnValue; if (pReturnedReference == NULL) { COMPlusThrow(kNullReferenceException, W("NullReference_InvokeNullRefReturned")); } gc.retVal = InvokeUtil::CreateObjectAfterInvoke(refReturnTargetTH, pReturnedReference); } else { gc.retVal = InvokeUtil::CreateObjectAfterInvoke(retTH, &callDescrData.returnValue); } while (byRefToNullables != NULL) { OBJECTREF obj = Nullable::Box(byRefToNullables->data, byRefToNullables->type.GetMethodTable()); SetObjectReference(&objs->GetAt(byRefToNullables->argNum), obj); byRefToNullables = byRefToNullables->next; } if (pProtectValueClassFrame != NULL) pProtectValueClassFrame->Pop(pThread); } Done: ; HELPER_METHOD_FRAME_END(); return OBJECTREFToObject(gc.retVal); } FCIMPLEND struct SkipStruct { StackCrawlMark* pStackMark; MethodDesc* pMeth; }; // This method is called by the GetMethod function and will crawl backward // up the stack for integer methods. 
// Stack-walk callback used by RuntimeMethodHandle::GetCurrentMethod. Walks frames until it
// passes the caller-supplied stack mark, then records that frame's MethodDesc in the
// SkipStruct and aborts the walk.
static StackWalkAction SkipMethods(CrawlFrame* frame, VOID* data) {
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
    }
    CONTRACTL_END;

    SkipStruct* pSkip = (SkipStruct*) data;

    MethodDesc *pFunc = frame->GetFunction();

    /* We asked to be called back only for functions */
    _ASSERTE(pFunc);

    // The check here is between the address of a local variable
    // (the stack mark) and a pointer to the EIP for a frame
    // (which is actually the pointer to the return address to the
    // function from the previous frame). So we'll actually notice
    // which frame the stack mark was in one frame later. This is
    // fine since we only implement LookForMyCaller.
    _ASSERTE(*pSkip->pStackMark == LookForMyCaller);
    if (!frame->IsInCalleesFrames(pSkip->pStackMark))
        return SWA_CONTINUE;

    if (pFunc->RequiresInstMethodDescArg())
    {
        // Shared generic code: prefer the exact instantiating MethodDesc carried in the
        // param-type arg; fall back to the (shared) MethodDesc when it is unavailable.
        pSkip->pMeth = (MethodDesc *) frame->GetParamTypeArg();
        if (pSkip->pMeth == NULL)
            pSkip->pMeth = pFunc;
    }
    else
        pSkip->pMeth = pFunc;
    return SWA_ABORT;
}

// Return the MethodInfo that represents the current method (two above this one)
FCIMPL1(ReflectMethodObject*, RuntimeMethodHandle::GetCurrentMethod, StackCrawlMark* stackMark) {
    FCALL_CONTRACT;
    REFLECTMETHODREF pRet = NULL;

    HELPER_METHOD_FRAME_BEGIN_RET_0();
    SkipStruct skip;
    skip.pStackMark = stackMark;
    skip.pMeth = 0;
    StackWalkFunctions(GetThread(), SkipMethods, &skip);

    // If C<Foo>.m<Bar> was called, the stack walker returns C<object>.m<object>. We cannot
    // get know that the instantiation used Foo or Bar at that point. So the next best thing
    // is to return C<T>.m<P> and that's what LoadTypicalMethodDefinition will do for us.
    if (skip.pMeth != NULL)
        pRet = skip.pMeth->LoadTypicalMethodDefinition()->GetStubMethodInfo();
    else
        pRet = NULL;

    HELPER_METHOD_FRAME_END();

    return (ReflectMethodObject*)OBJECTREFToObject(pRet);
}
FCIMPLEND

// Reads a field's value from the object addressed by a TypedByRef when the field is static
// or the target is a reference type. Validates the target/field relationship before reading.
static OBJECTREF DirectObjectFieldGet(FieldDesc *pField, TypeHandle fieldType, TypeHandle enclosingType, TypedByRef *pTarget, CLR_BOOL *pDomainInitialized) {
    CONTRACTL {
        THROWS;
        GC_TRIGGERS;
        MODE_COOPERATIVE;
        PRECONDITION(CheckPointer(pField));
    }
    CONTRACTL_END;

    OBJECTREF refRet;
    OBJECTREF objref = NULL;
    GCPROTECT_BEGIN(objref);
    if (!pField->IsStatic()) {
        // For instance fields the TypedByRef's data is the object reference itself.
        objref = ObjectToOBJECTREF(*((Object**)pTarget->data));
    }

    InvokeUtil::ValidateObjectTarget(pField, enclosingType, &objref);
    refRet = InvokeUtil::GetFieldValue(pField, fieldType, &objref, enclosingType, pDomainInitialized);
    GCPROTECT_END();
    return refRet;
}

// FCall backing FieldInfo.GetValueDirect: fetches a field's value directly out of the
// location described by a TypedByRef, boxing primitives/value types as needed.
FCIMPL4(Object*, RuntimeFieldHandle::GetValueDirect, ReflectFieldObject *pFieldUNSAFE, ReflectClassBaseObject *pFieldTypeUNSAFE, TypedByRef *pTarget, ReflectClassBaseObject *pDeclaringTypeUNSAFE) {
    CONTRACTL {
        FCALL_CHECK;
    }
    CONTRACTL_END;

    struct {
        REFLECTCLASSBASEREF refFieldType;
        REFLECTCLASSBASEREF refDeclaringType;
        REFLECTFIELDREF refField;
    } gc;
    gc.refFieldType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pFieldTypeUNSAFE);
    gc.refDeclaringType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pDeclaringTypeUNSAFE);
    gc.refField = (REFLECTFIELDREF)ObjectToOBJECTREF(pFieldUNSAFE);

    if ((gc.refFieldType == NULL) || (gc.refField == NULL))
        FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));

    TypeHandle fieldType = gc.refFieldType->GetType();

    FieldDesc *pField = gc.refField->GetField();

    // NOTE(review): pAssem appears unused in the visible code below — possibly a leftover;
    // TODO confirm before removing.
    Assembly *pAssem = pField->GetModule()->GetAssembly();

    OBJECTREF refRet = NULL;
    CorElementType fieldElType;

    HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc);

    // Find the Object and its type
    TypeHandle targetType = pTarget->type;
    _ASSERTE(gc.refDeclaringType == NULL || !gc.refDeclaringType->GetType().IsTypeDesc());
    MethodTable *pEnclosingMT = (gc.refDeclaringType != NULL ? gc.refDeclaringType->GetType() : TypeHandle()).AsMethodTable();

    CLR_BOOL domainInitialized = FALSE;
    if (pField->IsStatic() || !targetType.IsValueType()) {
        // Static fields and reference-typed targets go through the GC-protected helper.
        refRet = DirectObjectFieldGet(pField, fieldType, TypeHandle(pEnclosingMT), pTarget, &domainInitialized);
        goto lExit;
    }

    // Validate that the target type can be cast to the type that owns this field info.
    if (!targetType.CanCastTo(TypeHandle(pEnclosingMT)))
        COMPlusThrowArgumentException(W("obj"), NULL);

    // This is a workaround because from the previous case we may end up with an
    // Enum. We want to process it here.
    // Get the value from the field
    void* p;
    fieldElType = fieldType.GetSignatureCorElementType();
    switch (fieldElType) {
    case ELEMENT_TYPE_VOID:
        _ASSERTE(!"Void used as Field Type!");
        COMPlusThrow(kInvalidProgramException);

    case ELEMENT_TYPE_BOOLEAN:  // boolean
    case ELEMENT_TYPE_I1:       // byte
    case ELEMENT_TYPE_U1:       // unsigned byte
    case ELEMENT_TYPE_I2:       // short
    case ELEMENT_TYPE_U2:       // unsigned short
    case ELEMENT_TYPE_CHAR:     // char
    case ELEMENT_TYPE_I4:       // int
    case ELEMENT_TYPE_U4:       // unsigned int
    case ELEMENT_TYPE_I:
    case ELEMENT_TYPE_U:
    case ELEMENT_TYPE_R4:       // float
    case ELEMENT_TYPE_I8:       // long
    case ELEMENT_TYPE_U8:       // unsigned long
    case ELEMENT_TYPE_R8:       // double
    case ELEMENT_TYPE_VALUETYPE:
        // Primitives and value types are boxed from their in-place storage.
        _ASSERTE(!fieldType.IsTypeDesc());
        p = ((BYTE*) pTarget->data) + pField->GetOffset();
        refRet = fieldType.AsMethodTable()->Box(p);
        break;

    case ELEMENT_TYPE_OBJECT:
    case ELEMENT_TYPE_CLASS:
    case ELEMENT_TYPE_SZARRAY:          // Single Dim, Zero
    case ELEMENT_TYPE_ARRAY:            // general array
        p = ((BYTE*) pTarget->data) + pField->GetOffset();
        refRet = ObjectToOBJECTREF(*(Object**) p);
        break;

    case ELEMENT_TYPE_PTR:
    {
        // Unmanaged pointers are surfaced to managed code as System.Reflection.Pointer.
        p = ((BYTE*) pTarget->data) + pField->GetOffset();
        refRet = InvokeUtil::CreatePointer(fieldType, *(void **)p);
        break;
    }

    default:
        _ASSERTE(!"Unknown Type");
        // this is really an impossible condition
        COMPlusThrow(kNotSupportedException);
    }

lExit: ;
    HELPER_METHOD_FRAME_END();
    return OBJECTREFToObject(refRet);
}
FCIMPLEND
// Writes a field's value into the object addressed by a TypedByRef when the field is static
// or the target is a reference type. Validates the target/field relationship before writing.
static void DirectObjectFieldSet(FieldDesc *pField, TypeHandle fieldType, TypeHandle enclosingType, TypedByRef *pTarget, OBJECTREF *pValue, CLR_BOOL *pDomainInitialized) {
    CONTRACTL {
        THROWS;
        GC_TRIGGERS;
        MODE_COOPERATIVE;
        PRECONDITION(CheckPointer(pField));
        PRECONDITION(!fieldType.IsNull());
    }
    CONTRACTL_END;

    OBJECTREF objref = NULL;
    GCPROTECT_BEGIN(objref);
    if (!pField->IsStatic()) {
        // For instance fields the TypedByRef's data is the object reference itself.
        objref = ObjectToOBJECTREF(*((Object**)pTarget->data));
    }
    // Validate the target/fld type relationship
    InvokeUtil::ValidateObjectTarget(pField, enclosingType, &objref);

    InvokeUtil::SetValidField(pField->GetFieldType(), fieldType, pField, &objref, pValue, enclosingType, pDomainInitialized);
    GCPROTECT_END();
}

// FCall backing FieldInfo.SetValueDirect: stores a value directly into the location described
// by a TypedByRef, handling primitive widening, pointers, references, and value types.
FCIMPL5(void, RuntimeFieldHandle::SetValueDirect, ReflectFieldObject *pFieldUNSAFE, ReflectClassBaseObject *pFieldTypeUNSAFE, TypedByRef *pTarget, Object *valueUNSAFE, ReflectClassBaseObject *pContextTypeUNSAFE) {
    CONTRACTL {
        FCALL_CHECK;
    }
    CONTRACTL_END;

    struct _gc {
        OBJECTREF           oValue;
        REFLECTCLASSBASEREF pFieldType;
        REFLECTCLASSBASEREF pContextType;
        REFLECTFIELDREF     refField;
    } gc;

    gc.oValue = ObjectToOBJECTREF(valueUNSAFE);
    gc.pFieldType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pFieldTypeUNSAFE);
    gc.pContextType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pContextTypeUNSAFE);
    gc.refField = (REFLECTFIELDREF)ObjectToOBJECTREF(pFieldUNSAFE);

    if ((gc.pFieldType == NULL) || (gc.refField == NULL))
        FCThrowResVoid(kArgumentNullException, W("Arg_InvalidHandle"));

    TypeHandle fieldType = gc.pFieldType->GetType();
    TypeHandle contextType = (gc.pContextType != NULL) ? gc.pContextType->GetType() : NULL;

    FieldDesc *pField = gc.refField->GetField();

    // NOTE(review): pAssem appears unused in the visible code below — possibly a leftover;
    // TODO confirm before removing.
    Assembly *pAssem = pField->GetModule()->GetAssembly();

    BYTE           *pDst = NULL;
    ARG_SLOT        value = NULL;
    CorElementType  fieldElType;

    HELPER_METHOD_FRAME_BEGIN_PROTECT(gc);

    // Find the Object and its type
    TypeHandle targetType = pTarget->type;
    MethodTable *pEnclosingMT = contextType.GetMethodTable();

    // Verify that the value passed can be widened into the target
    InvokeUtil::ValidField(fieldType, &gc.oValue);

    CLR_BOOL domainInitialized = FALSE;
    if (pField->IsStatic() || !targetType.IsValueType()) {
        // Static fields and reference-typed targets go through the GC-protected helper.
        DirectObjectFieldSet(pField, fieldType, TypeHandle(pEnclosingMT), pTarget, &gc.oValue, &domainInitialized);
        goto lExit;
    }

    // A null value is only permitted for reference fields or Nullable<T>.
    if (gc.oValue == NULL && fieldType.IsValueType() && !Nullable::IsNullableType(fieldType))
        COMPlusThrowArgumentNull(W("value"));

    // Validate that the target type can be cast to the type that owns this field info.
    if (!targetType.CanCastTo(TypeHandle(pEnclosingMT)))
        COMPlusThrowArgumentException(W("obj"), NULL);

    // Set the field
    fieldElType = fieldType.GetInternalCorElementType();
    if (ELEMENT_TYPE_BOOLEAN <= fieldElType && fieldElType <= ELEMENT_TYPE_R8) {
        // Widen (or reinterpret) the boxed primitive into an ARG_SLOT before storing.
        CorElementType objType = gc.oValue->GetTypeHandle().GetInternalCorElementType();
        if (objType != fieldElType)
            InvokeUtil::CreatePrimitiveValue(fieldElType, objType, gc.oValue, &value);
        else
            value = *(ARG_SLOT*)gc.oValue->UnBox();
    }
    pDst = ((BYTE*) pTarget->data) + pField->GetOffset();

    switch (fieldElType) {
    case ELEMENT_TYPE_VOID:
        _ASSERTE(!"Void used as Field Type!");
        COMPlusThrow(kInvalidProgramException);

    case ELEMENT_TYPE_BOOLEAN:  // boolean
    case ELEMENT_TYPE_I1:       // byte
    case ELEMENT_TYPE_U1:       // unsigned byte
        VolatileStore((UINT8*)pDst, *(UINT8*)&value);
        break;

    case ELEMENT_TYPE_I2:       // short
    case ELEMENT_TYPE_U2:       // unsigned short
    case ELEMENT_TYPE_CHAR:     // char
        VolatileStore((UINT16*)pDst, *(UINT16*)&value);
        break;

    case ELEMENT_TYPE_I4:       // int
    case ELEMENT_TYPE_U4:       // unsigned int
    case ELEMENT_TYPE_R4:       // float
        VolatileStore((UINT32*)pDst, *(UINT32*)&value);
        break;

    case ELEMENT_TYPE_I8:       // long
    case ELEMENT_TYPE_U8:       // unsigned long
    case ELEMENT_TYPE_R8:       // double
        VolatileStore((UINT64*)pDst, *(UINT64*)&value);
        break;

    case ELEMENT_TYPE_I:
    {
        INT_PTR valuePtr = (INT_PTR) InvokeUtil::GetIntPtrValue(gc.oValue);
        VolatileStore((INT_PTR*) pDst, valuePtr);
    }
    break;
    case ELEMENT_TYPE_U:
    {
        UINT_PTR valuePtr = (UINT_PTR) InvokeUtil::GetIntPtrValue(gc.oValue);
        VolatileStore((UINT_PTR*) pDst, valuePtr);
    }
    break;

    case ELEMENT_TYPE_PTR:      // pointers
        if (gc.oValue != 0) {
            value = 0;
            if (CoreLibBinder::IsClass(gc.oValue->GetMethodTable(), CLASS__POINTER)) {
                // System.Reflection.Pointer wrapper: extract the raw pointer and store it.
                value = (size_t) InvokeUtil::GetPointerValue(gc.oValue);
#ifdef _MSC_VER
#pragma warning(disable: 4267) //work-around for compiler
#endif
                VolatileStore((size_t*) pDst, (size_t) value);
#ifdef _MSC_VER
#pragma warning(default: 4267)
#endif
                break;
            }
        }
        // Not a Pointer wrapper (or null) — treat like a function pointer / raw integer below.
        FALLTHROUGH;

    case ELEMENT_TYPE_FNPTR:
    {
        value = 0;
        if (gc.oValue != 0) {
            CorElementType objType = gc.oValue->GetTypeHandle().GetInternalCorElementType();
            InvokeUtil::CreatePrimitiveValue(objType, objType, gc.oValue, &value);
        }
#ifdef _MSC_VER
#pragma warning(disable: 4267) //work-around for compiler
#endif
        VolatileStore((size_t*) pDst, (size_t) value);
#ifdef _MSC_VER
#pragma warning(default: 4267)
#endif
    }
    break;

    case ELEMENT_TYPE_SZARRAY:          // Single Dim, Zero
    case ELEMENT_TYPE_ARRAY:            // General Array
    case ELEMENT_TYPE_CLASS:
    case ELEMENT_TYPE_OBJECT:
        // Reference stores must go through SetObjectReference so the GC write barrier fires.
        SetObjectReference((OBJECTREF*)pDst, gc.oValue);
        break;

    case ELEMENT_TYPE_VALUETYPE:
    {
        _ASSERTE(!fieldType.IsTypeDesc());
        MethodTable* pMT = fieldType.AsMethodTable();

        // If we have a null value then we must create an empty field
        if (gc.oValue == 0)
            InitValueClass(pDst, pMT);
        else {
            pMT->UnBoxIntoUnchecked(pDst, gc.oValue);
        }
    }
    break;

    default:
        _ASSERTE(!"Unknown Type");
        // this is really an impossible condition
        COMPlusThrow(kNotSupportedException);
    }

lExit: ;
    HELPER_METHOD_FRAME_END();
}
FCIMPLEND

// QCall: forces JIT compilation of a method if it is still pointing at its prestub.
extern "C" void QCALLTYPE ReflectionInvocation_CompileMethod(MethodDesc * pMD)
{
    QCALL_CONTRACT;

    // Argument is checked on the managed side
    PRECONDITION(pMD != NULL);

    if (!pMD->IsPointingToPrestub())
        return;

    BEGIN_QCALL;
    pMD->DoPrestub(NULL);
    END_QCALL;
}

// This method triggers the class constructor for a given type
extern "C" void QCALLTYPE ReflectionInvocation_RunClassConstructor(QCall::TypeHandle pType)
{
    QCALL_CONTRACT;

    // TypeDescs (pointers, byrefs, etc.) have no cctor; nothing to do.
    TypeHandle typeHnd = pType.AsTypeHandle();
    if (typeHnd.IsTypeDesc())
        return;

    // Fast path: skip the QCall transition entirely when the class is already initialized.
    MethodTable *pMT = typeHnd.AsMethodTable();
    if (pMT->IsClassInited())
        return;

    BEGIN_QCALL;
    pMT->CheckRestore();
    pMT->EnsureInstanceActive();
    pMT->CheckRunClassInitThrowing();
    END_QCALL;
}

// This method triggers the module constructor for a given module
extern "C" void QCALLTYPE ReflectionInvocation_RunModuleConstructor(QCall::ModuleHandle pModule)
{
    QCALL_CONTRACT;

    // Fast path: already-active assemblies need no work.
    DomainAssembly *pDomainAssembly = pModule->GetDomainAssembly();
    if (pDomainAssembly != NULL && pDomainAssembly->IsActive())
        return;

    BEGIN_QCALL;
    pDomainAssembly->EnsureActive();
    END_QCALL;
}

// Ensures a method (and, for wrapper stubs, its wrapped target) is active and jitted.
static void PrepareMethodHelper(MethodDesc * pMD)
{
    STANDARD_VM_CONTRACT;

    pMD->EnsureActive();

    if (pMD->IsPointingToPrestub())
        pMD->DoPrestub(NULL);

    if (pMD->IsWrapperStub())
    {
        pMD = pMD->GetWrappedMethodDesc();
        if (pMD->IsPointingToPrestub())
            pMD->DoPrestub(NULL);
    }
}

// This method triggers a given method to be jitted. CoreCLR implementation of this method triggers jiting of the given method only.
// It does not walk a subset of callgraph to provide CER guarantees.
// QCall backing RuntimeHelpers.PrepareMethod. Optionally takes a combined instantiation
// (class type args followed by method type args), resolves the exact MethodDesc for it,
// and ensures that method is active and jitted. Throws ArgumentException for abstract
// methods or invalid/open instantiations.
extern "C" void QCALLTYPE ReflectionInvocation_PrepareMethod(MethodDesc *pMD, TypeHandle *pInstantiation, UINT32 cInstantiation)
{
    CONTRACTL {
        QCALL_CHECK;
        PRECONDITION(pMD != NULL);
        PRECONDITION(CheckPointer(pInstantiation, NULL_OK));
    }
    CONTRACTL_END;

    BEGIN_QCALL;

    if (pMD->IsAbstract())
        COMPlusThrow(kArgumentException, W("Argument_CannotPrepareAbstract"));

    MethodTable * pExactMT = pMD->GetMethodTable();
    if (pInstantiation != NULL)
    {
        // We were handed an instantiation, check that the method expects it and the right number of types has been provided (the
        // caller supplies one array containing the class instantiation immediately followed by the method instantiation).
        if (cInstantiation != (pMD->GetNumGenericMethodArgs() + pMD->GetNumGenericClassArgs()))
            COMPlusThrow(kArgumentException, W("Argument_InvalidGenericInstantiation"));

        // Check we've got a reasonable looking instantiation.
        if (!Generics::CheckInstantiation(Instantiation(pInstantiation, cInstantiation)))
            COMPlusThrow(kArgumentException, W("Argument_InvalidGenericInstantiation"));
        // Open instantiations (containing generic variables) cannot be prepared.
        for (ULONG i = 0; i < cInstantiation; i++)
            if (pInstantiation[i].ContainsGenericVariables())
                COMPlusThrow(kArgumentException, W("Argument_InvalidGenericInstantiation"));

        // Load the exact instantiated type, then find/create the matching instantiated method.
        TypeHandle thExactType = ClassLoader::LoadGenericInstantiationThrowing(pMD->GetModule(),
                                                                              pMD->GetMethodTable()->GetCl(),
                                                                              Instantiation(pInstantiation, pMD->GetNumGenericClassArgs()));
        pExactMT = thExactType.AsMethodTable();

        pMD = MethodDesc::FindOrCreateAssociatedMethodDesc(pMD,
                                                           pExactMT,
                                                           FALSE,
                                                           Instantiation(&pInstantiation[pMD->GetNumGenericClassArgs()], pMD->GetNumGenericMethodArgs()),
                                                           FALSE);
    }

    if (pMD->ContainsGenericVariables())
        COMPlusThrow(kArgumentException, W("Argument_InvalidGenericInstantiation"));

    PrepareMethodHelper(pMD);

    END_QCALL;
}

// This method triggers target of a given method to be jitted. CoreCLR implementation of this method triggers jiting
// of the given method only. It does not walk a subset of callgraph to provide CER guarantees.
// In the case of a multi-cast delegate, we rely on the fact that each individual component // was prepared prior to the Combine. FCIMPL1(void, ReflectionInvocation::PrepareDelegate, Object* delegateUNSAFE) { CONTRACTL { FCALL_CHECK; PRECONDITION(CheckPointer(delegateUNSAFE, NULL_OK)); } CONTRACTL_END; if (delegateUNSAFE == NULL) return; OBJECTREF delegate = ObjectToOBJECTREF(delegateUNSAFE); HELPER_METHOD_FRAME_BEGIN_1(delegate); MethodDesc *pMD = COMDelegate::GetMethodDesc(delegate); GCX_PREEMP(); PrepareMethodHelper(pMD); HELPER_METHOD_FRAME_END(); } FCIMPLEND // This method checks to see if there is sufficient stack to execute the average Framework method. // If there is not, then it throws System.InsufficientExecutionStackException. The limit for each // thread is precomputed when the thread is created. FCIMPL0(void, ReflectionInvocation::EnsureSufficientExecutionStack) { FCALL_CONTRACT; Thread *pThread = GetThread(); // We use the address of a local variable as our "current stack pointer", which is // plenty close enough for the purposes of this method. UINT_PTR current = reinterpret_cast<UINT_PTR>(&pThread); UINT_PTR limit = pThread->GetCachedStackSufficientExecutionLimit(); if (current < limit) { FCThrowVoid(kInsufficientExecutionStackException); } } FCIMPLEND // As with EnsureSufficientExecutionStack, this method checks and returns whether there is // sufficient stack to execute the average Framework method, but rather than throwing, // it simply returns a Boolean: true for sufficient stack space, otherwise false. 
FCIMPL0(FC_BOOL_RET, ReflectionInvocation::TryEnsureSufficientExecutionStack)
{
    FCALL_CONTRACT;

    Thread *pThread = GetThread();

    // Same logic as EnsureSufficientExecutionStack
    UINT_PTR current = reinterpret_cast<UINT_PTR>(&pThread);
    UINT_PTR limit = pThread->GetCachedStackSufficientExecutionLimit();

    FC_RETURN_BOOL(current >= limit);
}
FCIMPLEND

// FCall backing TypedReference.MakeTypedReference: builds a TypedByRef pointing at the
// storage of a (possibly nested) field chain inside the target object. The summed field
// offsets locate the innermost field; its type becomes the TypedByRef's type.
FCIMPL4(void, ReflectionInvocation::MakeTypedReference, TypedByRef * value, Object* targetUNSAFE, ArrayBase* fldsUNSAFE, ReflectClassBaseObject *pFieldTypeUNSAFE)
{
    CONTRACTL {
        FCALL_CHECK;
        PRECONDITION(CheckPointer(targetUNSAFE));
        PRECONDITION(CheckPointer(fldsUNSAFE));
    }
    CONTRACTL_END;

    DWORD offset = 0;

    struct _gc {
        OBJECTREF           target;
        BASEARRAYREF        flds;
        REFLECTCLASSBASEREF refFieldType;
    } gc;
    gc.target = (OBJECTREF) targetUNSAFE;
    gc.flds = (BASEARRAYREF) fldsUNSAFE;
    gc.refFieldType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pFieldTypeUNSAFE);

    TypeHandle fieldType = gc.refFieldType->GetType();

    HELPER_METHOD_FRAME_BEGIN_PROTECT(gc);
    GCPROTECT_BEGININTERIOR (value)
    // Accumulate the offsets of each field along the chain.
    DWORD cnt = gc.flds->GetNumComponents();
    FieldDesc** fields = (FieldDesc**)gc.flds->GetDataPtr();
    for (DWORD i = 0; i < cnt; i++) {
        FieldDesc* pField = fields[i];
        offset += pField->GetOffset();
    }

    // Fields already are prohibited from having ArgIterator and RuntimeArgumentHandles
    _ASSERTE(!gc.target->GetTypeHandle().GetMethodTable()->IsByRefLike());

    // Create the ByRef; skip past the object header (sizeof(Object)) to the instance data.
    value->data = ((BYTE *)(gc.target->GetAddress() + offset)) + sizeof(Object);
    value->type = fieldType;

    GCPROTECT_END();
    HELPER_METHOD_FRAME_END();
}
FCIMPLEND

// This is an internal helper function to TypedReference class.
// It extracts the object from the typed reference.
FCIMPL1(Object*, ReflectionInvocation::TypedReferenceToObject, TypedByRef * value)
{
    FCALL_CONTRACT;

    OBJECTREF Obj = NULL;

    TypeHandle th(value->type);

    if (th.IsNull())
        FCThrowRes(kArgumentNullException, W("ArgumentNull_TypedRefType"));

    MethodTable* pMT = th.GetMethodTable();
    PREFIX_ASSUME(NULL != pMT);

    if (pMT->IsValueType())
    {
        // Value types must be boxed to surface them as an object.
        // value->data is protected by the caller
        HELPER_METHOD_FRAME_BEGIN_RET_1(Obj);

        Obj = pMT->Box(value->data);

        HELPER_METHOD_FRAME_END();
    }
    else
    {
        // Reference types: the TypedByRef's data points at the object reference itself.
        Obj = ObjectToOBJECTREF(*((Object**)value->data));
    }

    return OBJECTREFToObject(Obj);
}
FCIMPLEND

// Boxes a raw 64-bit value as an instance of the given enum type, honoring the enum's
// underlying size (via ArgSlotEndianessFixup, which accounts for endianness).
FCIMPL2_IV(Object*, ReflectionInvocation::CreateEnum, ReflectClassBaseObject *pTypeUNSAFE, INT64 value)
{
    FCALL_CONTRACT;

    REFLECTCLASSBASEREF refType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pTypeUNSAFE);
    TypeHandle typeHandle = refType->GetType();
    _ASSERTE(typeHandle.IsEnum());
    OBJECTREF obj = NULL;
    HELPER_METHOD_FRAME_BEGIN_RET_1(refType);
    MethodTable *pEnumMT = typeHandle.AsMethodTable();
    obj = pEnumMT->Box(ArgSlotEndianessFixup((ARG_SLOT*)&value, pEnumMT->GetNumInstanceFieldBytes()));

    HELPER_METHOD_FRAME_END();
    return OBJECTREFToObject(obj);
}
FCIMPLEND

#ifdef FEATURE_COMINTEROP

// FCall used for late-bound (IDispatch) invocation on COM objects: translates the managed
// binder flags into DISPATCH_* flags and forwards to IUInvokeDispMethod.
FCIMPL8(Object*, ReflectionInvocation::InvokeDispMethod, ReflectClassBaseObject* refThisUNSAFE, StringObject* nameUNSAFE, INT32 invokeAttr, Object* targetUNSAFE, PTRArray* argsUNSAFE, PTRArray* byrefModifiersUNSAFE, LCID lcid, PTRArray* namedParametersUNSAFE)
{
    FCALL_CONTRACT;

    struct _gc
    {
        REFLECTCLASSBASEREF refThis;
        STRINGREF           name;
        OBJECTREF           target;
        PTRARRAYREF         args;
        PTRARRAYREF         byrefModifiers;
        PTRARRAYREF         namedParameters;
        OBJECTREF           RetObj;
    } gc;

    gc.refThis         = (REFLECTCLASSBASEREF) refThisUNSAFE;
    gc.name            = (STRINGREF)           nameUNSAFE;
    gc.target          = (OBJECTREF)           targetUNSAFE;
    gc.args            = (PTRARRAYREF)         argsUNSAFE;
    gc.byrefModifiers  = (PTRARRAYREF)         byrefModifiersUNSAFE;
    gc.namedParameters = (PTRARRAYREF)         namedParametersUNSAFE;
    gc.RetObj          = NULL;

    HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc);

    _ASSERTE(gc.target != NULL);
    _ASSERTE(gc.target->GetMethodTable()->IsComObjectType());

    // Map binder flags to IDispatch::Invoke flags. Note that the method/get cases OR into
    // 'flags' while the put/putref/construct cases overwrite it (matching the original code).
    WORD flags = 0;
    if (invokeAttr & BINDER_InvokeMethod)
        flags |= DISPATCH_METHOD;
    if (invokeAttr & BINDER_GetProperty)
        flags |= DISPATCH_PROPERTYGET;
    if (invokeAttr & BINDER_SetProperty)
        flags = DISPATCH_PROPERTYPUT | DISPATCH_PROPERTYPUTREF;
    if (invokeAttr & BINDER_PutDispProperty)
        flags = DISPATCH_PROPERTYPUT;
    if (invokeAttr & BINDER_PutRefDispProperty)
        flags = DISPATCH_PROPERTYPUTREF;
    if (invokeAttr & BINDER_CreateInstance)
        flags = DISPATCH_CONSTRUCT;

    IUInvokeDispMethod(&gc.refThis,
                       &gc.target,
                       (OBJECTREF*)&gc.name,
                       NULL,
                       (OBJECTREF*)&gc.args,
                       (OBJECTREF*)&gc.byrefModifiers,
                       (OBJECTREF*)&gc.namedParameters,
                       &gc.RetObj,
                       lcid,
                       flags,
                       invokeAttr & BINDER_IgnoreReturn,
                       invokeAttr & BINDER_IgnoreCase);

    HELPER_METHOD_FRAME_END();
    return OBJECTREFToObject(gc.RetObj);
}
FCIMPLEND
#endif // FEATURE_COMINTEROP

// FCall: retrieves the GUID associated with a runtime type. TypeDescs and arrays yield an
// all-zero GUID; __ComObject instances report the CLSID cached in their sync block (when
// unmanaged activation is enabled); everything else asks the MethodTable.
FCIMPL2(void, ReflectionInvocation::GetGUID, ReflectClassBaseObject* refThisUNSAFE, GUID * result)
{
    FCALL_CONTRACT;

    REFLECTCLASSBASEREF refThis = (REFLECTCLASSBASEREF) refThisUNSAFE;

    HELPER_METHOD_FRAME_BEGIN_1(refThis);
    GCPROTECT_BEGININTERIOR (result);

    if (result == NULL || refThis == NULL)
        COMPlusThrow(kNullReferenceException);

    TypeHandle type = refThis->GetType();
    if (type.IsTypeDesc() || type.IsArray()) {
        memset(result,0,sizeof(GUID));
        goto lExit;
    }

#ifdef FEATURE_COMINTEROP
    if (IsComObjectClass(type))
    {
        SyncBlock* pSyncBlock = refThis->GetSyncBlock();

#ifdef FEATURE_COMINTEROP_UNMANAGED_ACTIVATION
        ComClassFactory* pComClsFac = pSyncBlock->GetInteropInfo()->GetComClassFactory();
        if (pComClsFac)
        {
            memcpyNoGCRefs(result, &pComClsFac->m_rclsid, sizeof(GUID));
        }
        else
#endif // FEATURE_COMINTEROP_UNMANAGED_ACTIVATION
        {
            memset(result, 0, sizeof(GUID));
        }
        goto lExit;
    }
#endif // FEATURE_COMINTEROP

    GUID guid;
    type.AsMethodTable()->GetGuid(&guid, TRUE);
    memcpyNoGCRefs(result, &guid, sizeof(GUID));

lExit: ;
    GCPROTECT_END();
    HELPER_METHOD_FRAME_END();
}
FCIMPLEND

/*
 * Given a TypeHandle, validates whether it's legal
to construct a real
 * instance of that type. Throws an exception if the instantiation would
 * be illegal; e.g., type is void or a pointer or an open generic. This
 * doesn't guarantee that a ctor will succeed, only that the VM is able
 * to support an instance of this type on the heap.
 * ==========
 * The 'fGetUninitializedObject' parameter controls the type of
 * exception that is thrown if a check fails.
 */
void RuntimeTypeHandle::ValidateTypeAbleToBeInstantiated(
    TypeHandle typeHandle,
    bool fGetUninitializedObject
)
{
    STANDARD_VM_CONTRACT;

    // Don't allow void
    if (typeHandle.GetSignatureCorElementType() == ELEMENT_TYPE_VOID)
    {
        COMPlusThrow(kArgumentException, W("NotSupported_Type"));
    }

    // Don't allow arrays, pointers, byrefs, or function pointers
    if (typeHandle.IsTypeDesc() || typeHandle.IsArray())
    {
        COMPlusThrow(fGetUninitializedObject ? kArgumentException : kMissingMethodException, W("NotSupported_Type"));
    }

    MethodTable* pMT = typeHandle.AsMethodTable();
    PREFIX_ASSUME(pMT != NULL);

    // Don't allow creating instances of delegates
    if (pMT->IsDelegate())
    {
        COMPlusThrow(kArgumentException, W("NotSupported_Type"));
    }

    // Don't allow string or string-like (variable length) types.
    if (pMT->HasComponentSize())
    {
        COMPlusThrow(fGetUninitializedObject ? kArgumentException : kMissingMethodException, W("Argument_NoUninitializedStrings"));
    }

    // Don't allow abstract classes or interface types
    if (pMT->IsAbstract())
    {
        RuntimeExceptionKind exKind = fGetUninitializedObject ? kMemberAccessException : kMissingMethodException;
        if (pMT->IsInterface())
            COMPlusThrow(exKind, W("Acc_CreateInterface"));
        else
            COMPlusThrow(exKind, W("Acc_CreateAbst"));
    }

    // Don't allow generic variables (e.g., the 'T' from List<T>)
    // or open generic types (List<>).
    if (typeHandle.ContainsGenericVariables())
    {
        COMPlusThrow(kMemberAccessException, W("Acc_CreateGeneric"));
    }

    // Don't allow generics instantiated over __Canon
    if (pMT->IsSharedByGenericInstantiations())
    {
        COMPlusThrow(kNotSupportedException, W("NotSupported_Type"));
    }

    // Don't allow ref structs
    if (pMT->IsByRefLike())
    {
        COMPlusThrow(kNotSupportedException, W("NotSupported_ByRefLike"));
    }
}

/*
 * Given a RuntimeType, queries info on how to instantiate the object.
 * pRuntimeType - [required] the RuntimeType object
 * ppfnAllocator - [required, null-init] fnptr to the allocator
 *                 mgd sig: void* -> object
 * pvAllocatorFirstArg - [required, null-init] first argument to the allocator
 *                       (normally, but not always, the MethodTable*)
 * ppfnCtor - [required, null-init] the instance's parameterless ctor,
 *            mgd sig object -> void, or null if no ctor is needed for this type
 * pfCtorIsPublic - [required, null-init] whether the parameterless ctor is public
 * ==========
 * This method will not run the type's static cctor.
 * This method will not allocate an instance of the target type.
 */
extern "C" void QCALLTYPE RuntimeTypeHandle_GetActivationInfo(
    QCall::ObjectHandleOnStack pRuntimeType,
    PCODE* ppfnAllocator,
    void** pvAllocatorFirstArg,
    PCODE* ppfnCtor,
    BOOL* pfCtorIsPublic
)
{
    CONTRACTL{
        QCALL_CHECK;
        PRECONDITION(CheckPointer(ppfnAllocator));
        PRECONDITION(CheckPointer(pvAllocatorFirstArg));
        PRECONDITION(CheckPointer(ppfnCtor));
        PRECONDITION(CheckPointer(pfCtorIsPublic));
        PRECONDITION(*ppfnAllocator == NULL);
        PRECONDITION(*pvAllocatorFirstArg == NULL);
        PRECONDITION(*ppfnCtor == NULL);
        PRECONDITION(*pfCtorIsPublic == FALSE);
    }
    CONTRACTL_END;

    TypeHandle typeHandle = NULL;

    BEGIN_QCALL;

    {
        GCX_COOP();

        // We need to take the RuntimeType itself rather than the RuntimeTypeHandle,
        // as the COM CLSID is stored in the RuntimeType object's sync block, and we
        // might need to pull it out later in this method.
        typeHandle = ((REFLECTCLASSBASEREF)pRuntimeType.Get())->GetType();
    }

    RuntimeTypeHandle::ValidateTypeAbleToBeInstantiated(typeHandle, false /* fGetUninitializedObject */);

    MethodTable* pMT = typeHandle.AsMethodTable();
    PREFIX_ASSUME(pMT != NULL);

#ifdef FEATURE_COMINTEROP
    // COM allocation can involve the __ComObject base type (with attached CLSID) or a
    // VM-implemented [ComImport] class. For CreateInstance, the flowchart is:
    //   - For __ComObject,
    //     .. on Windows, bypass normal newobj logic and use ComClassFactory::CreateInstance.
    //     .. on non-Windows, treat as a normal class, type has no special handling in VM.
    //   - For [ComImport] class, treat as a normal class. VM will replace default
    //     ctor with COM activation logic on supported platforms, else ctor itself will PNSE.
    // IsComObjectClass is the correct way to check for __ComObject specifically
    if (IsComObjectClass(typeHandle))
    {
        void* pClassFactory = NULL;

#ifdef FEATURE_COMINTEROP_UNMANAGED_ACTIVATION
        {
            // Need to enter cooperative mode to manipulate OBJECTREFs
            GCX_COOP();
            SyncBlock* pSyncBlock = pRuntimeType.Get()->GetSyncBlock();
            pClassFactory = (void*)pSyncBlock->GetInteropInfo()->GetComClassFactory();
        }
#endif // FEATURE_COMINTEROP_UNMANAGED_ACTIVATION

        if (pClassFactory == NULL)
        {
            // no factory *or* unmanaged activation is not enabled in this runtime
            COMPlusThrow(kInvalidComObjectException, IDS_EE_NO_BACKING_CLASS_FACTORY);
        }

        // managed sig: ComClassFactory* -> object (via FCALL)
        *ppfnAllocator = CoreLibBinder::GetMethod(METHOD__RT_TYPE_HANDLE__ALLOCATECOMOBJECT)->GetMultiCallableAddrOfCode();
        *pvAllocatorFirstArg = pClassFactory;
        *ppfnCtor = NULL; // no ctor call needed; activation handled entirely by the allocator
        *pfCtorIsPublic = TRUE; // no ctor call needed => assume 'public' equivalent
    }
    else
#endif // FEATURE_COMINTEROP
    if (pMT->IsNullable())
    {
        // CreateInstance returns null given Nullable<T>
        *ppfnAllocator = NULL;
        *pvAllocatorFirstArg = NULL;
        *ppfnCtor = NULL;
        *pfCtorIsPublic = TRUE; // no ctor call needed => assume 'public' equivalent
    }
    else
    {
        // managed sig: MethodTable* -> object (via JIT helper)
        bool fHasSideEffectsUnused;
        *ppfnAllocator = CEEJitInfo::getHelperFtnStatic(CEEInfo::getNewHelperStatic(pMT, &fHasSideEffectsUnused));
        *pvAllocatorFirstArg = pMT;

        if (pMT->HasDefaultConstructor())
        {
            // managed sig: object -> void
            // for ctors on value types, lookup boxed entry point stub
            MethodDesc* pMD = pMT->GetDefaultConstructor(pMT->IsValueType() /* forceBoxedEntryPoint */);
            _ASSERTE(pMD != NULL);

            PCODE pCode = pMD->GetMultiCallableAddrOfCode();
            _ASSERTE(pCode != NULL);

            *ppfnCtor = pCode;
            *pfCtorIsPublic = pMD->IsPublic();
        }
        else if (pMT->IsValueType())
        {
            *ppfnCtor = NULL; // no ctor call needed; we're creating a boxed default(T)
            *pfCtorIsPublic = TRUE; // no ctor call needed => assume 'public' equivalent
        }
        else
        {
            // reference type with no parameterless ctor - we can't instantiate this
            COMPlusThrow(kMissingMethodException, W("Arg_NoDefCTorWithoutTypeName"));
        }
    }

    pMT->EnsureInstanceActive();

    END_QCALL;
}

/*
 * Given a ComClassFactory*, calls the COM allocator
 * and returns a RCW.
 */
FCIMPL1(Object*, RuntimeTypeHandle::AllocateComObject, void* pClassFactory)
{
    CONTRACTL{
        FCALL_CHECK;
        PRECONDITION(CheckPointer(pClassFactory));
    }
    CONTRACTL_END;

    OBJECTREF rv = NULL;
    bool allocated = false;

    HELPER_METHOD_FRAME_BEGIN_RET_1(rv);

#ifdef FEATURE_COMINTEROP
#ifdef FEATURE_COMINTEROP_UNMANAGED_ACTIVATION
    {
        if (pClassFactory != NULL)
        {
            rv = ((ComClassFactory*)pClassFactory)->CreateInstance(NULL);
            allocated = true;
        }
    }
#endif // FEATURE_COMINTEROP_UNMANAGED_ACTIVATION
#endif // FEATURE_COMINTEROP

    if (!allocated)
    {
        // COM activation is unavailable: either no class factory was provided, or this
        // build/platform doesn't support COM interop at all.
#ifdef FEATURE_COMINTEROP
        COMPlusThrow(kInvalidComObjectException, IDS_EE_NO_BACKING_CLASS_FACTORY);
#else // FEATURE_COMINTEROP
        COMPlusThrow(kPlatformNotSupportedException, IDS_EE_NO_BACKING_CLASS_FACTORY);
#endif // FEATURE_COMINTEROP
    }

    HELPER_METHOD_FRAME_END();
    return OBJECTREFToObject(rv);
}
FCIMPLEND

//*************************************************************************************************
//*************************************************************************************************
//*************************************************************************************************
//      ReflectionSerialization
//*************************************************************************************************
//*************************************************************************************************
//*************************************************************************************************

// QCall backing RuntimeHelpers.GetUninitializedObject: allocates an instance of the given
// type without running any constructor. Nullable<T> yields an uninitialized T instead.
extern "C" void QCALLTYPE ReflectionSerialization_GetUninitializedObject(QCall::TypeHandle pType, QCall::ObjectHandleOnStack retObject)
{
    QCALL_CONTRACT;

    BEGIN_QCALL;

    TypeHandle type = pType.AsTypeHandle();

    RuntimeTypeHandle::ValidateTypeAbleToBeInstantiated(type, true /* fForGetUninitializedInstance */);

    MethodTable* pMT = type.AsMethodTable();

#ifdef FEATURE_COMINTEROP
    // Also do not allow allocation of uninitialized RCWs (COM objects).
    if (pMT->IsComObjectType())
        COMPlusThrow(kNotSupportedException, W("NotSupported_ManagedActivation"));
#endif // FEATURE_COMINTEROP

    // If it is a nullable, return the underlying type instead.
    if (pMT->IsNullable())
        pMT = pMT->GetInstantiation()[0].GetMethodTable();

    {
        GCX_COOP();
        // Allocation will invoke any precise static cctors as needed.
        retObject.Set(pMT->Allocate());
    }

    END_QCALL;
}

//*************************************************************************************************
//*************************************************************************************************
//*************************************************************************************************
//      ReflectionEnum
//*************************************************************************************************
//*************************************************************************************************
//*************************************************************************************************

// FCall: returns the RuntimeType of an enum's underlying primitive type.
FCIMPL1(Object *, ReflectionEnum::InternalGetEnumUnderlyingType, ReflectClassBaseObject *target)
{
    FCALL_CONTRACT;

    VALIDATEOBJECT(target);
    TypeHandle th = target->GetType();
    _ASSERTE(th.IsEnum());

    OBJECTREF result = NULL;

    HELPER_METHOD_FRAME_BEGIN_RET_0();
    MethodTable *pMT = CoreLibBinder::GetElementType(th.AsMethodTable()->GetInternalCorElementType());
    result = pMT->GetManagedClassObject();
    HELPER_METHOD_FRAME_END();

    return OBJECTREFToObject(result);
}
FCIMPLEND

// FCall: returns the CorElementType of an enum instance's underlying type.
FCIMPL1(INT32, ReflectionEnum::InternalGetCorElementType, Object *pRefThis)
{
    FCALL_CONTRACT;

    VALIDATEOBJECT(pRefThis);
    if (pRefThis == NULL)
        FCThrowArgumentNull(NULL);

    MethodTable* pMT = pRefThis->GetMethodTable();
    _ASSERTE(pMT->IsEnum());

    // MethodTable::GetInternalCorElementType has unnecessary overhead for enums
    // Call EEClass::GetInternalCorElementType directly to avoid it
    return pMT->GetClass_NoLogging()->GetInternalCorElementType();
}
FCIMPLEND
//*******************************************************************************
// Temporary pairing of an enum field's metadata name and its value widened to
// 64 bits; used while building the arrays returned by Enum_GetValuesAndNames.
struct TempEnumValue
{
    LPCUTF8 name;
    UINT64 value;
};

//*******************************************************************************
// QuickSort adapter ordering TempEnumValue entries by their (unsigned) value.
class TempEnumValueSorter : public CQuickSort<TempEnumValue>
{
public:
    TempEnumValueSorter(TempEnumValue *pArray, SSIZE_T iCount)
        : CQuickSort<TempEnumValue>(pArray, iCount) { LIMITED_METHOD_CONTRACT; }

    // Three-way comparison on the 64-bit value.
    int Compare(TempEnumValue *pFirst, TempEnumValue *pSecond)
    {
        LIMITED_METHOD_CONTRACT;

        if (pFirst->value == pSecond->value)
            return 0;
        if (pFirst->value > pSecond->value)
            return 1;
        else
            return -1;
    }
};

// QCall: walks the metadata field defs of an enum type and hands back, sorted
// by value, an array of the members' values (always) and an array of their
// names (only when fGetNames is set). Throws ArgumentException for non-enums.
extern "C" void QCALLTYPE Enum_GetValuesAndNames(QCall::TypeHandle pEnumType, QCall::ObjectHandleOnStack pReturnValues, QCall::ObjectHandleOnStack pReturnNames, BOOL fGetNames)
{
    QCALL_CONTRACT;

    BEGIN_QCALL;

    TypeHandle th = pEnumType.AsTypeHandle();

    if (!th.IsEnum())
        COMPlusThrow(kArgumentException, W("Arg_MustBeEnum"));

    MethodTable *pMT = th.AsMethodTable();

    IMDInternalImport *pImport = pMT->GetMDImport();

    StackSArray<TempEnumValue> temps;
    UINT64 previousValue = 0;

    HENUMInternalHolder fieldEnum(pImport);
    fieldEnum.EnumInit(mdtFieldDef, pMT->GetCl());

    //
    // Note that we're fine treating signed types as unsigned, because all we really
    // want to do is sort them based on a convenient strong ordering.
    //

    BOOL sorted = TRUE;

    CorElementType type = pMT->GetInternalCorElementType();

    mdFieldDef field;
    while (pImport->EnumNext(&fieldEnum, &field))
    {
        DWORD dwFlags;
        IfFailThrow(pImport->GetFieldDefProps(field, &dwFlags));
        // Skip non-static fields; only static fields are collected here.
        if (IsFdStatic(dwFlags))
        {
            TempEnumValue temp;

            if (fGetNames)
                IfFailThrow(pImport->GetNameOfFieldDef(field, &temp.name));

            UINT64 value = 0;

            MDDefaultValue defaultValue;
            IfFailThrow(pImport->GetDefaultValue(field, &defaultValue));

            // The following code assumes that the address of all union members is the same.
            static_assert_no_msg(offsetof(MDDefaultValue, m_byteValue) == offsetof(MDDefaultValue, m_usValue));
            static_assert_no_msg(offsetof(MDDefaultValue, m_ulValue) == offsetof(MDDefaultValue, m_ullValue));
            PVOID pValue = &defaultValue.m_byteValue;

            // Widen the raw constant to 64 bits according to the enum's
            // underlying element type (sign-extending for signed types).
            switch (type) {
            case ELEMENT_TYPE_I1:
                value = *((INT8 *)pValue);
                break;

            case ELEMENT_TYPE_U1:
            case ELEMENT_TYPE_BOOLEAN:
                value = *((UINT8 *)pValue);
                break;

            case ELEMENT_TYPE_I2:
                value = *((INT16 *)pValue);
                break;

            case ELEMENT_TYPE_U2:
            case ELEMENT_TYPE_CHAR:
                value = *((UINT16 *)pValue);
                break;

            case ELEMENT_TYPE_I4:
            IN_TARGET_32BIT(case ELEMENT_TYPE_I:)
                value = *((INT32 *)pValue);
                break;

            case ELEMENT_TYPE_U4:
            IN_TARGET_32BIT(case ELEMENT_TYPE_U:)
                value = *((UINT32 *)pValue);
                break;

            case ELEMENT_TYPE_I8:
            case ELEMENT_TYPE_U8:
            IN_TARGET_64BIT(case ELEMENT_TYPE_I:)
            IN_TARGET_64BIT(case ELEMENT_TYPE_U:)
                value = *((INT64 *)pValue);
                break;

            default:
                break;
            }

            temp.value = value;

            //
            // Check to see if we are already sorted. This may seem extraneous, but is
            // actually probably the normal case.
            //
            if (previousValue > value)
                sorted = FALSE;
            previousValue = value;

            temps.Append(temp);
        }
    }

    TempEnumValue * pTemps = &(temps[0]);
    DWORD cFields = temps.GetCount();

    // Sort only when the metadata order was not already ascending.
    if (!sorted)
    {
        TempEnumValueSorter sorter(pTemps, cFields);
        sorter.Sort();
    }

    {
        GCX_COOP();

        struct gc {
            I8ARRAYREF values;
            PTRARRAYREF names;
        } gc;
        gc.values = NULL;
        gc.names = NULL;

        GCPROTECT_BEGIN(gc);

        {
            // Values are always returned, as a U8 primitive array.
            gc.values = (I8ARRAYREF) AllocatePrimitiveArray(ELEMENT_TYPE_U8, cFields);

            INT64 *pToValues = gc.values->GetDirectPointerToNonObjectElements();

            for (DWORD i = 0; i < cFields; i++) {
                pToValues[i] = pTemps[i].value;
            }

            pReturnValues.Set(gc.values);
        }

        if (fGetNames)
        {
            // Names are materialized as managed strings on demand.
            gc.names = (PTRARRAYREF) AllocateObjectArray(cFields, g_pStringClass);

            for (DWORD i = 0; i < cFields; i++) {
                STRINGREF str = StringObject::NewString(pTemps[i].name);
                gc.names->SetAt(i, str);
            }

            pReturnNames.Set(gc.names);
        }

        GCPROTECT_END();
    }

    END_QCALL;
}

// FCall: boxes a raw 64-bit value into an instance of the given enum type,
// truncating to the enum's actual instance field size (with endianness fixup).
FCIMPL2_IV(Object*, ReflectionEnum::InternalBoxEnum, ReflectClassBaseObject* target, INT64 value) {
    FCALL_CONTRACT;

    VALIDATEOBJECT(target);
    OBJECTREF ret = NULL;

    MethodTable* pMT = target->GetType().AsMethodTable();

    HELPER_METHOD_FRAME_BEGIN_RET_0();

    ret = pMT->Box(ArgSlotEndianessFixup((ARG_SLOT*)&value, pMT->GetNumInstanceFieldBytes()));

    HELPER_METHOD_FRAME_END();

    return OBJECTREFToObject(ret);
}
FCIMPLEND
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
//

#include "common.h"
#include "reflectioninvocation.h"
#include "invokeutil.h"
#include "object.h"
#include "class.h"
#include "method.hpp"
#include "typehandle.h"
#include "field.h"
#include "eeconfig.h"
#include "vars.hpp"
#include "jitinterface.h"
#include "contractimpl.h"
#include "virtualcallstub.h"
#include "comdelegate.h"
#include "generics.h"
#ifdef FEATURE_COMINTEROP
#include "interoputil.h"
#include "runtimecallablewrapper.h"
#endif
#include "dbginterface.h"
#include "argdestination.h"

/**************************************************************************/
/* if the type handle 'th' is a byref to a nullable type, return the type
   handle to the nullable type in the byref. Otherwise return the null type
   handle */
static TypeHandle NullableTypeOfByref(TypeHandle th) {
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
    }
    CONTRACTL_END;

    if (th.GetVerifierCorElementType() != ELEMENT_TYPE_BYREF)
        return TypeHandle();

    TypeHandle subType = th.AsTypeDesc()->GetTypeParam();
    if (!Nullable::IsNullableType(subType))
        return TypeHandle();

    return subType;
}

// Invokes the call site inside a PAL SEH try whose filter notifies the
// debugger (CHF notification) via pDebuggerCatchFrame, then continues the
// exception search. Any managed exception propagates out to the caller.
static void TryCallMethodWorker(MethodDescCallSite* pMethodCallSite, ARG_SLOT* args, Frame* pDebuggerCatchFrame)
{
    // Use static contracts b/c we have SEH.
    STATIC_CONTRACT_THROWS;
    STATIC_CONTRACT_GC_TRIGGERS;
    STATIC_CONTRACT_MODE_ANY;

    struct Param: public NotifyOfCHFFilterWrapperParam
    {
        MethodDescCallSite * pMethodCallSite;
        ARG_SLOT* args;
    } param;

    param.pFrame = pDebuggerCatchFrame;
    param.pMethodCallSite = pMethodCallSite;
    param.args = args;

    PAL_TRY(Param *, pParam, &param)
    {
        pParam->pMethodCallSite->CallWithValueTypes(pParam->args);
    }
    PAL_EXCEPT_FILTER(NotifyOfCHFFilterWrapper)
    {
        // Should never reach here b/c handler should always continue search.
        _ASSERTE(false);
    }
    PAL_ENDTRY
}

// Warning: This method has subtle differences from CallDescrWorkerReflectionWrapper
// In particular that one captures watson bucket data and corrupting exception severity,
// then transfers that data to the newly produced TargetInvocationException. This one
// doesn't take those same steps.
//
// Calls the method; when wrapExceptions is set, any exception thrown by the
// callee is caught and re-thrown wrapped in a TargetInvocationException.
static void TryCallMethod(MethodDescCallSite* pMethodCallSite, ARG_SLOT* args, bool wrapExceptions) {
    CONTRACTL {
        THROWS;
        GC_TRIGGERS;
        MODE_COOPERATIVE;
    }
    CONTRACTL_END;

    if (wrapExceptions)
    {
        OBJECTREF ppException = NULL;
        GCPROTECT_BEGIN(ppException);

        // The sole purpose of having this frame is to tell the debugger that we have a catch handler here
        // which may swallow managed exceptions. The debugger needs this in order to send a
        // CatchHandlerFound (CHF) notification.
        FrameWithCookie<DebuggerU2MCatchHandlerFrame> catchFrame;

        EX_TRY{
            TryCallMethodWorker(pMethodCallSite, args, &catchFrame);
        }
        EX_CATCH{
            ppException = GET_THROWABLE();
            _ASSERTE(ppException);
        }
        EX_END_CATCH(RethrowTransientExceptions)
        catchFrame.Pop();

        // It is important to re-throw outside the catch block because re-throwing will invoke
        // the jitter and managed code and will cause us to use more than the backout stack limit.
        if (ppException != NULL)
        {
            // If we get here we need to throw an TargetInvocationException
            OBJECTREF except = InvokeUtil::CreateTargetExcept(&ppException);
            COMPlusThrow(except);
        }
        GCPROTECT_END();
    }
    else
    {
        pMethodCallSite->CallWithValueTypes(args);
    }
}

// FCall: FieldInfo.GetValue. Reads the value of the given field from the
// (possibly null, for statics) target instance and returns it boxed.
FCIMPL5(Object*, RuntimeFieldHandle::GetValue, ReflectFieldObject *pFieldUNSAFE, Object *instanceUNSAFE, ReflectClassBaseObject *pFieldTypeUNSAFE, ReflectClassBaseObject *pDeclaringTypeUNSAFE, CLR_BOOL *pDomainInitialized) {
    CONTRACTL {
        FCALL_CHECK;
    }
    CONTRACTL_END;

    struct _gc
    {
        OBJECTREF target;
        REFLECTCLASSBASEREF pFieldType;
        REFLECTCLASSBASEREF pDeclaringType;
        REFLECTFIELDREF refField;
    }gc;

    gc.target = ObjectToOBJECTREF(instanceUNSAFE);
    gc.pFieldType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pFieldTypeUNSAFE);
    gc.pDeclaringType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pDeclaringTypeUNSAFE);
    gc.refField = (REFLECTFIELDREF)ObjectToOBJECTREF(pFieldUNSAFE);

    if ((gc.pFieldType == NULL) || (gc.refField == NULL))
        FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));

    TypeHandle fieldType = gc.pFieldType->GetType();
    TypeHandle declaringType = (gc.pDeclaringType != NULL) ? gc.pDeclaringType->GetType() : TypeHandle();

    Assembly *pAssem;
    if (declaringType.IsNull())
    {
        // global field
        pAssem = gc.refField->GetField()->GetModule()->GetAssembly();
    }
    else
    {
        pAssem = declaringType.GetAssembly();
    }

    OBJECTREF rv = NULL; // not protected

    HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc);
    // There can be no GC after this until the Object is returned.
    rv = InvokeUtil::GetFieldValue(gc.refField->GetField(), fieldType, &gc.target, declaringType, pDomainInitialized);
    HELPER_METHOD_FRAME_END();

    return OBJECTREFToObject(rv);
}
FCIMPLEND

// FCall: decides whether a boxed value of valueType may be "special cast"
// to targetType (a primitive, enum or pointer type) for reflection set/invoke.
FCIMPL2(FC_BOOL_RET, ReflectionInvocation::CanValueSpecialCast, ReflectClassBaseObject *pValueTypeUNSAFE, ReflectClassBaseObject *pTargetTypeUNSAFE) {
    CONTRACTL {
        FCALL_CHECK;
        PRECONDITION(CheckPointer(pValueTypeUNSAFE));
        PRECONDITION(CheckPointer(pTargetTypeUNSAFE));
    }
    CONTRACTL_END;

    REFLECTCLASSBASEREF refValueType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pValueTypeUNSAFE);
    REFLECTCLASSBASEREF refTargetType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pTargetTypeUNSAFE);

    TypeHandle valueType = refValueType->GetType();
    TypeHandle targetType = refTargetType->GetType();

    // we are here only if the target type is a primitive, an enum or a pointer

    CorElementType targetCorElement = targetType.GetVerifierCorElementType();

    BOOL ret = TRUE;
    HELPER_METHOD_FRAME_BEGIN_RET_2(refValueType, refTargetType);

    // the target type is a pointer
    if (targetCorElement == ELEMENT_TYPE_PTR || targetCorElement == ELEMENT_TYPE_FNPTR) {
        // the object must be an IntPtr or a System.Reflection.Pointer
        if (valueType == TypeHandle(CoreLibBinder::GetClass(CLASS__INTPTR))) {
            //
            // it's an IntPtr, it's good.
        }
        //
        // otherwise it's a System.Reflection.Pointer object:
        // void* assigns to any pointer. Otherwise the type of the pointer must match
        else if (!InvokeUtil::IsVoidPtr(targetType)) {
            if (!valueType.CanCastTo(targetType))
                ret = FALSE;
        }
    } else {
        // the target type is an enum or a primitive. To have any chance of assignment
        // the object type must be an enum or primitive as well.
        // So get the internal cor element type and that must be the same or widen.
        CorElementType valueCorElement = valueType.GetVerifierCorElementType();
        if (InvokeUtil::IsPrimitiveType(valueCorElement))
            ret = (InvokeUtil::CanPrimitiveWiden(targetCorElement, valueCorElement)) ? TRUE : FALSE;
        else
            ret = FALSE;
    }

    HELPER_METHOD_FRAME_END();
    FC_RETURN_BOOL(ret);
}
FCIMPLEND

// FCall: allocates a fresh boxed value type of targetType and, when an input
// box is supplied, copies its contents into the new box. Used so reflection
// never mutates a caller-visible boxed value in place.
FCIMPL3(Object*, ReflectionInvocation::AllocateValueType, ReflectClassBaseObject *pTargetTypeUNSAFE, Object *valueUNSAFE, CLR_BOOL fForceTypeChange) {
    CONTRACTL {
        FCALL_CHECK;
        PRECONDITION(CheckPointer(pTargetTypeUNSAFE));
        PRECONDITION(CheckPointer(valueUNSAFE, NULL_OK));
    }
    CONTRACTL_END;

    struct _gc
    {
        REFLECTCLASSBASEREF refTargetType;
        OBJECTREF value;
        OBJECTREF obj;
    }gc;

    gc.value = ObjectToOBJECTREF(valueUNSAFE);
    gc.obj = gc.value;
    gc.refTargetType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pTargetTypeUNSAFE);

    TypeHandle targetType = gc.refTargetType->GetType();

    HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc);

    CorElementType targetElementType = targetType.GetSignatureCorElementType();
    if (InvokeUtil::IsPrimitiveType(targetElementType) || targetElementType == ELEMENT_TYPE_VALUETYPE)
    {
        MethodTable* allocMT = targetType.AsMethodTable();
        if (allocMT->IsByRefLike())
        {
            // byref-like values cannot be boxed on the heap
            COMPlusThrow(kNotSupportedException, W("NotSupported_ByRefLike"));
        }

        if (gc.value != NULL)
        {
            // ignore the type of the incoming box if fForceTypeChange is set
            // and the target type is not nullable
            if (!fForceTypeChange || Nullable::IsNullableType(targetType))
                allocMT = gc.value->GetMethodTable();
        }

        // for null Nullable<T> we don't want a default value being created.
        // just allow the null value to be passed, as it will be converted to
        // a true nullable
        if (!(gc.value == NULL && Nullable::IsNullableType(targetType)))
        {
            // boxed value types are 'read-only' in the sense that only the
            // implementor of the value type can expose mutators.
            // To ensure byrefs don't mutate value classes in place, we make
            // a copy (and if we were not given one, we create a null value type
            // instance).
            gc.obj = allocMT->Allocate();

            if (gc.value != NULL)
                    CopyValueClass(gc.obj->UnBox(), gc.value->UnBox(), allocMT);
        }
    }

    HELPER_METHOD_FRAME_END();

    return OBJECTREFToObject(gc.obj);
}
FCIMPLEND

// FCall: FieldInfo.SetValue. Stores the (boxed) value into the given field of
// the target instance (or the static storage when there is no target).
FCIMPL7(void, RuntimeFieldHandle::SetValue, ReflectFieldObject *pFieldUNSAFE, Object *targetUNSAFE, Object *valueUNSAFE, ReflectClassBaseObject *pFieldTypeUNSAFE, DWORD attr, ReflectClassBaseObject *pDeclaringTypeUNSAFE, CLR_BOOL *pDomainInitialized) {
    CONTRACTL {
        FCALL_CHECK;
    }
    CONTRACTL_END;

    struct _gc {
        OBJECTREF       target;
        OBJECTREF       value;
        REFLECTCLASSBASEREF fieldType;
        REFLECTCLASSBASEREF declaringType;
        REFLECTFIELDREF refField;
    } gc;

    gc.target   = ObjectToOBJECTREF(targetUNSAFE);
    gc.value    = ObjectToOBJECTREF(valueUNSAFE);
    gc.fieldType= (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pFieldTypeUNSAFE);
    gc.declaringType= (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pDeclaringTypeUNSAFE);
    gc.refField = (REFLECTFIELDREF)ObjectToOBJECTREF(pFieldUNSAFE);

    if ((gc.fieldType == NULL) || (gc.refField == NULL))
        FCThrowResVoid(kArgumentNullException, W("Arg_InvalidHandle"));

    TypeHandle fieldType = gc.fieldType->GetType();
    TypeHandle declaringType = gc.declaringType != NULL ?
        gc.declaringType->GetType() : TypeHandle();

    Assembly *pAssem;
    if (declaringType.IsNull())
    {
        // global field
        pAssem = gc.refField->GetField()->GetModule()->GetAssembly();
    }
    else
    {
        pAssem = declaringType.GetAssembly();
    }

    FC_GC_POLL_NOT_NEEDED();

    FieldDesc* pFieldDesc = gc.refField->GetField();

    HELPER_METHOD_FRAME_BEGIN_PROTECT(gc);

    // InvokeUtil performs the type checks and the actual store.
    InvokeUtil::SetValidField(fieldType.GetVerifierCorElementType(), fieldType, pFieldDesc, &gc.target, &gc.value, declaringType, pDomainInitialized);

    HELPER_METHOD_FRAME_END();
}
FCIMPLEND

// QCall: instantiates the canonical generic type behind pTypeHandle over the
// supplied type-argument array and creates an instance of the result by
// running its default constructor.
extern "C" void QCALLTYPE RuntimeTypeHandle_CreateInstanceForAnotherGenericParameter(
    QCall::TypeHandle pTypeHandle,
    TypeHandle* pInstArray,
    INT32 cInstArray,
    QCall::ObjectHandleOnStack pInstantiatedObject
)
{
    CONTRACTL{
        QCALL_CHECK;
        PRECONDITION(!pTypeHandle.AsTypeHandle().IsNull());
        PRECONDITION(cInstArray >= 0);
        PRECONDITION(cInstArray == 0 || pInstArray != NULL);
    } CONTRACTL_END;

    TypeHandle genericType = pTypeHandle.AsTypeHandle();

    BEGIN_QCALL;

    _ASSERTE (genericType.HasInstantiation());

    TypeHandle instantiatedType = ((TypeHandle)genericType.GetCanonicalMethodTable()).Instantiate(Instantiation(pInstArray, (DWORD)cInstArray));

    // Get the type information associated with refThis
    MethodTable* pVMT = instantiatedType.GetMethodTable();
    _ASSERTE (pVMT != 0 &&  !instantiatedType.IsTypeDesc());
    _ASSERTE( !pVMT->IsAbstract() ||! instantiatedType.ContainsGenericVariables());
    _ASSERTE(!pVMT->IsByRefLike() && pVMT->HasDefaultConstructor());

    // We've got the class, lets allocate it and call the constructor

    // Nullables don't take this path, if they do we need special logic to make an instance
    _ASSERTE(!Nullable::IsNullableType(instantiatedType));

    {
        GCX_COOP();

        OBJECTREF newObj = instantiatedType.GetMethodTable()->Allocate();
        GCPROTECT_BEGIN(newObj);
        CallDefaultConstructor(newObj);
        GCPROTECT_END();

        pInstantiatedObject.Set(newObj);
    }

    END_QCALL;
}

// Slow path for RuntimeTypeHandle::IsInstanceOfType: sets up a helper method
// frame and performs the full cast check.
NOINLINE FC_BOOL_RET IsInstanceOfTypeHelper(OBJECTREF obj, REFLECTCLASSBASEREF refType)
{
    FCALL_CONTRACT;

    BOOL canCast = false;

    FC_INNER_PROLOG(RuntimeTypeHandle::IsInstanceOfType);
    HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_2(Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2, obj, refType);
    canCast = ObjIsInstanceOf(OBJECTREFToObject(obj), refType->GetType());
    HELPER_METHOD_FRAME_END();

    FC_RETURN_BOOL(canCast);
}

// FCall: Type.IsInstanceOfType. Consults the cast cache first and only falls
// back to the frame-based helper when the cache is inconclusive.
FCIMPL2(FC_BOOL_RET, RuntimeTypeHandle::IsInstanceOfType, ReflectClassBaseObject* pTypeUNSAFE, Object *objectUNSAFE) {
    FCALL_CONTRACT;

    OBJECTREF obj = ObjectToOBJECTREF(objectUNSAFE);
    REFLECTCLASSBASEREF refType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pTypeUNSAFE);

    // Null is not instance of anything in reflection world
    if (obj == NULL)
        FC_RETURN_BOOL(false);

    if (refType == NULL)
        FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));

    switch (ObjIsInstanceOfCached(objectUNSAFE, refType->GetType())) {
    case TypeHandle::CanCast:
        FC_RETURN_BOOL(true);
    case TypeHandle::CannotCast:
        FC_RETURN_BOOL(false);
    default:
        // fall through to the slow helper
        break;
    }

    FC_INNER_RETURN(FC_BOOL_RET, IsInstanceOfTypeHelper(obj, refType));
}
FCIMPLEND

/****************************************************************************/
/* boxed Nullable<T> are represented as a boxed T, so there is no unboxed
   Nullable<T> inside to point at by reference.
   Because of this, byref parameters of type Nullable<T> are copied out of the
   boxed instance (to a place on the stack), before the call is made (and this
   copy is pointed at). After the call returns, this copy must be copied back
   to the original argument array. ByRefToNullable is a simple linked list
   that remembers what copy-backs are needed */

// Node in the linked list of pending Nullable<T> byref copy-backs.
struct ByRefToNullable {
    unsigned argNum;        // The argument number for this byrefNullable argument
    void* data;             // The data to copy back to the ByRefNullable. This points to the stack
    TypeHandle type;        // The type of Nullable for this argument
    ByRefToNullable* next;  // list of these

    ByRefToNullable(unsigned aArgNum, void* aData, TypeHandle aType, ByRefToNullable* aNext) {
        argNum = aArgNum;
        data = aData;
        type = aType;
        next = aNext;
    }
};

// SEH wrapper around CallDescrWorkerWithHandler; its filter captures data for
// diagnostics and always continues the exception search.
static void CallDescrWorkerReflectionWrapper(CallDescrData * pCallDescrData, Frame * pFrame)
{
    // Use static contracts b/c we have SEH.
    STATIC_CONTRACT_THROWS;
    STATIC_CONTRACT_GC_TRIGGERS;
    STATIC_CONTRACT_MODE_ANY;

    struct Param: public NotifyOfCHFFilterWrapperParam
    {
        CallDescrData * pCallDescrData;
    } param;

    param.pFrame = pFrame;
    param.pCallDescrData = pCallDescrData;

    PAL_TRY(Param *, pParam, &param)
    {
        CallDescrWorkerWithHandler(pParam->pCallDescrData);
    }
    PAL_EXCEPT_FILTER(ReflectionInvocationExceptionFilter)
    {
        // Should never reach here b/c handler should always continue search.
        _ASSERTE(false);
    }
    PAL_ENDTRY
} // CallDescrWorkerReflectionWrapper

// Handles reflection invocation of an array "constructor": validates the
// integer length/bound arguments and allocates the array of type th.
static OBJECTREF InvokeArrayConstructor(TypeHandle th, Span<OBJECTREF>* objs, int argCnt)
{
    CONTRACTL {
        THROWS;
        GC_TRIGGERS;
        MODE_COOPERATIVE;
    }
    CONTRACTL_END;

    // Validate the argCnt and the Rank. Also allow nested SZARRAY's.
    _ASSERTE(argCnt == (int) th.GetRank() || argCnt == (int) th.GetRank() * 2 ||
             th.GetInternalCorElementType() == ELEMENT_TYPE_SZARRAY);

    // Validate all of the parameters. These are all typed as integers.
    int allocSize = 0;
    if (!ClrSafeInt<int>::multiply(sizeof(INT32), argCnt, allocSize))
        COMPlusThrow(kArgumentException, IDS_EE_SIGTOOCOMPLEX);

    INT32* indexes = (INT32*) _alloca((size_t)allocSize);
    ZeroMemory(indexes, allocSize);

    for (DWORD i=0; i<(DWORD)argCnt; i++)
    {
        if (!objs->GetAt(i))
            COMPlusThrowArgumentException(W("parameters"), W("Arg_NullIndex"));

        MethodTable* pMT = objs->GetAt(i)->GetMethodTable();
        CorElementType oType = TypeHandle(pMT).GetVerifierCorElementType();

        // Each bound must be a primitive that widens to Int32.
        if (!InvokeUtil::IsPrimitiveType(oType) || !InvokeUtil::CanPrimitiveWiden(ELEMENT_TYPE_I4,oType))
            COMPlusThrow(kArgumentException,W("Arg_PrimWiden"));

        ARG_SLOT value;
        InvokeUtil::CreatePrimitiveValue(ELEMENT_TYPE_I4, oType, objs->GetAt(i), &value);
        memcpyNoGCRefs(indexes + i, ArgSlotEndianessFixup(&value, sizeof(INT32)), sizeof(INT32));
    }

    return AllocateArrayEx(th, indexes, argCnt);
}

// Ensures the method is activated before invocation. Note this always returns
// FALSE: activation is performed eagerly here (EnsureActive), so callers never
// need to flag it again later.
static BOOL IsActivationNeededForMethodInvoke(MethodDesc * pMD)
{
    CONTRACTL {
        THROWS;
        GC_TRIGGERS;
        MODE_COOPERATIVE;
    }
    CONTRACTL_END;

    // The activation for non-generic instance methods is covered by non-null "this pointer"
    if (!pMD->IsStatic() && !pMD->HasMethodInstantiation() && !pMD->IsInterface())
        return FALSE;

    // We need to activate the instance at least once
    pMD->EnsureActive();
    return FALSE;
}

// Base policy class for ArgIteratorTemplate used by reflection invoke: pulls
// signature information out of the managed SignatureNative object.
class ArgIteratorBaseForMethodInvoke
{
protected:
    SIGNATURENATIVEREF * m_ppNativeSig;
    bool m_fHasThis;

    FORCEINLINE CorElementType GetReturnType(TypeHandle * pthValueType)
    {
        WRAPPER_NO_CONTRACT;
        return (*pthValueType = (*m_ppNativeSig)->GetReturnTypeHandle()).GetInternalCorElementType();
    }

    FORCEINLINE CorElementType GetNextArgumentType(DWORD iArg, TypeHandle * pthValueType)
    {
        WRAPPER_NO_CONTRACT;
        return (*pthValueType = (*m_ppNativeSig)->GetArgumentAt(iArg)).GetInternalCorElementType();
    }

    FORCEINLINE void Reset()
    {
        LIMITED_METHOD_CONTRACT;
    }

    FORCEINLINE BOOL IsRegPassedStruct(MethodTable* pMT)
    {
        return pMT->IsRegPassedStruct();
    }

public:
    BOOL HasThis()
    {
        LIMITED_METHOD_CONTRACT;
        return
m_fHasThis; } BOOL HasParamType() { LIMITED_METHOD_CONTRACT; // param type methods are not supported for reflection invoke, so HasParamType is always false for them return FALSE; } BOOL IsVarArg() { LIMITED_METHOD_CONTRACT; // vararg methods are not supported for reflection invoke, so IsVarArg is always false for them return FALSE; } DWORD NumFixedArgs() { LIMITED_METHOD_CONTRACT; return (*m_ppNativeSig)->NumFixedArgs(); } #ifdef FEATURE_INTERPRETER BYTE CallConv() { LIMITED_METHOD_CONTRACT; return IMAGE_CEE_CS_CALLCONV_DEFAULT; } #endif // FEATURE_INTERPRETER }; class ArgIteratorForMethodInvoke : public ArgIteratorTemplate<ArgIteratorBaseForMethodInvoke> { public: ArgIteratorForMethodInvoke(SIGNATURENATIVEREF * ppNativeSig, BOOL fCtorOfVariableSizedObject) { m_ppNativeSig = ppNativeSig; m_fHasThis = (*m_ppNativeSig)->HasThis() && !fCtorOfVariableSizedObject; DWORD dwFlags = (*m_ppNativeSig)->GetArgIteratorFlags(); // Use the cached values if they are available if (dwFlags & SIZE_OF_ARG_STACK_COMPUTED) { m_dwFlags = dwFlags; m_nSizeOfArgStack = (*m_ppNativeSig)->GetSizeOfArgStack(); return; } // // Compute flags and stack argument size, and cache them for next invocation // ForceSigWalk(); if (IsActivationNeededForMethodInvoke((*m_ppNativeSig)->GetMethod())) { m_dwFlags |= METHOD_INVOKE_NEEDS_ACTIVATION; } (*m_ppNativeSig)->SetSizeOfArgStack(m_nSizeOfArgStack); _ASSERTE((*m_ppNativeSig)->GetSizeOfArgStack() == m_nSizeOfArgStack); // This has to be last (*m_ppNativeSig)->SetArgIteratorFlags(m_dwFlags); _ASSERTE((*m_ppNativeSig)->GetArgIteratorFlags() == m_dwFlags); } BOOL IsActivationNeeded() { LIMITED_METHOD_CONTRACT; return (m_dwFlags & METHOD_INVOKE_NEEDS_ACTIVATION) != 0; } }; void DECLSPEC_NORETURN ThrowInvokeMethodException(MethodDesc * pMethod, OBJECTREF targetException) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_COOPERATIVE; } CONTRACTL_END; GCPROTECT_BEGIN(targetException); #if defined(_DEBUG) && !defined(TARGET_UNIX) if (IsWatsonEnabled()) { if 
(!CLRException::IsPreallocatedExceptionObject(targetException)) { // If the exception is not preallocated, we should be having the // watson buckets in the throwable already. if(!((EXCEPTIONREF)targetException)->AreWatsonBucketsPresent()) { // If an exception is raised by the VM (e.g. type load exception by the JIT) and it comes // across the reflection invocation boundary before CLR's personality routine for managed // code has been invoked, then no buckets would be available for us at this point. // // Since we cannot assert this, better log it for diagnosis if required. LOG((LF_EH, LL_INFO100, "InvokeImpl - No watson buckets available - regular exception likely raised within VM and not seen by managed code.\n")); } } else { // Exception is preallocated. PTR_EHWatsonBucketTracker pUEWatsonBucketTracker = GetThread()->GetExceptionState()->GetUEWatsonBucketTracker(); if ((IsThrowableThreadAbortException(targetException) && pUEWatsonBucketTracker->CapturedForThreadAbort())|| (pUEWatsonBucketTracker->CapturedAtReflectionInvocation())) { // ReflectionInvocationExceptionFilter would have captured // the watson bucket details for preallocated exceptions // in the UE watson bucket tracker. if(pUEWatsonBucketTracker->RetrieveWatsonBuckets() == NULL) { // See comment above LOG((LF_EH, LL_INFO100, "InvokeImpl - No watson buckets available - preallocated exception likely raised within VM and not seen by managed code.\n")); } } } } #endif // _DEBUG && !TARGET_UNIX OBJECTREF except = InvokeUtil::CreateTargetExcept(&targetException); #ifndef TARGET_UNIX if (IsWatsonEnabled()) { struct { OBJECTREF oExcept; } gcTIE; ZeroMemory(&gcTIE, sizeof(gcTIE)); GCPROTECT_BEGIN(gcTIE); gcTIE.oExcept = except; _ASSERTE(!CLRException::IsPreallocatedExceptionObject(gcTIE.oExcept)); // If the original exception was preallocated, then copy over the captured // watson buckets to the TargetInvocationException object, if available. 
// // We dont need to do this if the original exception was not preallocated // since it already contains the watson buckets inside the object. if (CLRException::IsPreallocatedExceptionObject(targetException)) { PTR_EHWatsonBucketTracker pUEWatsonBucketTracker = GetThread()->GetExceptionState()->GetUEWatsonBucketTracker(); BOOL fCopyWatsonBuckets = TRUE; PTR_VOID pBuckets = pUEWatsonBucketTracker->RetrieveWatsonBuckets(); if (pBuckets != NULL) { // Copy the buckets to the exception object CopyWatsonBucketsToThrowable(pBuckets, gcTIE.oExcept); // Confirm that they are present. _ASSERTE(((EXCEPTIONREF)gcTIE.oExcept)->AreWatsonBucketsPresent()); } // Clear the UE watson bucket tracker since the bucketing // details are now in the TargetInvocationException object. pUEWatsonBucketTracker->ClearWatsonBucketDetails(); } // update "except" incase the reference to the object // was updated by the GC except = gcTIE.oExcept; GCPROTECT_END(); } #endif // !TARGET_UNIX // Since the original exception is inner of target invocation exception, // when TIE is seen to be raised for the first time, we will end up // using the inner exception buckets automatically. // Since VM is throwing the exception, we set it to use the same corruption severity // that the original exception came in with from reflection invocation. 
COMPlusThrow(except); GCPROTECT_END(); } FCIMPL5(Object*, RuntimeMethodHandle::InvokeMethod, Object *target, Span<OBJECTREF>* objs, SignatureNative* pSigUNSAFE, CLR_BOOL fConstructor, CLR_BOOL fWrapExceptions) { FCALL_CONTRACT; struct { OBJECTREF target; SIGNATURENATIVEREF pSig; OBJECTREF retVal; } gc; gc.target = ObjectToOBJECTREF(target); gc.pSig = (SIGNATURENATIVEREF)pSigUNSAFE; gc.retVal = NULL; MethodDesc* pMeth = gc.pSig->GetMethod(); TypeHandle ownerType = gc.pSig->GetDeclaringType(); HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc); if (ownerType.IsSharedByGenericInstantiations()) COMPlusThrow(kNotSupportedException, W("NotSupported_Type")); #ifdef _DEBUG if (g_pConfig->ShouldInvokeHalt(pMeth)) { _ASSERTE(!"InvokeHalt"); } #endif BOOL fCtorOfVariableSizedObject = FALSE; if (fConstructor) { // If we are invoking a constructor on an array then we must // handle this specially. if (ownerType.IsArray()) { gc.retVal = InvokeArrayConstructor(ownerType, objs, gc.pSig->NumFixedArgs()); goto Done; } // Variable sized objects, like String instances, allocate themselves // so they are a special case. 
MethodTable * pMT = ownerType.AsMethodTable(); fCtorOfVariableSizedObject = pMT->HasComponentSize(); if (!fCtorOfVariableSizedObject) gc.retVal = pMT->Allocate(); } { ArgIteratorForMethodInvoke argit(&gc.pSig, fCtorOfVariableSizedObject); if (argit.IsActivationNeeded()) pMeth->EnsureActive(); CONSISTENCY_CHECK(pMeth->CheckActivated()); UINT nStackBytes = argit.SizeOfFrameArgumentArray(); // Note that SizeOfFrameArgumentArray does overflow checks with sufficient margin to prevent overflows here SIZE_T nAllocaSize = TransitionBlock::GetNegSpaceSize() + sizeof(TransitionBlock) + nStackBytes; Thread * pThread = GET_THREAD(); LPBYTE pAlloc = (LPBYTE)_alloca(nAllocaSize); LPBYTE pTransitionBlock = pAlloc + TransitionBlock::GetNegSpaceSize(); CallDescrData callDescrData; callDescrData.pSrc = pTransitionBlock + sizeof(TransitionBlock); _ASSERTE((nStackBytes % TARGET_POINTER_SIZE) == 0); callDescrData.numStackSlots = nStackBytes / TARGET_POINTER_SIZE; #ifdef CALLDESCR_ARGREGS callDescrData.pArgumentRegisters = (ArgumentRegisters*)(pTransitionBlock + TransitionBlock::GetOffsetOfArgumentRegisters()); #endif #ifdef CALLDESCR_RETBUFFARGREG callDescrData.pRetBuffArg = (UINT64*)(pTransitionBlock + TransitionBlock::GetOffsetOfRetBuffArgReg()); #endif #ifdef CALLDESCR_FPARGREGS callDescrData.pFloatArgumentRegisters = NULL; #endif #ifdef CALLDESCR_REGTYPEMAP callDescrData.dwRegTypeMap = 0; #endif callDescrData.fpReturnSize = argit.GetFPReturnSize(); // This is duplicated logic from MethodDesc::GetCallTarget PCODE pTarget; if (pMeth->IsVtableMethod()) { pTarget = pMeth->GetSingleCallableAddrOfVirtualizedCode(&gc.target, ownerType); } else { pTarget = pMeth->GetSingleCallableAddrOfCode(); } callDescrData.pTarget = pTarget; // Build the arguments on the stack GCStress<cfg_any>::MaybeTrigger(); FrameWithCookie<ProtectValueClassFrame> *pProtectValueClassFrame = NULL; ValueClassInfo *pValueClasses = NULL; ByRefToNullable* byRefToNullables = NULL; // if we have the magic Value Class 
return, we need to allocate that class // and place a pointer to it on the stack. BOOL hasRefReturnAndNeedsBoxing = FALSE; // Indicates that the method has a BYREF return type and the target type needs to be copied into a preallocated boxed object. TypeHandle retTH = gc.pSig->GetReturnTypeHandle(); TypeHandle refReturnTargetTH; // Valid only if retType == ELEMENT_TYPE_BYREF. Caches the TypeHandle of the byref target. BOOL fHasRetBuffArg = argit.HasRetBuffArg(); CorElementType retType = retTH.GetSignatureCorElementType(); BOOL hasValueTypeReturn = retTH.IsValueType() && retType != ELEMENT_TYPE_VOID; _ASSERTE(hasValueTypeReturn || !fHasRetBuffArg); // only valuetypes are returned via a return buffer. if (hasValueTypeReturn) { gc.retVal = retTH.GetMethodTable()->Allocate(); } else if (retType == ELEMENT_TYPE_BYREF) { refReturnTargetTH = retTH.AsTypeDesc()->GetTypeParam(); // If the target of the byref is a value type, we need to preallocate a boxed object to hold the managed return value. if (refReturnTargetTH.IsValueType()) { _ASSERTE(refReturnTargetTH.GetSignatureCorElementType() != ELEMENT_TYPE_VOID); // Managed Reflection layer has a bouncer for "ref void" returns. hasRefReturnAndNeedsBoxing = TRUE; gc.retVal = refReturnTargetTH.GetMethodTable()->Allocate(); } } // Copy "this" pointer if (!pMeth->IsStatic() && !fCtorOfVariableSizedObject) { PVOID pThisPtr; if (fConstructor) { // Copy "this" pointer: only unbox if type is value type and method is not unboxing stub if (ownerType.IsValueType() && !pMeth->IsUnboxingStub()) { // Note that we create a true boxed nullabe<T> and then convert it to a T below pThisPtr = gc.retVal->GetData(); } else pThisPtr = OBJECTREFToObject(gc.retVal); } else if (!pMeth->GetMethodTable()->IsValueType()) pThisPtr = OBJECTREFToObject(gc.target); else { if (pMeth->IsUnboxingStub()) pThisPtr = OBJECTREFToObject(gc.target); else { // Create a true boxed Nullable<T> and use that as the 'this' pointer. 
// since what is passed in is just a boxed T MethodTable* pMT = pMeth->GetMethodTable(); if (Nullable::IsNullableType(pMT)) { OBJECTREF bufferObj = pMT->Allocate(); void* buffer = bufferObj->GetData(); Nullable::UnBox(buffer, gc.target, pMT); pThisPtr = buffer; } else pThisPtr = gc.target->UnBox(); } } *((LPVOID*) (pTransitionBlock + argit.GetThisOffset())) = pThisPtr; } // NO GC AFTER THIS POINT. The object references in the method frame are not protected. // // We have already copied "this" pointer so we do not want GC to happen even sooner. Unfortunately, // we may allocate in the process of copying this pointer that makes it hard to express using contracts. // // If an exception occurs a gc may happen but we are going to dump the stack anyway and we do // not need to protect anything. { BEGINFORBIDGC(); #ifdef _DEBUG GCForbidLoaderUseHolder forbidLoaderUse; #endif // Take care of any return arguments if (fHasRetBuffArg) { PVOID pRetBuff = gc.retVal->GetData(); *((LPVOID*) (pTransitionBlock + argit.GetRetBuffArgOffset())) = pRetBuff; } // copy args UINT nNumArgs = gc.pSig->NumFixedArgs(); for (UINT i = 0 ; i < nNumArgs; i++) { TypeHandle th = gc.pSig->GetArgumentAt(i); int ofs = argit.GetNextOffset(); _ASSERTE(ofs != TransitionBlock::InvalidOffset); #ifdef CALLDESCR_REGTYPEMAP FillInRegTypeMap(ofs, argit.GetArgType(), (BYTE *)&callDescrData.dwRegTypeMap); #endif #ifdef CALLDESCR_FPARGREGS // Under CALLDESCR_FPARGREGS -ve offsets indicate arguments in floating point registers. If we have at // least one such argument we point the call worker at the floating point area of the frame (we leave // it null otherwise since the worker can perform a useful optimization if it knows no floating point // registers need to be set up). 
if (TransitionBlock::HasFloatRegister(ofs, argit.GetArgLocDescForStructInRegs()) && (callDescrData.pFloatArgumentRegisters == NULL)) { callDescrData.pFloatArgumentRegisters = (FloatArgumentRegisters*) (pTransitionBlock + TransitionBlock::GetOffsetOfFloatArgumentRegisters()); } #endif UINT structSize = argit.GetArgSize(); bool needsStackCopy = false; // A boxed Nullable<T> is represented as boxed T. So to pass a Nullable<T> by reference, // we have to create a Nullable<T> on stack, copy the T into it, then pass it to the callee and // after returning from the call, copy the T out of the Nullable<T> back to the boxed T. TypeHandle nullableType = NullableTypeOfByref(th); if (!nullableType.IsNull()) { th = nullableType; structSize = th.GetSize(); needsStackCopy = true; } #ifdef ENREGISTERED_PARAMTYPE_MAXSIZE else if (argit.IsArgPassedByRef()) { needsStackCopy = true; } #endif ArgDestination argDest(pTransitionBlock, ofs, argit.GetArgLocDescForStructInRegs()); if(needsStackCopy) { MethodTable * pMT = th.GetMethodTable(); _ASSERTE(pMT && pMT->IsValueType()); PVOID pArgDst = argDest.GetDestinationAddress(); PVOID pStackCopy = _alloca(structSize); *(PVOID *)pArgDst = pStackCopy; pArgDst = pStackCopy; if (!nullableType.IsNull()) { byRefToNullables = new(_alloca(sizeof(ByRefToNullable))) ByRefToNullable(i, pStackCopy, nullableType, byRefToNullables); } // save the info into ValueClassInfo if (pMT->ContainsPointers()) { pValueClasses = new (_alloca(sizeof(ValueClassInfo))) ValueClassInfo(pStackCopy, pMT, pValueClasses); } // We need a new ArgDestination that points to the stack copy argDest = ArgDestination(pStackCopy, 0, NULL); } InvokeUtil::CopyArg(th, &objs->GetAt(i), &argDest); } ENDFORBIDGC(); } if (pValueClasses != NULL) { pProtectValueClassFrame = new (_alloca (sizeof (FrameWithCookie<ProtectValueClassFrame>))) FrameWithCookie<ProtectValueClassFrame>(pThread, pValueClasses); } // Call the method bool fExceptionThrown = false; if (fWrapExceptions) { // The sole purpose 
of having this frame is to tell the debugger that we have a catch handler here // which may swallow managed exceptions. The debugger needs this in order to send a // CatchHandlerFound (CHF) notification. FrameWithCookie<DebuggerU2MCatchHandlerFrame> catchFrame(pThread); EX_TRY_THREAD(pThread) { CallDescrWorkerReflectionWrapper(&callDescrData, &catchFrame); } EX_CATCH{ // Rethrow transient exceptions for constructors for backward compatibility if (fConstructor && GET_EXCEPTION()->IsTransient()) { EX_RETHROW; } // Abuse retval to store the exception object gc.retVal = GET_THROWABLE(); _ASSERTE(gc.retVal); fExceptionThrown = true; } EX_END_CATCH(SwallowAllExceptions); catchFrame.Pop(pThread); } else { CallDescrWorkerWithHandler(&callDescrData); } // Now that we are safely out of the catch block, we can create and raise the // TargetInvocationException. if (fExceptionThrown) { ThrowInvokeMethodException(pMeth, gc.retVal); } // It is still illegal to do a GC here. The return type might have/contain GC pointers. if (fConstructor) { // We have a special case for Strings...The object is returned... if (fCtorOfVariableSizedObject) { PVOID pReturnValue = &callDescrData.returnValue; gc.retVal = *(OBJECTREF *)pReturnValue; } // If it is a Nullable<T>, box it using Nullable<T> conventions. // TODO: this double allocates on constructions which is wasteful gc.retVal = Nullable::NormalizeBox(gc.retVal); } else if (hasValueTypeReturn || hasRefReturnAndNeedsBoxing) { _ASSERTE(gc.retVal != NULL); if (hasRefReturnAndNeedsBoxing) { // Method has BYREF return and the target type is one that needs boxing. We need to copy into the boxed object we have allocated for this purpose. 
LPVOID pReturnedReference = *(LPVOID*)&callDescrData.returnValue; if (pReturnedReference == NULL) { COMPlusThrow(kNullReferenceException, W("NullReference_InvokeNullRefReturned")); } CopyValueClass(gc.retVal->GetData(), pReturnedReference, gc.retVal->GetMethodTable()); } // if the structure is returned by value, then we need to copy in the boxed object // we have allocated for this purpose. else if (!fHasRetBuffArg) { CopyValueClass(gc.retVal->GetData(), &callDescrData.returnValue, gc.retVal->GetMethodTable()); } // From here on out, it is OK to have GCs since the return object (which may have had // GC pointers has been put into a GC object and thus protected. // TODO this creates two objects which is inefficient // If the return type is a Nullable<T> box it into the correct form gc.retVal = Nullable::NormalizeBox(gc.retVal); } else if (retType == ELEMENT_TYPE_BYREF) { // WARNING: pReturnedReference is an unprotected inner reference so we must not trigger a GC until the referenced value has been safely captured. LPVOID pReturnedReference = *(LPVOID*)&callDescrData.returnValue; if (pReturnedReference == NULL) { COMPlusThrow(kNullReferenceException, W("NullReference_InvokeNullRefReturned")); } gc.retVal = InvokeUtil::CreateObjectAfterInvoke(refReturnTargetTH, pReturnedReference); } else { gc.retVal = InvokeUtil::CreateObjectAfterInvoke(retTH, &callDescrData.returnValue); } while (byRefToNullables != NULL) { OBJECTREF obj = Nullable::Box(byRefToNullables->data, byRefToNullables->type.GetMethodTable()); SetObjectReference(&objs->GetAt(byRefToNullables->argNum), obj); byRefToNullables = byRefToNullables->next; } if (pProtectValueClassFrame != NULL) pProtectValueClassFrame->Pop(pThread); } Done: ; HELPER_METHOD_FRAME_END(); return OBJECTREFToObject(gc.retVal); } FCIMPLEND struct SkipStruct { StackCrawlMark* pStackMark; MethodDesc* pMeth; }; // This method is called by the GetMethod function and will crawl backward // up the stack for integer methods. 
// Stack-walk callback used by RuntimeMethodHandle::GetCurrentMethod: walks frames
// until it has passed the supplied stack mark, records that frame's MethodDesc
// into the SkipStruct, and aborts the walk.
static StackWalkAction SkipMethods(CrawlFrame* frame, VOID* data) {
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
    }
    CONTRACTL_END;

    SkipStruct* pSkip = (SkipStruct*) data;

    MethodDesc *pFunc = frame->GetFunction();

    /* We asked to be called back only for functions */
    _ASSERTE(pFunc);

    // The check here is between the address of a local variable
    // (the stack mark) and a pointer to the EIP for a frame
    // (which is actually the pointer to the return address to the
    // function from the previous frame). So we'll actually notice
    // which frame the stack mark was in one frame later. This is
    // fine since we only implement LookForMyCaller.
    _ASSERTE(*pSkip->pStackMark == LookForMyCaller);
    if (!frame->IsInCalleesFrames(pSkip->pStackMark))
        return SWA_CONTINUE;

    if (pFunc->RequiresInstMethodDescArg())
    {
        // Shared generic code: prefer the exact instantiated MethodDesc passed as
        // the hidden instantiation argument; fall back to the frame's MethodDesc.
        pSkip->pMeth = (MethodDesc *) frame->GetParamTypeArg();
        if (pSkip->pMeth == NULL)
            pSkip->pMeth = pFunc;
    }
    else
        pSkip->pMeth = pFunc;
    return SWA_ABORT;
}

// Return the MethodInfo that represents the current method (two above this one)
FCIMPL1(ReflectMethodObject*, RuntimeMethodHandle::GetCurrentMethod, StackCrawlMark* stackMark) {
    FCALL_CONTRACT;
    REFLECTMETHODREF pRet = NULL;

    HELPER_METHOD_FRAME_BEGIN_RET_0();
    SkipStruct skip;
    skip.pStackMark = stackMark;
    skip.pMeth = 0;
    StackWalkFunctions(GetThread(), SkipMethods, &skip);

    // If C<Foo>.m<Bar> was called, the stack walker returns C<object>.m<object>. We cannot
    // know that the instantiation used Foo or Bar at that point. So the next best thing
    // is to return C<T>.m<P> and that's what LoadTypicalMethodDefinition will do for us.
    if (skip.pMeth != NULL)
        pRet = skip.pMeth->LoadTypicalMethodDefinition()->GetStubMethodInfo();
    else
        pRet = NULL;

    HELPER_METHOD_FRAME_END();

    return (ReflectMethodObject*)OBJECTREFToObject(pRet);
}
FCIMPLEND

// Reads a field (a static field, or an instance field of a non-value-type target)
// through the TypedByRef, returning the value via InvokeUtil::GetFieldValue.
static OBJECTREF DirectObjectFieldGet(FieldDesc *pField, TypeHandle fieldType, TypeHandle enclosingType, TypedByRef *pTarget, CLR_BOOL *pDomainInitialized) {
    CONTRACTL {
        THROWS;
        GC_TRIGGERS;
        MODE_COOPERATIVE;
        PRECONDITION(CheckPointer(pField));
    }
    CONTRACTL_END;

    OBJECTREF refRet;
    OBJECTREF objref = NULL;
    GCPROTECT_BEGIN(objref);
    if (!pField->IsStatic()) {
        // For instance fields the TypedByRef's data points at an object reference.
        objref = ObjectToOBJECTREF(*((Object**)pTarget->data));
    }

    InvokeUtil::ValidateObjectTarget(pField, enclosingType, &objref);
    refRet = InvokeUtil::GetFieldValue(pField, fieldType, &objref, enclosingType, pDomainInitialized);
    GCPROTECT_END();
    return refRet;
}

// FCall backing TypedReference-based field reads (FieldInfo.GetValueDirect).
// Statics and reference-type targets are delegated to DirectObjectFieldGet;
// value-type targets are read directly at (data + field offset).
FCIMPL4(Object*, RuntimeFieldHandle::GetValueDirect, ReflectFieldObject *pFieldUNSAFE, ReflectClassBaseObject *pFieldTypeUNSAFE, TypedByRef *pTarget, ReflectClassBaseObject *pDeclaringTypeUNSAFE) {
    CONTRACTL {
        FCALL_CHECK;
    }
    CONTRACTL_END;

    struct {
        REFLECTCLASSBASEREF refFieldType;
        REFLECTCLASSBASEREF refDeclaringType;
        REFLECTFIELDREF refField;
    } gc;
    gc.refFieldType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pFieldTypeUNSAFE);
    gc.refDeclaringType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pDeclaringTypeUNSAFE);
    gc.refField = (REFLECTFIELDREF)ObjectToOBJECTREF(pFieldUNSAFE);

    if ((gc.refFieldType == NULL) || (gc.refField == NULL))
        FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));

    TypeHandle fieldType = gc.refFieldType->GetType();

    FieldDesc *pField = gc.refField->GetField();

    Assembly *pAssem = pField->GetModule()->GetAssembly();

    OBJECTREF refRet = NULL;
    CorElementType fieldElType;

    HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc);

    // Find the Object and its type
    TypeHandle targetType = pTarget->type;
    _ASSERTE(gc.refDeclaringType == NULL || !gc.refDeclaringType->GetType().IsTypeDesc());
    MethodTable *pEnclosingMT = (gc.refDeclaringType != NULL ? gc.refDeclaringType->GetType() : TypeHandle()).AsMethodTable();

    CLR_BOOL domainInitialized = FALSE;
    if (pField->IsStatic() || !targetType.IsValueType()) {
        refRet = DirectObjectFieldGet(pField, fieldType, TypeHandle(pEnclosingMT), pTarget, &domainInitialized);
        goto lExit;
    }

    // Validate that the target type can be cast to the type that owns this field info.
    if (!targetType.CanCastTo(TypeHandle(pEnclosingMT)))
        COMPlusThrowArgumentException(W("obj"), NULL);

    // This is a workaround because from the previous case we may end up with an
    // Enum. We want to process it here.
    // Get the value from the field
    void* p;
    fieldElType = fieldType.GetSignatureCorElementType();
    switch (fieldElType) {
    case ELEMENT_TYPE_VOID:
        _ASSERTE(!"Void used as Field Type!");
        COMPlusThrow(kInvalidProgramException);

    case ELEMENT_TYPE_BOOLEAN:  // boolean
    case ELEMENT_TYPE_I1:       // byte
    case ELEMENT_TYPE_U1:       // unsigned byte
    case ELEMENT_TYPE_I2:       // short
    case ELEMENT_TYPE_U2:       // unsigned short
    case ELEMENT_TYPE_CHAR:     // char
    case ELEMENT_TYPE_I4:       // int
    case ELEMENT_TYPE_U4:       // unsigned int
    case ELEMENT_TYPE_I:
    case ELEMENT_TYPE_U:
    case ELEMENT_TYPE_R4:       // float
    case ELEMENT_TYPE_I8:       // long
    case ELEMENT_TYPE_U8:       // unsigned long
    case ELEMENT_TYPE_R8:       // double
    case ELEMENT_TYPE_VALUETYPE:
        _ASSERTE(!fieldType.IsTypeDesc());
        // Primitive and value-type fields are boxed before being returned.
        p = ((BYTE*) pTarget->data) + pField->GetOffset();
        refRet = fieldType.AsMethodTable()->Box(p);
        break;

    case ELEMENT_TYPE_OBJECT:
    case ELEMENT_TYPE_CLASS:
    case ELEMENT_TYPE_SZARRAY:  // Single Dim, Zero
    case ELEMENT_TYPE_ARRAY:    // general array
        p = ((BYTE*) pTarget->data) + pField->GetOffset();
        refRet = ObjectToOBJECTREF(*(Object**) p);
        break;

    case ELEMENT_TYPE_PTR:
    {
        // Unmanaged pointers are surfaced via InvokeUtil::CreatePointer.
        p = ((BYTE*) pTarget->data) + pField->GetOffset();
        refRet = InvokeUtil::CreatePointer(fieldType, *(void **)p);
        break;
    }

    default:
        _ASSERTE(!"Unknown Type");
        // this is really an impossible condition
        COMPlusThrow(kNotSupportedException);
    }

lExit: ;
    HELPER_METHOD_FRAME_END();
    return OBJECTREFToObject(refRet);
}
FCIMPLEND
// Writes a field (a static field, or an instance field of a non-value-type target)
// through the TypedByRef, after validating the target/field relationship.
static void DirectObjectFieldSet(FieldDesc *pField, TypeHandle fieldType, TypeHandle enclosingType, TypedByRef *pTarget, OBJECTREF *pValue, CLR_BOOL *pDomainInitialized) {
    CONTRACTL {
        THROWS;
        GC_TRIGGERS;
        MODE_COOPERATIVE;
        PRECONDITION(CheckPointer(pField));
        PRECONDITION(!fieldType.IsNull());
    }
    CONTRACTL_END;

    OBJECTREF objref = NULL;
    GCPROTECT_BEGIN(objref);
    if (!pField->IsStatic()) {
        // For instance fields the TypedByRef's data points at an object reference.
        objref = ObjectToOBJECTREF(*((Object**)pTarget->data));
    }
    // Validate the target/fld type relationship
    InvokeUtil::ValidateObjectTarget(pField, enclosingType, &objref);

    InvokeUtil::SetValidField(pField->GetFieldType(), fieldType, pField, &objref, pValue, enclosingType, pDomainInitialized);
    GCPROTECT_END();
}

// FCall backing TypedReference-based field writes (FieldInfo.SetValueDirect).
// Statics and reference-type targets are delegated to DirectObjectFieldSet;
// value-type targets are written in place at (data + field offset) using
// volatile stores sized by the field's element type.
FCIMPL5(void, RuntimeFieldHandle::SetValueDirect, ReflectFieldObject *pFieldUNSAFE, ReflectClassBaseObject *pFieldTypeUNSAFE, TypedByRef *pTarget, Object *valueUNSAFE, ReflectClassBaseObject *pContextTypeUNSAFE) {
    CONTRACTL {
        FCALL_CHECK;
    }
    CONTRACTL_END;

    struct _gc {
        OBJECTREF           oValue;
        REFLECTCLASSBASEREF pFieldType;
        REFLECTCLASSBASEREF pContextType;
        REFLECTFIELDREF     refField;
    } gc;
    gc.oValue = ObjectToOBJECTREF(valueUNSAFE);
    gc.pFieldType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pFieldTypeUNSAFE);
    gc.pContextType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pContextTypeUNSAFE);
    gc.refField = (REFLECTFIELDREF)ObjectToOBJECTREF(pFieldUNSAFE);

    if ((gc.pFieldType == NULL) || (gc.refField == NULL))
        FCThrowResVoid(kArgumentNullException, W("Arg_InvalidHandle"));

    TypeHandle fieldType = gc.pFieldType->GetType();
    TypeHandle contextType = (gc.pContextType != NULL) ? gc.pContextType->GetType() : NULL;

    FieldDesc *pField = gc.refField->GetField();

    Assembly *pAssem = pField->GetModule()->GetAssembly();

    BYTE *pDst = NULL;
    ARG_SLOT value = NULL;
    CorElementType fieldElType;

    HELPER_METHOD_FRAME_BEGIN_PROTECT(gc);

    // Find the Object and its type
    TypeHandle targetType = pTarget->type;
    MethodTable *pEnclosingMT = contextType.GetMethodTable();

    // Verify that the value passed can be widened into the target
    InvokeUtil::ValidField(fieldType, &gc.oValue);

    CLR_BOOL domainInitialized = FALSE;
    if (pField->IsStatic() || !targetType.IsValueType()) {
        DirectObjectFieldSet(pField, fieldType, TypeHandle(pEnclosingMT), pTarget, &gc.oValue, &domainInitialized);
        goto lExit;
    }

    // Null is only legal for reference types and Nullable<T> value-type fields.
    if (gc.oValue == NULL && fieldType.IsValueType() && !Nullable::IsNullableType(fieldType))
        COMPlusThrowArgumentNull(W("value"));

    // Validate that the target type can be cast to the type that owns this field info.
    if (!targetType.CanCastTo(TypeHandle(pEnclosingMT)))
        COMPlusThrowArgumentException(W("obj"), NULL);

    // Set the field
    fieldElType = fieldType.GetInternalCorElementType();
    if (ELEMENT_TYPE_BOOLEAN <= fieldElType && fieldElType <= ELEMENT_TYPE_R8) {
        // Widen the boxed primitive into an ARG_SLOT when its element type
        // differs from the field's; otherwise copy the raw slot out of the box.
        CorElementType objType = gc.oValue->GetTypeHandle().GetInternalCorElementType();
        if (objType != fieldElType)
            InvokeUtil::CreatePrimitiveValue(fieldElType, objType, gc.oValue, &value);
        else
            value = *(ARG_SLOT*)gc.oValue->UnBox();
    }
    pDst = ((BYTE*) pTarget->data) + pField->GetOffset();

    switch (fieldElType) {
    case ELEMENT_TYPE_VOID:
        _ASSERTE(!"Void used as Field Type!");
        COMPlusThrow(kInvalidProgramException);

    case ELEMENT_TYPE_BOOLEAN:  // boolean
    case ELEMENT_TYPE_I1:       // byte
    case ELEMENT_TYPE_U1:       // unsigned byte
        VolatileStore((UINT8*)pDst, *(UINT8*)&value);
        break;

    case ELEMENT_TYPE_I2:       // short
    case ELEMENT_TYPE_U2:       // unsigned short
    case ELEMENT_TYPE_CHAR:     // char
        VolatileStore((UINT16*)pDst, *(UINT16*)&value);
        break;

    case ELEMENT_TYPE_I4:       // int
    case ELEMENT_TYPE_U4:       // unsigned int
    case ELEMENT_TYPE_R4:       // float
        VolatileStore((UINT32*)pDst, *(UINT32*)&value);
        break;

    case ELEMENT_TYPE_I8:       // long
    case ELEMENT_TYPE_U8:       // unsigned long
    case ELEMENT_TYPE_R8:       // double
        VolatileStore((UINT64*)pDst, *(UINT64*)&value);
        break;

    case ELEMENT_TYPE_I:
    {
        INT_PTR valuePtr = (INT_PTR) InvokeUtil::GetIntPtrValue(gc.oValue);
        VolatileStore((INT_PTR*) pDst, valuePtr);
    }
    break;

    case ELEMENT_TYPE_U:
    {
        UINT_PTR valuePtr = (UINT_PTR) InvokeUtil::GetIntPtrValue(gc.oValue);
        VolatileStore((UINT_PTR*) pDst, valuePtr);
    }
    break;

    case ELEMENT_TYPE_PTR:      // pointers
        // A System.Reflection.Pointer value is unwrapped here; any other value
        // falls through and is treated like a function pointer below.
        if (gc.oValue != 0) {
            value = 0;
            if (CoreLibBinder::IsClass(gc.oValue->GetMethodTable(), CLASS__POINTER)) {
                value = (SIZE_T) InvokeUtil::GetPointerValue(gc.oValue);
                VolatileStore((SIZE_T*) pDst, (SIZE_T) value);
                break;
            }
        }
        FALLTHROUGH;

    case ELEMENT_TYPE_FNPTR:
    {
        value = 0;
        if (gc.oValue != 0) {
            CorElementType objType = gc.oValue->GetTypeHandle().GetInternalCorElementType();
            InvokeUtil::CreatePrimitiveValue(objType, objType, gc.oValue, &value);
        }
        VolatileStore((SIZE_T*) pDst, (SIZE_T) value);
    }
    break;

    case ELEMENT_TYPE_SZARRAY:  // Single Dim, Zero
    case ELEMENT_TYPE_ARRAY:    // General Array
    case ELEMENT_TYPE_CLASS:
    case ELEMENT_TYPE_OBJECT:
        SetObjectReference((OBJECTREF*)pDst, gc.oValue);
        break;

    case ELEMENT_TYPE_VALUETYPE:
    {
        _ASSERTE(!fieldType.IsTypeDesc());
        MethodTable* pMT = fieldType.AsMethodTable();

        // If we have a null value then we must create an empty field
        if (gc.oValue == 0)
            InitValueClass(pDst, pMT);
        else {
            pMT->UnBoxIntoUnchecked(pDst, gc.oValue);
        }
    }
    break;

    default:
        _ASSERTE(!"Unknown Type");
        // this is really an impossible condition
        COMPlusThrow(kNotSupportedException);
    }

lExit: ;
    HELPER_METHOD_FRAME_END();
}
FCIMPLEND

// Forces the given method through its prestub (i.e. compiles it) if it has
// not already been compiled.
extern "C" void QCALLTYPE ReflectionInvocation_CompileMethod(MethodDesc * pMD)
{
    QCALL_CONTRACT;

    // Argument is checked on the managed side
    PRECONDITION(pMD != NULL);

    if (!pMD->IsPointingToPrestub())
        return;

    BEGIN_QCALL;
    pMD->DoPrestub(NULL);
    END_QCALL;
}

// This method triggers the class constructor for a given type
// Runs the class constructor (.cctor) for the given type if it has not run yet.
// TypeDescs have no .cctor; already-initialized types return immediately.
extern "C" void QCALLTYPE ReflectionInvocation_RunClassConstructor(QCall::TypeHandle pType)
{
    QCALL_CONTRACT;

    TypeHandle typeHnd = pType.AsTypeHandle();
    if (typeHnd.IsTypeDesc())
        return;

    MethodTable *pMT = typeHnd.AsMethodTable();
    if (pMT->IsClassInited())
        return;

    BEGIN_QCALL;
    pMT->CheckRestore();
    pMT->EnsureInstanceActive();
    pMT->CheckRunClassInitThrowing();
    END_QCALL;
}

// This method triggers the module constructor for a given module
extern "C" void QCALLTYPE ReflectionInvocation_RunModuleConstructor(QCall::ModuleHandle pModule)
{
    QCALL_CONTRACT;

    DomainAssembly *pDomainAssembly = pModule->GetDomainAssembly();
    if (pDomainAssembly != NULL && pDomainAssembly->IsActive())
        return;

    BEGIN_QCALL;
    pDomainAssembly->EnsureActive();
    END_QCALL;
}

// Ensures the method is active and pushed through its prestub; for wrapper
// stubs, the wrapped (target) method is pushed through its prestub as well.
static void PrepareMethodHelper(MethodDesc * pMD)
{
    STANDARD_VM_CONTRACT;

    pMD->EnsureActive();

    if (pMD->IsPointingToPrestub())
        pMD->DoPrestub(NULL);

    if (pMD->IsWrapperStub())
    {
        pMD = pMD->GetWrappedMethodDesc();
        if (pMD->IsPointingToPrestub())
            pMD->DoPrestub(NULL);
    }
}

// This method triggers a given method to be jitted. CoreCLR implementation of this method triggers jiting of the given method only.
// It does not walk a subset of callgraph to provide CER guarantees.
extern "C" void QCALLTYPE ReflectionInvocation_PrepareMethod(MethodDesc *pMD, TypeHandle *pInstantiation, UINT32 cInstantiation)
{
    CONTRACTL {
        QCALL_CHECK;
        PRECONDITION(pMD != NULL);
        PRECONDITION(CheckPointer(pInstantiation, NULL_OK));
    }
    CONTRACTL_END;

    BEGIN_QCALL;

    if (pMD->IsAbstract())
        COMPlusThrow(kArgumentException, W("Argument_CannotPrepareAbstract"));

    MethodTable * pExactMT = pMD->GetMethodTable();
    if (pInstantiation != NULL)
    {
        // We were handed an instantiation, check that the method expects it and the right number of types has been provided (the
        // caller supplies one array containing the class instantiation immediately followed by the method instantiation).
        if (cInstantiation != (pMD->GetNumGenericMethodArgs() + pMD->GetNumGenericClassArgs()))
            COMPlusThrow(kArgumentException, W("Argument_InvalidGenericInstantiation"));

        // Check we've got a reasonable looking instantiation.
        if (!Generics::CheckInstantiation(Instantiation(pInstantiation, cInstantiation)))
            COMPlusThrow(kArgumentException, W("Argument_InvalidGenericInstantiation"));
        for (ULONG i = 0; i < cInstantiation; i++)
            if (pInstantiation[i].ContainsGenericVariables())
                COMPlusThrow(kArgumentException, W("Argument_InvalidGenericInstantiation"));

        // Load the exact instantiated type, then find/create the MethodDesc for
        // the exact method instantiation on it.
        TypeHandle thExactType = ClassLoader::LoadGenericInstantiationThrowing(pMD->GetModule(), pMD->GetMethodTable()->GetCl(), Instantiation(pInstantiation, pMD->GetNumGenericClassArgs()));
        pExactMT = thExactType.AsMethodTable();

        pMD = MethodDesc::FindOrCreateAssociatedMethodDesc(pMD, pExactMT, FALSE, Instantiation(&pInstantiation[pMD->GetNumGenericClassArgs()], pMD->GetNumGenericMethodArgs()), FALSE);
    }

    if (pMD->ContainsGenericVariables())
        COMPlusThrow(kArgumentException, W("Argument_InvalidGenericInstantiation"));

    PrepareMethodHelper(pMD);

    END_QCALL;
}

// This method triggers target of a given method to be jitted. CoreCLR implementation of this method triggers jiting
// of the given method only. It does not walk a subset of callgraph to provide CER guarantees.
// In the case of a multi-cast delegate, we rely on the fact that each individual component
// was prepared prior to the Combine.
FCIMPL1(void, ReflectionInvocation::PrepareDelegate, Object* delegateUNSAFE)
{
    CONTRACTL {
        FCALL_CHECK;
        PRECONDITION(CheckPointer(delegateUNSAFE, NULL_OK));
    }
    CONTRACTL_END;

    if (delegateUNSAFE == NULL)
        return;

    OBJECTREF delegate = ObjectToOBJECTREF(delegateUNSAFE);
    HELPER_METHOD_FRAME_BEGIN_1(delegate);

    MethodDesc *pMD = COMDelegate::GetMethodDesc(delegate);

    GCX_PREEMP();
    PrepareMethodHelper(pMD);

    HELPER_METHOD_FRAME_END();
}
FCIMPLEND

// This method checks to see if there is sufficient stack to execute the average Framework method.
// If there is not, then it throws System.InsufficientExecutionStackException. The limit for each
// thread is precomputed when the thread is created.
FCIMPL0(void, ReflectionInvocation::EnsureSufficientExecutionStack)
{
    FCALL_CONTRACT;

    Thread *pThread = GetThread();

    // We use the address of a local variable as our "current stack pointer", which is
    // plenty close enough for the purposes of this method.
    UINT_PTR current = reinterpret_cast<UINT_PTR>(&pThread);
    UINT_PTR limit = pThread->GetCachedStackSufficientExecutionLimit();

    if (current < limit)
    {
        FCThrowVoid(kInsufficientExecutionStackException);
    }
}
FCIMPLEND

// As with EnsureSufficientExecutionStack, this method checks and returns whether there is
// sufficient stack to execute the average Framework method, but rather than throwing,
// it simply returns a Boolean: true for sufficient stack space, otherwise false.
FCIMPL0(FC_BOOL_RET, ReflectionInvocation::TryEnsureSufficientExecutionStack)
{
    FCALL_CONTRACT;

    Thread *pThread = GetThread();

    // Same logic as EnsureSufficientExecutionStack
    UINT_PTR current = reinterpret_cast<UINT_PTR>(&pThread);
    UINT_PTR limit = pThread->GetCachedStackSufficientExecutionLimit();

    FC_RETURN_BOOL(current >= limit);
}
FCIMPLEND

// Builds a TypedReference designating a field of the target object: the
// offsets of the supplied FieldDesc chain are summed to form an interior
// pointer into the object.
FCIMPL4(void, ReflectionInvocation::MakeTypedReference, TypedByRef * value, Object* targetUNSAFE, ArrayBase* fldsUNSAFE, ReflectClassBaseObject *pFieldTypeUNSAFE)
{
    CONTRACTL {
        FCALL_CHECK;
        PRECONDITION(CheckPointer(targetUNSAFE));
        PRECONDITION(CheckPointer(fldsUNSAFE));
    }
    CONTRACTL_END;

    DWORD offset = 0;

    struct _gc {
        OBJECTREF           target;
        BASEARRAYREF        flds;
        REFLECTCLASSBASEREF refFieldType;
    } gc;
    gc.target = (OBJECTREF) targetUNSAFE;
    gc.flds = (BASEARRAYREF) fldsUNSAFE;
    gc.refFieldType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pFieldTypeUNSAFE);

    TypeHandle fieldType = gc.refFieldType->GetType();

    HELPER_METHOD_FRAME_BEGIN_PROTECT(gc);
    GCPROTECT_BEGININTERIOR (value)
    // Sum the offsets along the field chain.
    DWORD cnt = gc.flds->GetNumComponents();
    FieldDesc** fields = (FieldDesc**)gc.flds->GetDataPtr();
    for (DWORD i = 0; i < cnt; i++)
    {
        FieldDesc* pField = fields[i];
        offset += pField->GetOffset();
    }

    // Fields already are prohibited from having ArgIterator and RuntimeArgumentHandles
    _ASSERTE(!gc.target->GetTypeHandle().GetMethodTable()->IsByRefLike());

    // Create the ByRef
    value->data = ((BYTE *)(gc.target->GetAddress() + offset)) + sizeof(Object);
    value->type = fieldType;

    GCPROTECT_END();
    HELPER_METHOD_FRAME_END();
}
FCIMPLEND

// This is an internal helper function to TypedReference class.
// It extracts the object from the typed reference.
FCIMPL1(Object*, ReflectionInvocation::TypedReferenceToObject, TypedByRef * value)
{
    FCALL_CONTRACT;

    OBJECTREF Obj = NULL;

    TypeHandle th(value->type);

    if (th.IsNull())
        FCThrowRes(kArgumentNullException, W("ArgumentNull_TypedRefType"));

    MethodTable* pMT = th.GetMethodTable();
    PREFIX_ASSUME(NULL != pMT);

    if (pMT->IsValueType())
    {
        // value->data is protected by the caller
        HELPER_METHOD_FRAME_BEGIN_RET_1(Obj);

        // Value types are returned boxed.
        Obj = pMT->Box(value->data);

        HELPER_METHOD_FRAME_END();
    }
    else
    {
        Obj = ObjectToOBJECTREF(*((Object**)value->data));
    }

    return OBJECTREFToObject(Obj);
}
FCIMPLEND

// Boxes a raw 64-bit value as an instance of the given enum type.
FCIMPL2_IV(Object*, ReflectionInvocation::CreateEnum, ReflectClassBaseObject *pTypeUNSAFE, INT64 value)
{
    FCALL_CONTRACT;

    REFLECTCLASSBASEREF refType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pTypeUNSAFE);
    TypeHandle typeHandle = refType->GetType();
    _ASSERTE(typeHandle.IsEnum());
    OBJECTREF obj = NULL;
    HELPER_METHOD_FRAME_BEGIN_RET_1(refType);

    MethodTable *pEnumMT = typeHandle.AsMethodTable();
    // ArgSlotEndianessFixup picks the correct bytes of 'value' for the enum's
    // underlying field size before boxing.
    obj = pEnumMT->Box(ArgSlotEndianessFixup ((ARG_SLOT*)&value, pEnumMT->GetNumInstanceFieldBytes()));

    HELPER_METHOD_FRAME_END();
    return OBJECTREFToObject(obj);
}
FCIMPLEND

#ifdef FEATURE_COMINTEROP
// Late-bound IDispatch invocation on a COM object. Maps the managed binder
// flags to DISPATCH_* flags and forwards to IUInvokeDispMethod.
FCIMPL8(Object*, ReflectionInvocation::InvokeDispMethod, ReflectClassBaseObject* refThisUNSAFE, StringObject* nameUNSAFE, INT32 invokeAttr, Object* targetUNSAFE, PTRArray* argsUNSAFE, PTRArray* byrefModifiersUNSAFE, LCID lcid, PTRArray* namedParametersUNSAFE)
{
    FCALL_CONTRACT;

    struct _gc {
        REFLECTCLASSBASEREF refThis;
        STRINGREF           name;
        OBJECTREF           target;
        PTRARRAYREF         args;
        PTRARRAYREF         byrefModifiers;
        PTRARRAYREF         namedParameters;
        OBJECTREF           RetObj;
    } gc;

    gc.refThis = (REFLECTCLASSBASEREF) refThisUNSAFE;
    gc.name = (STRINGREF) nameUNSAFE;
    gc.target = (OBJECTREF) targetUNSAFE;
    gc.args = (PTRARRAYREF) argsUNSAFE;
    gc.byrefModifiers = (PTRARRAYREF) byrefModifiersUNSAFE;
    gc.namedParameters = (PTRARRAYREF) namedParametersUNSAFE;
    gc.RetObj = NULL;

    HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc);

    _ASSERTE(gc.target != NULL);
    _ASSERTE(gc.target->GetMethodTable()->IsComObjectType());

    // Note: InvokeMethod/GetProperty OR into the flags, while the SetProperty/
    // PutDispProperty/PutRefDispProperty/CreateInstance cases assign (replace)
    // them.
    WORD flags = 0;
    if (invokeAttr & BINDER_InvokeMethod)
        flags |= DISPATCH_METHOD;
    if (invokeAttr & BINDER_GetProperty)
        flags |= DISPATCH_PROPERTYGET;
    if (invokeAttr & BINDER_SetProperty)
        flags = DISPATCH_PROPERTYPUT | DISPATCH_PROPERTYPUTREF;
    if (invokeAttr & BINDER_PutDispProperty)
        flags = DISPATCH_PROPERTYPUT;
    if (invokeAttr & BINDER_PutRefDispProperty)
        flags = DISPATCH_PROPERTYPUTREF;
    if (invokeAttr & BINDER_CreateInstance)
        flags = DISPATCH_CONSTRUCT;

    IUInvokeDispMethod(&gc.refThis, &gc.target, (OBJECTREF*)&gc.name, NULL, (OBJECTREF*)&gc.args, (OBJECTREF*)&gc.byrefModifiers, (OBJECTREF*)&gc.namedParameters, &gc.RetObj, lcid, flags, invokeAttr & BINDER_IgnoreReturn, invokeAttr & BINDER_IgnoreCase);

    HELPER_METHOD_FRAME_END();
    return OBJECTREFToObject(gc.RetObj);
}
FCIMPLEND
#endif // FEATURE_COMINTEROP

// Writes the GUID associated with the type into *result. TypeDescs and arrays
// get a zero GUID; for __ComObject classes the CLSID is read from the class
// factory cached on the sync block (zero GUID if there is none).
FCIMPL2(void, ReflectionInvocation::GetGUID, ReflectClassBaseObject* refThisUNSAFE, GUID * result)
{
    FCALL_CONTRACT;

    REFLECTCLASSBASEREF refThis = (REFLECTCLASSBASEREF) refThisUNSAFE;

    HELPER_METHOD_FRAME_BEGIN_1(refThis);
    GCPROTECT_BEGININTERIOR (result);

    if (result == NULL || refThis == NULL)
        COMPlusThrow(kNullReferenceException);

    TypeHandle type = refThis->GetType();
    if (type.IsTypeDesc() || type.IsArray()) {
        memset(result,0,sizeof(GUID));
        goto lExit;
    }

#ifdef FEATURE_COMINTEROP
    if (IsComObjectClass(type))
    {
        SyncBlock* pSyncBlock = refThis->GetSyncBlock();

#ifdef FEATURE_COMINTEROP_UNMANAGED_ACTIVATION
        ComClassFactory* pComClsFac = pSyncBlock->GetInteropInfo()->GetComClassFactory();
        if (pComClsFac)
        {
            memcpyNoGCRefs(result, &pComClsFac->m_rclsid, sizeof(GUID));
        }
        else
#endif // FEATURE_COMINTEROP_UNMANAGED_ACTIVATION
        {
            memset(result, 0, sizeof(GUID));
        }
        goto lExit;
    }
#endif // FEATURE_COMINTEROP

    GUID guid;
    type.AsMethodTable()->GetGuid(&guid, TRUE);
    memcpyNoGCRefs(result, &guid, sizeof(GUID));

lExit: ;
    GCPROTECT_END();
    HELPER_METHOD_FRAME_END();
}
FCIMPLEND

/*
 * Given a TypeHandle, validates whether it's legal to construct a real
 * instance of that type. Throws an exception if the instantiation would
 * be illegal; e.g., type is void or a pointer or an open generic. This
 * doesn't guarantee that a ctor will succeed, only that the VM is able
 * to support an instance of this type on the heap.
 * ==========
 * The 'fForGetUninitializedInstance' parameter controls the type of
 * exception that is thrown if a check fails.
 */
void RuntimeTypeHandle::ValidateTypeAbleToBeInstantiated(
    TypeHandle typeHandle,
    bool fGetUninitializedObject)
{
    STANDARD_VM_CONTRACT;

    // Don't allow void
    if (typeHandle.GetSignatureCorElementType() == ELEMENT_TYPE_VOID)
    {
        COMPlusThrow(kArgumentException, W("NotSupported_Type"));
    }

    // Don't allow arrays, pointers, byrefs, or function pointers
    if (typeHandle.IsTypeDesc() || typeHandle.IsArray())
    {
        COMPlusThrow(fGetUninitializedObject ? kArgumentException : kMissingMethodException, W("NotSupported_Type"));
    }

    MethodTable* pMT = typeHandle.AsMethodTable();
    PREFIX_ASSUME(pMT != NULL);

    // Don't allow creating instances of delegates
    if (pMT->IsDelegate())
    {
        COMPlusThrow(kArgumentException, W("NotSupported_Type"));
    }

    // Don't allow string or string-like (variable length) types.
    if (pMT->HasComponentSize())
    {
        COMPlusThrow(fGetUninitializedObject ? kArgumentException : kMissingMethodException, W("Argument_NoUninitializedStrings"));
    }

    // Don't allow abstract classes or interface types
    if (pMT->IsAbstract())
    {
        RuntimeExceptionKind exKind = fGetUninitializedObject ? kMemberAccessException : kMissingMethodException;
        if (pMT->IsInterface())
            COMPlusThrow(exKind, W("Acc_CreateInterface"));
        else
            COMPlusThrow(exKind, W("Acc_CreateAbst"));
    }

    // Don't allow generic variables (e.g., the 'T' from List<T>)
    // or open generic types (List<>).
    if (typeHandle.ContainsGenericVariables())
    {
        COMPlusThrow(kMemberAccessException, W("Acc_CreateGeneric"));
    }

    // Don't allow generics instantiated over __Canon
    if (pMT->IsSharedByGenericInstantiations())
    {
        COMPlusThrow(kNotSupportedException, W("NotSupported_Type"));
    }

    // Don't allow ref structs
    if (pMT->IsByRefLike())
    {
        COMPlusThrow(kNotSupportedException, W("NotSupported_ByRefLike"));
    }
}

/*
 * Given a RuntimeType, queries info on how to instantiate the object.
 * pRuntimeType - [required] the RuntimeType object
 * ppfnAllocator - [required, null-init] fnptr to the allocator
 *                 mgd sig: void* -> object
 * pvAllocatorFirstArg - [required, null-init] first argument to the allocator
 *                       (normally, but not always, the MethodTable*)
 * ppfnCtor - [required, null-init] the instance's parameterless ctor,
 *            mgd sig object -> void, or null if no ctor is needed for this type
 * pfCtorIsPublic - [required, null-init] whether the parameterless ctor is public
 * ==========
 * This method will not run the type's static cctor.
 * This method will not allocate an instance of the target type.
 */
extern "C" void QCALLTYPE RuntimeTypeHandle_GetActivationInfo(
    QCall::ObjectHandleOnStack pRuntimeType,
    PCODE* ppfnAllocator,
    void** pvAllocatorFirstArg,
    PCODE* ppfnCtor,
    BOOL* pfCtorIsPublic
)
{
    CONTRACTL{
        QCALL_CHECK;
        PRECONDITION(CheckPointer(ppfnAllocator));
        PRECONDITION(CheckPointer(pvAllocatorFirstArg));
        PRECONDITION(CheckPointer(ppfnCtor));
        PRECONDITION(CheckPointer(pfCtorIsPublic));
        PRECONDITION(*ppfnAllocator == NULL);
        PRECONDITION(*pvAllocatorFirstArg == NULL);
        PRECONDITION(*ppfnCtor == NULL);
        PRECONDITION(*pfCtorIsPublic == FALSE);
    }
    CONTRACTL_END;

    TypeHandle typeHandle = NULL;

    BEGIN_QCALL;

    {
        GCX_COOP();

        // We need to take the RuntimeType itself rather than the RuntimeTypeHandle,
        // as the COM CLSID is stored in the RuntimeType object's sync block, and we
        // might need to pull it out later in this method.
        typeHandle = ((REFLECTCLASSBASEREF)pRuntimeType.Get())->GetType();
    }

    RuntimeTypeHandle::ValidateTypeAbleToBeInstantiated(typeHandle, false /* fGetUninitializedObject */);

    MethodTable* pMT = typeHandle.AsMethodTable();
    PREFIX_ASSUME(pMT != NULL);

#ifdef FEATURE_COMINTEROP
    // COM allocation can involve the __ComObject base type (with attached CLSID) or a
    // VM-implemented [ComImport] class. For CreateInstance, the flowchart is:
    // - For __ComObject,
    //   .. on Windows, bypass normal newobj logic and use ComClassFactory::CreateInstance.
    //   .. on non-Windows, treat as a normal class, type has no special handling in VM.
    // - For [ComImport] class, treat as a normal class. VM will replace default
    //   ctor with COM activation logic on supported platforms, else ctor itself will PNSE.
    // IsComObjectClass is the correct way to check for __ComObject specifically
    if (IsComObjectClass(typeHandle))
    {
        void* pClassFactory = NULL;

#ifdef FEATURE_COMINTEROP_UNMANAGED_ACTIVATION
        {
            // Need to enter cooperative mode to manipulate OBJECTREFs
            GCX_COOP();
            SyncBlock* pSyncBlock = pRuntimeType.Get()->GetSyncBlock();
            pClassFactory = (void*)pSyncBlock->GetInteropInfo()->GetComClassFactory();
        }
#endif // FEATURE_COMINTEROP_UNMANAGED_ACTIVATION

        if (pClassFactory == NULL)
        {
            // no factory *or* unmanaged activation is not enabled in this runtime
            COMPlusThrow(kInvalidComObjectException, IDS_EE_NO_BACKING_CLASS_FACTORY);
        }

        // managed sig: ComClassFactory* -> object (via FCALL)
        *ppfnAllocator = CoreLibBinder::GetMethod(METHOD__RT_TYPE_HANDLE__ALLOCATECOMOBJECT)->GetMultiCallableAddrOfCode();
        *pvAllocatorFirstArg = pClassFactory;
        *ppfnCtor = NULL; // no ctor call needed; activation handled entirely by the allocator
        *pfCtorIsPublic = TRUE; // no ctor call needed => assume 'public' equivalent
    }
    else
#endif // FEATURE_COMINTEROP
    if (pMT->IsNullable())
    {
        // CreateInstance returns null given Nullable<T>
        *ppfnAllocator = NULL;
        *pvAllocatorFirstArg = NULL;
        *ppfnCtor = NULL;
        *pfCtorIsPublic = TRUE; // no ctor call needed => assume 'public' equivalent
    }
    else
    {
        // managed sig: MethodTable* -> object (via JIT helper)
        bool fHasSideEffectsUnused;
        *ppfnAllocator = CEEJitInfo::getHelperFtnStatic(CEEInfo::getNewHelperStatic(pMT, &fHasSideEffectsUnused));
        *pvAllocatorFirstArg = pMT;

        if (pMT->HasDefaultConstructor())
        {
            // managed sig: object -> void
            // for ctors on value types, lookup boxed entry point stub
            MethodDesc* pMD = pMT->GetDefaultConstructor(pMT->IsValueType() /* forceBoxedEntryPoint */);
            _ASSERTE(pMD != NULL);

            PCODE pCode = pMD->GetMultiCallableAddrOfCode();
            _ASSERTE(pCode != NULL);

            *ppfnCtor = pCode;
            *pfCtorIsPublic = pMD->IsPublic();
        }
        else if (pMT->IsValueType())
        {
            *ppfnCtor = NULL; // no ctor call needed; we're creating a boxed default(T)
            *pfCtorIsPublic = TRUE; // no ctor call needed => assume 'public' equivalent
        }
        else
        {
            // reference type with no parameterless ctor - we can't instantiate this
            COMPlusThrow(kMissingMethodException, W("Arg_NoDefCTorWithoutTypeName"));
        }
    }

    pMT->EnsureInstanceActive();

    END_QCALL;
}

/*
 * Given a ComClassFactory*, calls the COM allocator
 * and returns a RCW.
 */
FCIMPL1(Object*, RuntimeTypeHandle::AllocateComObject, void* pClassFactory)
{
    CONTRACTL{
        FCALL_CHECK;
        PRECONDITION(CheckPointer(pClassFactory));
    }
    CONTRACTL_END;

    OBJECTREF rv = NULL;
    bool allocated = false;

    HELPER_METHOD_FRAME_BEGIN_RET_1(rv);

#ifdef FEATURE_COMINTEROP
#ifdef FEATURE_COMINTEROP_UNMANAGED_ACTIVATION
    {
        if (pClassFactory != NULL)
        {
            rv = ((ComClassFactory*)pClassFactory)->CreateInstance(NULL);
            allocated = true;
        }
    }
#endif // FEATURE_COMINTEROP_UNMANAGED_ACTIVATION
#endif // FEATURE_COMINTEROP

    if (!allocated)
    {
        // Either no factory was supplied or unmanaged COM activation is not
        // compiled into this runtime.
#ifdef FEATURE_COMINTEROP
        COMPlusThrow(kInvalidComObjectException, IDS_EE_NO_BACKING_CLASS_FACTORY);
#else // FEATURE_COMINTEROP
        COMPlusThrow(kPlatformNotSupportedException, IDS_EE_NO_BACKING_CLASS_FACTORY);
#endif // FEATURE_COMINTEROP
    }

    HELPER_METHOD_FRAME_END();
    return OBJECTREFToObject(rv);
}
FCIMPLEND

//*************************************************************************************************
//*************************************************************************************************
//*************************************************************************************************
//      ReflectionSerialization
//*************************************************************************************************
//*************************************************************************************************
//*************************************************************************************************

// NOTE(review): the body of this function continues beyond this chunk of the file.
extern "C" void QCALLTYPE ReflectionSerialization_GetUninitializedObject(QCall::TypeHandle pType, QCall::ObjectHandleOnStack retObject)
{
    QCALL_CONTRACT;

    BEGIN_QCALL;

    TypeHandle type = pType.AsTypeHandle();
RuntimeTypeHandle::ValidateTypeAbleToBeInstantiated(type, true /* fForGetUninitializedInstance */); MethodTable* pMT = type.AsMethodTable(); #ifdef FEATURE_COMINTEROP // Also do not allow allocation of uninitialized RCWs (COM objects). if (pMT->IsComObjectType()) COMPlusThrow(kNotSupportedException, W("NotSupported_ManagedActivation")); #endif // FEATURE_COMINTEROP // If it is a nullable, return the underlying type instead. if (pMT->IsNullable()) pMT = pMT->GetInstantiation()[0].GetMethodTable(); { GCX_COOP(); // Allocation will invoke any precise static cctors as needed. retObject.Set(pMT->Allocate()); } END_QCALL; } //************************************************************************************************* //************************************************************************************************* //************************************************************************************************* // ReflectionEnum //************************************************************************************************* //************************************************************************************************* //************************************************************************************************* FCIMPL1(Object *, ReflectionEnum::InternalGetEnumUnderlyingType, ReflectClassBaseObject *target) { FCALL_CONTRACT; VALIDATEOBJECT(target); TypeHandle th = target->GetType(); _ASSERTE(th.IsEnum()); OBJECTREF result = NULL; HELPER_METHOD_FRAME_BEGIN_RET_0(); MethodTable *pMT = CoreLibBinder::GetElementType(th.AsMethodTable()->GetInternalCorElementType()); result = pMT->GetManagedClassObject(); HELPER_METHOD_FRAME_END(); return OBJECTREFToObject(result); } FCIMPLEND FCIMPL1(INT32, ReflectionEnum::InternalGetCorElementType, Object *pRefThis) { FCALL_CONTRACT; VALIDATEOBJECT(pRefThis); if (pRefThis == NULL) FCThrowArgumentNull(NULL); MethodTable* pMT = pRefThis->GetMethodTable(); _ASSERTE(pMT->IsEnum()); // MethodTable::GetInternalCorElementType has 
unnecessary overhead for enums // Call EEClass::GetInternalCorElementType directly to avoid it return pMT->GetClass_NoLogging()->GetInternalCorElementType(); } FCIMPLEND //******************************************************************************* struct TempEnumValue { LPCUTF8 name; UINT64 value; }; //******************************************************************************* class TempEnumValueSorter : public CQuickSort<TempEnumValue> { public: TempEnumValueSorter(TempEnumValue *pArray, SSIZE_T iCount) : CQuickSort<TempEnumValue>(pArray, iCount) { LIMITED_METHOD_CONTRACT; } int Compare(TempEnumValue *pFirst, TempEnumValue *pSecond) { LIMITED_METHOD_CONTRACT; if (pFirst->value == pSecond->value) return 0; if (pFirst->value > pSecond->value) return 1; else return -1; } }; extern "C" void QCALLTYPE Enum_GetValuesAndNames(QCall::TypeHandle pEnumType, QCall::ObjectHandleOnStack pReturnValues, QCall::ObjectHandleOnStack pReturnNames, BOOL fGetNames) { QCALL_CONTRACT; BEGIN_QCALL; TypeHandle th = pEnumType.AsTypeHandle(); if (!th.IsEnum()) COMPlusThrow(kArgumentException, W("Arg_MustBeEnum")); MethodTable *pMT = th.AsMethodTable(); IMDInternalImport *pImport = pMT->GetMDImport(); StackSArray<TempEnumValue> temps; UINT64 previousValue = 0; HENUMInternalHolder fieldEnum(pImport); fieldEnum.EnumInit(mdtFieldDef, pMT->GetCl()); // // Note that we're fine treating signed types as unsigned, because all we really // want to do is sort them based on a convenient strong ordering. 
// BOOL sorted = TRUE; CorElementType type = pMT->GetInternalCorElementType(); mdFieldDef field; while (pImport->EnumNext(&fieldEnum, &field)) { DWORD dwFlags; IfFailThrow(pImport->GetFieldDefProps(field, &dwFlags)); if (IsFdStatic(dwFlags)) { TempEnumValue temp; if (fGetNames) IfFailThrow(pImport->GetNameOfFieldDef(field, &temp.name)); UINT64 value = 0; MDDefaultValue defaultValue; IfFailThrow(pImport->GetDefaultValue(field, &defaultValue)); // The following code assumes that the address of all union members is the same. static_assert_no_msg(offsetof(MDDefaultValue, m_byteValue) == offsetof(MDDefaultValue, m_usValue)); static_assert_no_msg(offsetof(MDDefaultValue, m_ulValue) == offsetof(MDDefaultValue, m_ullValue)); PVOID pValue = &defaultValue.m_byteValue; switch (type) { case ELEMENT_TYPE_I1: value = *((INT8 *)pValue); break; case ELEMENT_TYPE_U1: case ELEMENT_TYPE_BOOLEAN: value = *((UINT8 *)pValue); break; case ELEMENT_TYPE_I2: value = *((INT16 *)pValue); break; case ELEMENT_TYPE_U2: case ELEMENT_TYPE_CHAR: value = *((UINT16 *)pValue); break; case ELEMENT_TYPE_I4: IN_TARGET_32BIT(case ELEMENT_TYPE_I:) value = *((INT32 *)pValue); break; case ELEMENT_TYPE_U4: IN_TARGET_32BIT(case ELEMENT_TYPE_U:) value = *((UINT32 *)pValue); break; case ELEMENT_TYPE_I8: case ELEMENT_TYPE_U8: IN_TARGET_64BIT(case ELEMENT_TYPE_I:) IN_TARGET_64BIT(case ELEMENT_TYPE_U:) value = *((INT64 *)pValue); break; default: break; } temp.value = value; // // Check to see if we are already sorted. This may seem extraneous, but is // actually probably the normal case. 
// if (previousValue > value) sorted = FALSE; previousValue = value; temps.Append(temp); } } TempEnumValue * pTemps = &(temps[0]); DWORD cFields = temps.GetCount(); if (!sorted) { TempEnumValueSorter sorter(pTemps, cFields); sorter.Sort(); } { GCX_COOP(); struct gc { I8ARRAYREF values; PTRARRAYREF names; } gc; gc.values = NULL; gc.names = NULL; GCPROTECT_BEGIN(gc); { gc.values = (I8ARRAYREF) AllocatePrimitiveArray(ELEMENT_TYPE_U8, cFields); INT64 *pToValues = gc.values->GetDirectPointerToNonObjectElements(); for (DWORD i = 0; i < cFields; i++) { pToValues[i] = pTemps[i].value; } pReturnValues.Set(gc.values); } if (fGetNames) { gc.names = (PTRARRAYREF) AllocateObjectArray(cFields, g_pStringClass); for (DWORD i = 0; i < cFields; i++) { STRINGREF str = StringObject::NewString(pTemps[i].name); gc.names->SetAt(i, str); } pReturnNames.Set(gc.names); } GCPROTECT_END(); } END_QCALL; } FCIMPL2_IV(Object*, ReflectionEnum::InternalBoxEnum, ReflectClassBaseObject* target, INT64 value) { FCALL_CONTRACT; VALIDATEOBJECT(target); OBJECTREF ret = NULL; MethodTable* pMT = target->GetType().AsMethodTable(); HELPER_METHOD_FRAME_BEGIN_RET_0(); ret = pMT->Box(ArgSlotEndianessFixup((ARG_SLOT*)&value, pMT->GetNumInstanceFieldBytes())); HELPER_METHOD_FRAME_END(); return OBJECTREFToObject(ret); } FCIMPLEND
1
dotnet/runtime
66,234
Remove compiler warning suppression
Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
AaronRobinsonMSFT
2022-03-05T03:45:49Z
2022-03-09T00:57:12Z
220e67755ae6323a01011b83070dff6f84b02519
853e494abd1c1e12d28a76829b4680af7f694afa
Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
./src/coreclr/vm/virtualcallstub.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // File: VirtualCallStub.h // // // See code:VirtualCallStubManager for details // // ============================================================================ #ifndef _VIRTUAL_CALL_STUB_H #define _VIRTUAL_CALL_STUB_H #define CHAIN_LOOKUP #if defined(TARGET_X86) // If this is uncommented, leaves a file "StubLog_<pid>.log" with statistics on the behavior // of stub-based interface dispatch. //#define STUB_LOGGING #endif #include "stubmgr.h" ///////////////////////////////////////////////////////////////////////////////////// // Forward class declarations class FastTable; class BucketTable; class Entry; class Prober; class VirtualCallStubManager; class VirtualCallStubManagerManager; struct LookupHolder; struct DispatchHolder; struct ResolveHolder; struct VTableCallHolder; ///////////////////////////////////////////////////////////////////////////////////// // Forward function declarations extern "C" void InContextTPQuickDispatchAsmStub(); extern "C" PCODE STDCALL VSD_ResolveWorker(TransitionBlock * pTransitionBlock, TADDR siteAddrForRegisterIndirect, size_t token #ifndef TARGET_X86 , UINT_PTR flags #endif ); ///////////////////////////////////////////////////////////////////////////////////// #if defined(TARGET_X86) || defined(TARGET_AMD64) typedef INT32 DISPL; #endif ///////////////////////////////////////////////////////////////////////////////////// // Represents the struct that is added to the resolve cache // NOTE: If you change the layout of this struct, you'll need to update various // ASM helpers in VirtualCallStubCpu that rely on offsets of members. 
// struct ResolveCacheElem { void *pMT; size_t token; // DispatchToken void *target; // These are used for chaining ResolveCacheElem *pNext; ResolveCacheElem *Next() { LIMITED_METHOD_CONTRACT; return VolatileLoad(&pNext); } #ifdef _DEBUG UINT16 debug_hash; UINT16 debug_index; #endif // _DEBUG BOOL Equals(size_t token, void *pMT) { LIMITED_METHOD_CONTRACT; return (this->pMT == pMT && this->token == token); } BOOL Equals(ResolveCacheElem *pElem) { WRAPPER_NO_CONTRACT; return Equals(pElem->token, pElem->pMT); } }; enum { e_resolveCacheElem_sizeof_mt = sizeof(void *), e_resolveCacheElem_sizeof_token = sizeof(size_t), e_resolveCacheElem_sizeof_target = sizeof(void *), e_resolveCacheElem_sizeof_next = sizeof(ResolveCacheElem *), e_resolveCacheElem_offset_mt = 0, e_resolveCacheElem_offset_token = e_resolveCacheElem_offset_mt + e_resolveCacheElem_sizeof_mt, e_resolveCacheElem_offset_target = e_resolveCacheElem_offset_token + e_resolveCacheElem_sizeof_token, e_resolveCacheElem_offset_next = e_resolveCacheElem_offset_target + e_resolveCacheElem_sizeof_target, }; ///////////////////////////////////////////////////////////////////////////////////// // A utility class to help manipulate a call site struct StubCallSite { friend class VirtualCallStubManager; private: // On x86 are four possible kinds of callsites when you take into account all features // Relative: direct call, e.g. "call addr". Not used currently. // RelativeIndirect (JmpRel): indirect call through a relative address, e.g. "call [addr]" // RegisterIndirect: indirect call through a register, e.g. "call [eax]" // DelegateCallSite: anything else, tail called through a register by shuffle thunk, e.g. "jmp [eax]" // // On all other platforms we always use an indirect call through an indirection cell // In these cases all calls are made by the platform equivalent of "call [addr]". 
// // DelegateCallSite are particular in that they can come in a variety of forms: // a direct delegate call has a sequence defined by the jit but a multicast or wrapper delegate // are defined in a stub and have a different shape // PTR_PCODE m_siteAddr; // Stores the address of an indirection cell PCODE m_returnAddr; public: #if defined(TARGET_X86) StubCallSite(TADDR siteAddrForRegisterIndirect, PCODE returnAddr); PCODE GetCallerAddress(); #else // !defined(TARGET_X86) // On platforms where we always use an indirection cell things // are much simpler - the siteAddr always stores a pointer to a // value that in turn points to the indirection cell. StubCallSite(TADDR siteAddr, PCODE returnAddr) { LIMITED_METHOD_CONTRACT; m_siteAddr = dac_cast<PTR_PCODE>(siteAddr); m_returnAddr = returnAddr; } PCODE GetCallerAddress() { LIMITED_METHOD_CONTRACT; return m_returnAddr; } #endif // !defined(TARGET_X86) PCODE GetSiteTarget() { WRAPPER_NO_CONTRACT; return *(GetIndirectCell()); } void SetSiteTarget(PCODE newTarget); PTR_PCODE GetIndirectCell() { LIMITED_METHOD_CONTRACT; return dac_cast<PTR_PCODE>(m_siteAddr); } PTR_PCODE * GetIndirectCellAddress() { LIMITED_METHOD_CONTRACT; return &m_siteAddr; } PCODE GetReturnAddress() { LIMITED_METHOD_CONTRACT; return m_returnAddr; } }; // These are the assembly language entry points that the stubs use when they want to go into the EE extern "C" void ResolveWorkerAsmStub(); // resolve a token and transfer control to that method extern "C" void ResolveWorkerChainLookupAsmStub(); // for chaining of entries in the cache #ifdef TARGET_X86 extern "C" void BackPatchWorkerAsmStub(); // backpatch a call site to point to a different stub #ifdef TARGET_UNIX extern "C" void BackPatchWorkerStaticStub(PCODE returnAddr, TADDR siteAddrForRegisterIndirect); #endif // TARGET_UNIX #endif // TARGET_X86 typedef VPTR(class VirtualCallStubManager) PTR_VirtualCallStubManager; // VirtualCallStubManager is the heart of the stub dispatch logic. 
See the book of the runtime entry // // file:../../doc/BookOfTheRuntime/ClassLoader/VirtualStubDispatchDesign.doc // // The basic idea is that a call to an interface (it could also be used for virtual calls in general, but we // do not do this), is simply the code // // call [DispatchCell] // // Where we make sure 'DispatchCell' points at stubs that will do the right thing. DispatchCell is writable // so we can udpate the code over time. There are three basic types of stubs that the dispatch cell can point // to. // * Lookup: The intial stub that has no 'fast path' and simply pushes a ID for interface being called // and calls into the runtime at code:VirtualCallStubManager.ResolveWorkerStatic. // * Dispatch: Lookup stubs are patched to this stub which has a fast path that checks for a particular // Method Table and if that fails jumps to code that // * Decrements a 'missCount' (starts out as code:STUB_MISS_COUNT_VALUE). If this count goes to zero // code:VirtualCallStubManager.BackPatchWorkerStatic is called, morphs it into a resolve stub // (however since this decrementing logic is SHARED among all dispatch stubs, it may take // multiples of code:STUB_MISS_COUNT_VALUE if mulitple call sites are actively polymorphic (this // seems unlikley). // * Calls a resolve stub (Whenever a dispatch stub is created, it always has a cooresponding resolve // stub (but the resolve stubs are shared among many dispatch stubs). // * Resolve: see code:ResolveStub. This looks up the Method table in a process wide cache (see // code:ResolveCacheElem, and if found, jumps to it. This code path is about 17 instructions long (so // pretty fast, but certainly much slower than a normal call). If the method table is not found in // the cache, it calls into the runtime code:VirtualCallStubManager.ResolveWorkerStatic, which // populates it. // So the general progression is call site's cells // * start out life pointing to a lookup stub // * On first call they get updated into a dispatch stub. 
When this misses, it calls a resolve stub, // which populates a resovle stub's cache, but does not update the call site' cell (thus it is still // pointing at the dispatch cell. // * After code:STUB_MISS_COUNT_VALUE misses, we update the call site's cell to point directly at the // resolve stub (thus avoiding the overhead of the quick check that always seems to be failing and // the miss count update). // // QUESTION: What is the lifetimes of the various stubs and hash table entries? // // QUESTION: There does not seem to be any logic that will change a call site's cell once it becomes a // Resolve stub. Thus once a particular call site becomes a Resolve stub we live with the Resolve stub's // (in)efficiency forever. // // see code:#StubDispatchNotes for more class VirtualCallStubManager : public StubManager { friend class VirtualCallStubManagerManager; friend class VirtualCallStubManagerIterator; #if defined(DACCESS_COMPILE) friend class ClrDataAccess; friend class DacDbiInterfaceImpl; #endif // DACCESS_COMPILE VPTR_VTABLE_CLASS(VirtualCallStubManager, StubManager) public: #ifdef _DEBUG virtual const char * DbgGetName() { LIMITED_METHOD_CONTRACT; return "VirtualCallStubManager"; } #endif // The reason for our existence, return a callstub for type id and slot number // where type id = 0 for the class contract (i.e. a virtual call), and type id > 0 for an // interface invoke where the id indicates which interface it is. // // The function is idempotent, i.e. // you'll get the same callstub twice if you call it with identical inputs. PCODE GetCallStub(TypeHandle ownerType, MethodDesc *pMD); PCODE GetCallStub(TypeHandle ownerType, DWORD slot); // Stubs for vtable-based virtual calls with no lookups PCODE GetVTableCallStub(DWORD slot); // Generate an fresh indirection cell. 
BYTE* GenerateStubIndirection(PCODE stub, BOOL fUseRecycledCell = FALSE); // Set up static data structures - called during EEStartup static void InitStatic(); static void UninitStatic(); // Per instance initialization - called during AppDomain::Init and ::Uninit and for collectible loader allocators void Init(BaseDomain* pDomain, LoaderAllocator *pLoaderAllocator); void Uninit(); //@TODO: the logging should be tied into the VMs normal loggin mechanisms, //@TODO: for now we just always write a short log file called "StubLog_<pid>.log" static void StartupLogging(); static void LoggingDump(); static void FinishLogging(); static void ResetCache(); // Reclaim/rearrange any structures that can only be done during a gc sync point. // This is the mechanism we are using to avoid synchronization of alot of our // cache and hash table accesses. We are requiring that during a gc sync point we are not // executing any stub code at all, hence at this time we are serialized on a single thread (gc) // and no other thread is accessing the data structures. 
static void ReclaimAll(); void Reclaim(); #ifndef DACCESS_COMPILE VirtualCallStubManager() : StubManager(), lookup_rangeList(), resolve_rangeList(), dispatch_rangeList(), cache_entry_rangeList(), vtable_rangeList(), parentDomain(NULL), m_loaderAllocator(NULL), m_initialReservedMemForHeaps(NULL), m_FreeIndCellList(NULL), m_RecycledIndCellList(NULL), indcell_heap(NULL), cache_entry_heap(NULL), lookup_heap(NULL), dispatch_heap(NULL), resolve_heap(NULL), #ifdef TARGET_AMD64 m_fShouldAllocateLongJumpDispatchStubs(FALSE), #endif lookups(NULL), cache_entries(NULL), dispatchers(NULL), resolvers(NULL), m_counters(NULL), m_cur_counter_block(NULL), m_cur_counter_block_for_reclaim(NULL), m_cur_counter_block_for_reclaim_index(NULL), m_pNext(NULL) { LIMITED_METHOD_CONTRACT; ZeroMemory(&stats, sizeof(stats)); } ~VirtualCallStubManager(); #endif // !DACCESS_COMPILE enum StubKind { SK_UNKNOWN, SK_LOOKUP, // Lookup Stubs are SLOW stubs that simply call into the runtime to do all work. SK_DISPATCH, // Dispatch Stubs have a fast check for one type otherwise jumps to runtime. Works for monomorphic sites SK_RESOLVE, // Resolve Stubs do a hash lookup before fallling back to the runtime. Works for polymorphic sites. SK_VTABLECALL, // Stub that jumps to a target method using vtable-based indirections. Works for non-interface calls. SK_BREAKPOINT }; // peek at the assembly code and predict which kind of a stub we have StubKind predictStubKind(PCODE stubStartAddress); /* know thine own stubs. It is possible that when multiple virtualcallstub managers are built that these may need to become non-static, and the callers modified accordingly */ StubKind getStubKind(PCODE stubStartAddress, BOOL usePredictStubKind = TRUE) { WRAPPER_NO_CONTRACT; SUPPORTS_DAC; // This method can called with stubStartAddress==NULL, e.g. when handling null reference exceptions // caused by IP=0. Early out for this case to avoid confusing handled access violations inside predictStubKind. 
if (PCODEToPINSTR(stubStartAddress) == NULL) return SK_UNKNOWN; // Rather than calling IsInRange(stubStartAddress) for each possible stub kind // we can peek at the assembly code and predict which kind of a stub we have StubKind predictedKind = (usePredictStubKind) ? predictStubKind(stubStartAddress) : SK_UNKNOWN; if (predictedKind == SK_DISPATCH) { if (isDispatchingStub(stubStartAddress)) return SK_DISPATCH; } else if (predictedKind == SK_LOOKUP) { if (isLookupStub(stubStartAddress)) return SK_LOOKUP; } else if (predictedKind == SK_RESOLVE) { if (isResolvingStub(stubStartAddress)) return SK_RESOLVE; } else if (predictedKind == SK_VTABLECALL) { if (isVTableCallStub(stubStartAddress)) return SK_VTABLECALL; } // This is the slow case. If the predict returned SK_UNKNOWN, SK_BREAKPOINT, // or the predict was found to be incorrect when checked against the RangeLists // (isXXXStub), then we'll check each stub heap in sequence. if (isDispatchingStub(stubStartAddress)) return SK_DISPATCH; else if (isLookupStub(stubStartAddress)) return SK_LOOKUP; else if (isResolvingStub(stubStartAddress)) return SK_RESOLVE; else if (isVTableCallStub(stubStartAddress)) return SK_VTABLECALL; return SK_UNKNOWN; } inline BOOL isStub(PCODE stubStartAddress) { WRAPPER_NO_CONTRACT; SUPPORTS_DAC; return (getStubKind(stubStartAddress) != SK_UNKNOWN); } BOOL isDispatchingStub(PCODE stubStartAddress) { WRAPPER_NO_CONTRACT; SUPPORTS_DAC; return GetDispatchRangeList()->IsInRange(stubStartAddress); } BOOL isResolvingStub(PCODE stubStartAddress) { WRAPPER_NO_CONTRACT; SUPPORTS_DAC; return GetResolveRangeList()->IsInRange(stubStartAddress); } BOOL isLookupStub(PCODE stubStartAddress) { WRAPPER_NO_CONTRACT; SUPPORTS_DAC; return GetLookupRangeList()->IsInRange(stubStartAddress); } BOOL isVTableCallStub(PCODE stubStartAddress) { WRAPPER_NO_CONTRACT; SUPPORTS_DAC; return GetVTableCallRangeList()->IsInRange(stubStartAddress); } static BOOL isDispatchingStubStatic(PCODE addr) { WRAPPER_NO_CONTRACT; StubKind 
stubKind; FindStubManager(addr, &stubKind); return stubKind == SK_DISPATCH; }

    // Returns TRUE if 'addr' is the entry point of a resolve stub owned by some manager.
    static BOOL isResolvingStubStatic(PCODE addr)
    {
        WRAPPER_NO_CONTRACT;
        StubKind stubKind;
        FindStubManager(addr, &stubKind);
        return stubKind == SK_RESOLVE;
    }

    // Returns TRUE if 'addr' is the entry point of a lookup stub owned by some manager.
    static BOOL isLookupStubStatic(PCODE addr)
    {
        WRAPPER_NO_CONTRACT;
        StubKind stubKind;
        FindStubManager(addr, &stubKind);
        return stubKind == SK_LOOKUP;
    }

    // Returns TRUE if 'addr' is the entry point of a vtable call stub owned by some manager.
    static BOOL isVtableCallStubStatic(PCODE addr)
    {
        WRAPPER_NO_CONTRACT;
        StubKind stubKind;
        FindStubManager(addr, &stubKind);
        return stubKind == SK_VTABLECALL;
    }

    //use range lists to track the chunks of memory that are part of each heap
    LockedRangeList lookup_rangeList;
    LockedRangeList resolve_rangeList;
    LockedRangeList dispatch_rangeList;
    LockedRangeList cache_entry_rangeList;
    LockedRangeList vtable_rangeList;

    // Get dac-ized pointers to rangelist.
    RangeList* GetLookupRangeList()
    {
        SUPPORTS_DAC;

        TADDR addr = PTR_HOST_MEMBER_TADDR(VirtualCallStubManager, this, lookup_rangeList);
        return PTR_RangeList(addr);
    }
    RangeList* GetResolveRangeList()
    {
        SUPPORTS_DAC;

        TADDR addr = PTR_HOST_MEMBER_TADDR(VirtualCallStubManager, this, resolve_rangeList);
        return PTR_RangeList(addr);
    }
    RangeList* GetDispatchRangeList()
    {
        SUPPORTS_DAC;

        TADDR addr = PTR_HOST_MEMBER_TADDR(VirtualCallStubManager, this, dispatch_rangeList);
        return PTR_RangeList(addr);
    }
    RangeList* GetCacheEntryRangeList()
    {
        SUPPORTS_DAC;

        TADDR addr = PTR_HOST_MEMBER_TADDR(VirtualCallStubManager, this, cache_entry_rangeList);
        return PTR_RangeList(addr);
    }
    RangeList* GetVTableCallRangeList()
    {
        SUPPORTS_DAC;

        TADDR addr = PTR_HOST_MEMBER_TADDR(VirtualCallStubManager, this, vtable_rangeList);
        return PTR_RangeList(addr);
    }

private:

    //allocate and initialize a stub of the desired kind
    DispatchHolder *GenerateDispatchStub(PCODE addrOfCode,
                                         PCODE addrOfFail,
                                         void *pMTExpected,
                                         size_t dispatchToken,
                                         bool *pMayHaveReenteredCooperativeGCMode);

#ifdef TARGET_AMD64
    // Used to allocate a long jump dispatch stub. See comment around
    // m_fShouldAllocateLongJumpDispatchStubs for explanation.
    DispatchHolder *GenerateDispatchStubLong(PCODE addrOfCode,
                                             PCODE addrOfFail,
                                             void *pMTExpected,
                                             size_t dispatchToken,
                                             bool *pMayHaveReenteredCooperativeGCMode);
#endif

    ResolveHolder *GenerateResolveStub(PCODE addrOfResolver,
                                       PCODE addrOfPatcher,
                                       size_t dispatchToken
#if defined(TARGET_X86) && !defined(UNIX_X86_ABI)
                                       , size_t stackArgumentsSize
#endif
                                       );

    LookupHolder *GenerateLookupStub(PCODE addrOfResolver,
                                     size_t dispatchToken);

    VTableCallHolder* GenerateVTableCallStub(DWORD slot);

    // Registers a freshly generated stub with the parent domain's collectible VSD
    // range list, but only when the owning loader allocator is collectible.
    template <typename STUB_HOLDER>
    void AddToCollectibleVSDRangeList(STUB_HOLDER *holder)
    {
        if (m_loaderAllocator->IsCollectible())
        {
            parentDomain->GetCollectibleVSDRanges()->AddRange(reinterpret_cast<BYTE *>(holder->stub()),
                                                              reinterpret_cast<BYTE *>(holder->stub()) + holder->stub()->size(),
                                                              this);
        }
    }

    // The resolve cache is static across all AppDomains
    ResolveCacheElem *GenerateResolveCacheElem(void *addrOfCode,
                                               void *pMTExpected,
                                               size_t token,
                                               bool *pMayHaveReenteredCooperativeGCMode);

    ResolveCacheElem *GetResolveCacheElem(void *pMT,
                                          size_t token,
                                          void *target);

    //Given a dispatch token, an object and a method table, determine the
    //target address to go to.  The return value (BOOL) states whether this address
    //is cacheable or not.
    static BOOL Resolver(MethodTable * pMT,
                         DispatchToken token,
                         OBJECTREF * protectedObj,
                         PCODE * ppTarget,
                         BOOL throwOnConflict);

    // This can be used to find a target without needing the ability to throw
    static BOOL TraceResolver(Object *pObj, DispatchToken token, TraceDestination *trace);

public:
    // Return the MethodDesc corresponding to this token.
    static MethodDesc *GetRepresentativeMethodDescFromToken(DispatchToken token, MethodTable *pMT);
    static MethodDesc *GetInterfaceMethodDescFromToken(DispatchToken token);
    static MethodTable *GetTypeFromToken(DispatchToken token);

    //This is used to get the token out of a stub
    static size_t GetTokenFromStub(PCODE stub);

    //This is used to get the token out of a stub and we know the stub manager and stub kind
    static size_t GetTokenFromStubQuick(VirtualCallStubManager * pMgr, PCODE stub, StubKind kind);

    // General utility functions

    // Quick lookup in the cache. NOTHROW, GC_NOTRIGGER
    static PCODE CacheLookup(size_t token, UINT16 tokenHash, MethodTable *pMT);

    // Full exhaustive lookup. THROWS, GC_TRIGGERS
    static PCODE GetTarget(DispatchToken token, MethodTable *pMT, BOOL throwOnConflict);

private:
    // Given a dispatch token, return true if the token represents an interface, false if just a slot.
    static BOOL IsInterfaceToken(DispatchToken token);

    // Given a dispatch token, return true if the token represents a slot on the target.
    static BOOL IsClassToken(DispatchToken token);

#ifdef CHAIN_LOOKUP
    static ResolveCacheElem* __fastcall PromoteChainEntry(ResolveCacheElem *pElem);
#endif

    // Flags used by the non-x86 versions of VSD_ResolveWorker
#define SDF_ResolveBackPatch    (0x01)
#define SDF_ResolvePromoteChain (0x02)
#define SDF_ResolveFlags        (0x03)

    // These methods need to call the instance methods.
    friend PCODE VSD_ResolveWorker(TransitionBlock * pTransitionBlock,
                                   TADDR siteAddrForRegisterIndirect,
                                   size_t token
#ifndef TARGET_X86
                                   , UINT_PTR flags
#endif
                                   );

#if defined(TARGET_X86) && defined(TARGET_UNIX)
    friend void BackPatchWorkerStaticStub(PCODE returnAddr, TADDR siteAddrForRegisterIndirect);
#endif

    //These are the entrypoints that the stubs actually end up calling via the
    // xxxAsmStub methods above
    static void STDCALL BackPatchWorkerStatic(PCODE returnAddr, TADDR siteAddrForRegisterIndirect);

public:
    PCODE ResolveWorker(StubCallSite* pCallSite, OBJECTREF *protectedObj, DispatchToken token, StubKind stubKind);
    void BackPatchWorker(StubCallSite* pCallSite);

    //Change the callsite to point to stub
    void BackPatchSite(StubCallSite* pCallSite, PCODE stub);

public:
    /* the following two public functions are to support tracing or
       stepping thru stubs via the debugger. */
    virtual BOOL CheckIsStub_Internal(PCODE stubStartAddress);
    virtual BOOL TraceManager(Thread *thread,
                              TraceDestination *trace,
                              T_CONTEXT *pContext,
                              BYTE **pRetAddr);

    // Total bytes currently held by all of this manager's stub heaps.
    size_t GetSize()
    {
        LIMITED_METHOD_CONTRACT;
        size_t retval=0;
        if(indcell_heap)
            retval+=indcell_heap->GetSize();
        if(cache_entry_heap)
            retval+=cache_entry_heap->GetSize();
        if(lookup_heap)
            retval+=lookup_heap->GetSize();
        if(dispatch_heap)
            retval+=dispatch_heap->GetSize();
        if(resolve_heap)
            retval+=resolve_heap->GetSize();
        return retval;
    };

private:
    /* the following two private functions are to support tracing or
       stepping thru stubs via the debugger. */
    virtual BOOL DoTraceStub(PCODE stubStartAddress,
                             TraceDestination *trace);

private:
    // The parent domain of this manager
    PTR_BaseDomain parentDomain;

    PTR_LoaderAllocator m_loaderAllocator;

    BYTE * m_initialReservedMemForHeaps;

    static const UINT32 INDCELLS_PER_BLOCK = 32;   // 32 indirection cells per block.

    CrstExplicitInit m_indCellLock;

    // List of free indirection cells. The cells were directly allocated from the loader heap
    // (code:VirtualCallStubManager::GenerateStubIndirection)
    BYTE * m_FreeIndCellList;

    // List of recycled indirection cells. The cells were recycled from finalized dynamic methods
    // (code:LCGMethodResolver::RecycleIndCells).
    BYTE * m_RecycledIndCellList;

#ifndef DACCESS_COMPILE
    // This method returns a free cell from m_FreeIndCellList. It returns NULL if the list is empty.
    BYTE * GetOneFreeIndCell()
    {
        WRAPPER_NO_CONTRACT;

        return GetOneIndCell(&m_FreeIndCellList);
    }

    // This method returns a recycled cell from m_RecycledIndCellList. It returns NULL if the list is empty.
    BYTE * GetOneRecycledIndCell()
    {
        WRAPPER_NO_CONTRACT;

        return GetOneIndCell(&m_RecycledIndCellList);
    }

    // This method returns a cell from ppList. It returns NULL if the list is empty.
    // Caller must hold m_indCellLock (see PRECONDITION below).
    BYTE * GetOneIndCell(BYTE ** ppList)
    {
        CONTRACT (BYTE*) {
            NOTHROW;
            GC_NOTRIGGER;
            MODE_ANY;
            PRECONDITION(CheckPointer(ppList));
            PRECONDITION(m_indCellLock.OwnedByCurrentThread());
        } CONTRACT_END;

        BYTE * temp = *ppList;

        if (temp)
        {
            // Pop the head cell; each cell's first pointer-sized slot links to the next.
            BYTE * pNext = *((BYTE **)temp);
            *ppList = pNext;
            RETURN temp;
        }

        RETURN NULL;
    }

    // insert a linked list of indirection cells at the beginning of m_FreeIndCellList
    void InsertIntoFreeIndCellList(BYTE * head, BYTE * tail)
    {
        WRAPPER_NO_CONTRACT;

        InsertIntoIndCellList(&m_FreeIndCellList, head, tail);
    }

    // insert a linked list of indirection cells at the beginning of ppList
    // Caller must hold m_indCellLock (see PRECONDITION below).
    void InsertIntoIndCellList(BYTE ** ppList, BYTE * head, BYTE * tail)
    {
        CONTRACTL {
            NOTHROW;
            GC_NOTRIGGER;
            PRECONDITION(CheckPointer(ppList));
            PRECONDITION(CheckPointer(head));
            PRECONDITION(CheckPointer(tail));
            PRECONDITION(m_indCellLock.OwnedByCurrentThread());
        } CONTRACTL_END;

        // Splice the [head, tail] list in front of the current list head.
        BYTE * temphead = *ppList;
        *((BYTE**)tail) = temphead;
        *ppList = head;
    }
#endif // !DACCESS_COMPILE

    PTR_LoaderHeap  indcell_heap;       // indirection cells go here
    PTR_LoaderHeap  cache_entry_heap;   // resolve cache elem entries go here
    PTR_LoaderHeap  lookup_heap;        // lookup stubs go here
    PTR_LoaderHeap  dispatch_heap;      // dispatch stubs go here
    PTR_LoaderHeap  resolve_heap;       // resolve stubs go here
    PTR_LoaderHeap  vtable_heap;        // vtable-based jump stubs go here

#ifdef TARGET_AMD64
    // When we layout the stub heaps, we put them close together in a sequential order
    // so that we maximize performance with respect to branch predictions. On AMD64,
    // dispatch stubs use a rel32 jump on failure to the resolve stub. This works for
    // a while because of the ordering, but as soon as we have to start allocating more
    // memory for either the dispatch or resolve heaps we have a chance that we'll be
    // further away than a rel32 jump can reach, because we're in a 64-bit address
    // space. As such, this flag will indicate when we allocate the first dispatch stub
    // that cannot reach a resolve stub, and when this happens we'll switch over to
    // allocating the larger version of the dispatch stub which contains an abs64 jump.
    //@TODO: This is a bit of a workaround, but the limitations of LoaderHeap require that we
    //@TODO: take this approach. Hopefully in Orcas we'll have a chance to rewrite LoaderHeap.
    BOOL            m_fShouldAllocateLongJumpDispatchStubs; // Defaults to FALSE.
#endif

    BucketTable *   lookups;            // hash table of lookups keyed by tokens
    BucketTable *   cache_entries;      // hash table of dispatch token/target structs for dispatch cache
    BucketTable *   dispatchers;        // hash table of dispatching stubs keyed by tokens/actualtype
    BucketTable *   resolvers;          // hash table of resolvers keyed by tokens/resolverstub
    BucketTable *   vtableCallers;      // hash table of vtable call stubs keyed by slot values

    // This structure is used to keep track of the fail counters.
    // We only need one fail counter per ResolveStub,
    //  and most programs use less than 250 ResolveStubs
    // We allocate these on the main heap using "new counter block"
    struct counter_block
    {
        static const UINT32 MAX_COUNTER_ENTRIES = 256-2;    // 254 counters should be enough for most cases.

        counter_block * next;                               // the next block
        UINT32          used;                               // the index of the next free entry
        INT32           block[MAX_COUNTER_ENTRIES];         // the counters
    };

    counter_block *m_counters;                              // linked list of counter blocks of failure counters
    counter_block *m_cur_counter_block;                     // current block for updating counts
    counter_block *m_cur_counter_block_for_reclaim;         // current block for updating
    UINT32         m_cur_counter_block_for_reclaim_index;   // index into the current block for updating

    // Used to keep track of all the VCSManager objects in the system.
    PTR_VirtualCallStubManager m_pNext;                     // Linked list pointer

public:
    // Given a stub address, find the VCSManager that owns it.
    static VirtualCallStubManager *FindStubManager(PCODE addr,
                                                   StubKind* wbStubKind = NULL,
                                                   BOOL usePredictStubKind = TRUE);

#ifndef DACCESS_COMPILE
    // insert a linked list of indirection cells at the beginning of m_RecycledIndCellList
    // Takes m_indCellLock itself, unlike InsertIntoIndCellList.
    void InsertIntoRecycledIndCellList_Locked(BYTE * head, BYTE * tail)
    {
        CONTRACTL {
            NOTHROW;
            GC_TRIGGERS;
            MODE_ANY;
        } CONTRACTL_END;

        CrstHolder lh(&m_indCellLock);

        InsertIntoIndCellList(&m_RecycledIndCellList, head, tail);
    }
#endif // !DACCESS_COMPILE

    // These are the counters for keeping statistics
    struct
    {
        UINT32 site_counter;            //# of call sites
        UINT32 stub_lookup_counter;     //# of lookup stubs
        UINT32 stub_poly_counter;       //# of resolve stubs
        UINT32 stub_mono_counter;       //# of dispatch stubs
        UINT32 stub_vtable_counter;     //# of vtable call stubs
        UINT32 site_write;              //# of call site backpatch writes
        UINT32 site_write_poly;         //# of call site backpatch writes to point to resolve stubs
        UINT32 site_write_mono;         //# of call site backpatch writes to point to dispatch stubs
        UINT32 worker_call;             //# of calls into ResolveWorker
        UINT32 worker_call_no_patch;    //# of times call_worker resulted in no patch
        UINT32 worker_collide_to_mono;  //# of times we converted a poly stub to a mono stub instead of writing the cache entry
        UINT32 stub_space;              //# of bytes of stubs
        UINT32 cache_entry_counter;     //# of cache structs
        UINT32 cache_entry_space;       //# of bytes used by cache lookup structs
    } stats;

    void LogStats();

#ifdef DACCESS_COMPILE
protected:
    virtual void DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags);
    // Human-readable stub-manager name for the debugger, chosen by stub kind.
    virtual LPCWSTR GetStubManagerName(PCODE addr)
    {
        WRAPPER_NO_CONTRACT;
        CONSISTENCY_CHECK(isStub(addr));

        if (isLookupStub(addr))
        {
            return W("VSD_LookupStub");
        }
        else if (isDispatchingStub(addr))
        {
            return W("VSD_DispatchStub");
        }
        else
        {
            CONSISTENCY_CHECK(isResolvingStub(addr));
            return W("VSD_ResolveStub");
        }
    }
#endif
};

/********************************************************************************************************
********************************************************************************************************/
typedef VPTR(class VirtualCallStubManagerManager) PTR_VirtualCallStubManagerManager;

class VirtualCallStubManagerIterator;

// Process-wide singleton that tracks every VirtualCallStubManager and routes
// stub-manager queries to the owning manager.
class VirtualCallStubManagerManager : public StubManager
{
    VPTR_VTABLE_CLASS(VirtualCallStubManagerManager, StubManager)

    friend class StubManager;
    friend class VirtualCallStubManager;
    friend class VirtualCallStubManagerIterator;
    friend class StubManagerIterator;

public:
    virtual BOOL TraceManager(Thread *thread, TraceDestination *trace,
                              T_CONTEXT *pContext, BYTE **pRetAddr);

    virtual BOOL CheckIsStub_Internal(PCODE stubStartAddress);

    virtual BOOL DoTraceStub(PCODE stubStartAddress, TraceDestination *trace);

    static MethodDesc *Entry2MethodDesc(PCODE stubStartAddress, MethodTable *pMT);

#ifdef DACCESS_COMPILE
    virtual void DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags);
    virtual LPCWSTR GetStubManagerName(PCODE addr)
        { WRAPPER_NO_CONTRACT; return FindVirtualCallStubManager(addr)->GetStubManagerName(addr); }
#endif

private:
    // Used to keep track of all the VCSManager objects in the system.
    PTR_VirtualCallStubManager m_pManagers;  // Head of the linked list

#ifndef DACCESS_COMPILE
    // Ctor. This is only used by StaticInit.
    VirtualCallStubManagerManager();
#endif

    // A cache element to quickly check the last matched manager.
    Volatile<VirtualCallStubManager*> m_pCacheElem;

    // RW lock for reading entries and removing them.
    SimpleRWLock m_RWLock;

    // This will look through all the managers in an intelligent fashion to
    // find the manager that owns the address.
    VirtualCallStubManager *FindVirtualCallStubManager(PCODE stubAddress);

protected:
    // Add a VCSManager to the linked list.
    void AddStubManager(VirtualCallStubManager *pMgr);

    // Remove a VCSManager from the linked list.
    void RemoveStubManager(VirtualCallStubManager *pMgr);

    VirtualCallStubManager *FirstManager()
        { WRAPPER_NO_CONTRACT; return m_pManagers; }

#ifndef DACCESS_COMPILE
    static void InitStatic();
#endif

public:
    SPTR_DECL(VirtualCallStubManagerManager, g_pManager);

    static VirtualCallStubManagerManager *GlobalManager()
        { LIMITED_METHOD_DAC_CONTRACT; CONSISTENCY_CHECK(CheckPointer(g_pManager)); return g_pManager; }

    VirtualCallStubManagerIterator IterateVirtualCallStubManagers();

#ifdef _DEBUG
    // Debug helper to help identify stub-managers.
    virtual const char * DbgGetName() { LIMITED_METHOD_CONTRACT; return "VirtualCallStubManagerManager"; }
#endif
};

/********************************************************************************************************
********************************************************************************************************/
// Forward-only iterator over the linked list of VirtualCallStubManager instances.
class VirtualCallStubManagerIterator
{
    friend class VirtualCallStubManagerManager;

public:
    BOOL Next();
    VirtualCallStubManager *Current();

    // Copy ctor
    inline VirtualCallStubManagerIterator(const VirtualCallStubManagerIterator &it);

protected:
    inline VirtualCallStubManagerIterator(VirtualCallStubManagerManager *pMgr);

    BOOL                    m_fIsStart;
    VirtualCallStubManager *m_pCurMgr;
};

/////////////////////////////////////////////////////////////////////////////////////////////
// Ctor
inline VirtualCallStubManagerIterator::VirtualCallStubManagerIterator(VirtualCallStubManagerManager *pMgr)
    : m_fIsStart(TRUE), m_pCurMgr(pMgr->m_pManagers)
{
    LIMITED_METHOD_DAC_CONTRACT;
    CONSISTENCY_CHECK(CheckPointer(pMgr));
}

/////////////////////////////////////////////////////////////////////////////////////////////
// Copy ctor
inline VirtualCallStubManagerIterator::VirtualCallStubManagerIterator(const VirtualCallStubManagerIterator &it)
    : m_fIsStart(it.m_fIsStart), m_pCurMgr(it.m_pCurMgr)
{
    LIMITED_METHOD_DAC_CONTRACT;
}

/********************************************************************************************************
#StubDispatchNotes

A note on approach.  The cache and hash tables used by the stub and lookup mechanism
are designed with an eye to minimizing interlocking and/or syncing and/or locking operations.
They are intended to run in a highly concurrent environment.  Since there is no magic,
some tradeoffs and some implementation constraints are required.  The basic notion
is that if all reads and writes are atomic and if all functions and operations operate
correctly in the face of commutative reorderings of the visibility of all reads and writes
across threads, then we don't have to interlock, sync, or serialize.  Our approximation of
this is:

1. All reads and all writes to tables must be atomic.  This effectively limits the actual entry
size in a table to be a pointer or a pointer sized thing.

2. All functions, like comparisons for equality or computation of hash values must function
correctly in the face of concurrent updating of the underlying table.  This is accomplished
by making the underlying structures/entries effectively immutable, if concurrency is in anyway possible.
By effectively immutable, we mean that the stub or token structure is either immutable or that
if it is ever written, all possible concurrent writes are attempting to write the same value (atomically)
or that the competing (atomic) values do not affect correctness, and that the function operates correctly
whether or not any of the writes have taken place (is visible yet).
The constraint we maintain is that all competing updates (and their visibility or lack thereof)
do not alter the correctness of the program.

3. All tables are inexact.  The counts they hold (e.g. number of contained entries) may be inaccurate,
but that inaccuracy cannot affect their correctness.  Table modifications, such as insertion of
a new entry may not succeed, but such failures cannot affect correctness.  This implies that just
because a stub/entry is not present in a table, e.g. has been removed, that does not mean that
it is not in use.  It also implies that internal table structures, such as discarded hash table buckets,
cannot be freely recycled since another concurrent thread may still be walking thru it.

4. Occasionally it is necessary to pick up the pieces that have been dropped on the floor
so to speak, e.g. actually recycle hash buckets that aren't in use.  Since we have a natural
sync point already in the GC, we use that to provide cleanup points.  We need to make sure that
code that is walking our structures is not a GC safe point.  Hence if the GC calls back into us
inside the GC sync point, we know that nobody is inside our structures and we can safely rearrange
and recycle things.
********************************************************************************************************/

//initial and increment value for fail stub counters
#ifdef STUB_LOGGING
extern UINT32 STUB_MISS_COUNT_VALUE;
extern UINT32 STUB_COLLIDE_WRITE_PCT;
extern UINT32 STUB_COLLIDE_MONO_PCT;
#else // !STUB_LOGGING
#define STUB_MISS_COUNT_VALUE   100
#define STUB_COLLIDE_WRITE_PCT  100
#define STUB_COLLIDE_MONO_PCT     0
#endif // !STUB_LOGGING

//size and mask of the cache used by resolve stubs
// CALL_STUB_CACHE_SIZE must be equal to 2^CALL_STUB_CACHE_NUM_BITS
#define CALL_STUB_CACHE_NUM_BITS 12 //10
#define CALL_STUB_CACHE_SIZE 4096 //1024
#define CALL_STUB_CACHE_MASK (CALL_STUB_CACHE_SIZE-1)
#define CALL_STUB_CACHE_PROBES 5

//min sizes for BucketTable and buckets and the growth and hashing constants
#define CALL_STUB_MIN_BUCKETS 32
#define CALL_STUB_MIN_ENTRIES 4
//this is so that the very first growth will jump from 4 to 32 entries, then double from there.
#define CALL_STUB_SECONDARY_ENTRIES 8
#define CALL_STUB_GROWTH_FACTOR 2
#define CALL_STUB_LOAD_FACTOR 90
#define CALL_STUB_HASH_CONST1 1327
#define CALL_STUB_HASH_CONST2 43627
#define LARGE_PRIME 7199369

//internal layout of buckets=size-1,count,entries....
#define CALL_STUB_MASK_INDEX 0
#define CALL_STUB_COUNT_INDEX 1
#define CALL_STUB_DEAD_LINK 2
#define CALL_STUB_FIRST_INDEX 3

//marker entries in cache and hash tables
#define CALL_STUB_EMPTY_ENTRY 0

// number of successes for a chained element before it gets moved to the front
#define CALL_STUB_CACHE_INITIAL_SUCCESS_COUNT (0x100)

/*******************************************************************************************************
Entry is an abstract class.  We will make specific subclasses for each kind of entry.
Entries hold references to stubs or tokens.  The principal thing they provide is a virtual Equals
function that is used by the caching and hashing tables within which the stubs and tokens are stored.
Entries are typically stack allocated by the routines that call into the hash and caching functions,
and the functions stuff stubs into the entry to do the comparisons.  Essentially specific entry
subclasses supply a vtable to a stub as and when needed.  This means we don't have to have vtables
attached to stubs.

Summarizing so far, there is a struct for each kind of stub or token of the form XXXXStub.
They provide that actual storage layouts.
There is a struct in which each stub which has code is contained of the form XXXXHolder.
They provide alignment and ancillary storage for the stub code.
There is a subclass of Entry for each kind of stub or token, of the form XXXXEntry.
They provide the specific implementations of the virtual functions declared in Entry. */
class Entry
{
public:
    //access and compare the keys of the entry
    virtual BOOL Equals(size_t keyA, size_t keyB)=0;
    virtual size_t KeyA()=0;
    virtual size_t KeyB()=0;

    //contents is the struct or token that the entry exposes
    virtual void SetContents(size_t contents)=0;
};

/* define the platform specific Stubs and stub holders */

#include <virtualcallstubcpu.hpp>

#if USES_LOOKUP_STUBS
/**********************************************************************************************
LookupEntry wraps LookupStubs and provide the concrete implementation of the abstract class Entry.
Virtual and interface call sites when they are first jitted point to LookupStubs.
The hash table that contains look up stubs is keyed by token, hence the Equals function uses the
embedded token in the stub for comparison purposes.  Since we are willing to allow duplicates in the
hash table (as long as they are relatively rare) we do use direct comparison of the tokens rather
than extracting the fields from within the tokens, for perf reasons.
*/
class LookupEntry : public Entry
{
public:
    //Creates an entry that wraps lookup stub s
    LookupEntry(size_t s)
    {
        LIMITED_METHOD_CONTRACT;
        _ASSERTE(VirtualCallStubManager::isLookupStubStatic((PCODE)s));
        stub = (LookupStub*) s;
    }

    //default constructor to allow stack and inline allocation of lookup entries
    LookupEntry() {LIMITED_METHOD_CONTRACT; stub = NULL;}

    //implementations of abstract class Entry
    BOOL Equals(size_t keyA, size_t keyB)
         { WRAPPER_NO_CONTRACT; return stub && (keyA == KeyA()) && (keyB == KeyB()); }

    size_t KeyA() { WRAPPER_NO_CONTRACT; return Token(); }
    size_t KeyB() { WRAPPER_NO_CONTRACT; return (size_t)0; }

    void SetContents(size_t contents)
    {
        LIMITED_METHOD_CONTRACT;
        _ASSERTE(VirtualCallStubManager::isLookupStubStatic((PCODE)contents));
        stub = LookupHolder::FromLookupEntry((PCODE)contents)->stub();
    }

    //extract the token of the underlying lookup stub
    inline size_t Token() { LIMITED_METHOD_CONTRACT; return stub ? stub->token() : 0; }

private:
    LookupStub* stub;   //the stub the entry is wrapping
};
#endif // USES_LOOKUP_STUBS

// Entry wrapper for vtable call stubs. Keyed by the stub's token only; KeyB is unused.
class VTableCallEntry : public Entry
{
public:
    //Creates an entry that wraps vtable call stub
    VTableCallEntry(size_t s)
    {
        LIMITED_METHOD_CONTRACT;
        _ASSERTE(VirtualCallStubManager::isVtableCallStubStatic((PCODE)s));
        stub = (VTableCallStub*)s;
    }

    //default constructor to allow stack and inline allocation of vtable call entries
    VTableCallEntry() { LIMITED_METHOD_CONTRACT; stub = NULL; }

    //implementations of abstract class Entry
    BOOL Equals(size_t keyA, size_t keyB)
    { WRAPPER_NO_CONTRACT; return stub && (keyA == KeyA()) && (keyB == KeyB()); }

    size_t KeyA() { WRAPPER_NO_CONTRACT; return Token(); }
    size_t KeyB() { WRAPPER_NO_CONTRACT; return (size_t)0; }

    void SetContents(size_t contents)
    {
        LIMITED_METHOD_CONTRACT;
        _ASSERTE(VirtualCallStubManager::isVtableCallStubStatic((PCODE)contents));
        stub = VTableCallHolder::FromVTableCallEntry((PCODE)contents)->stub();
    }

    //extract the token of the underlying lookup stub
    inline size_t Token() { LIMITED_METHOD_CONTRACT; return stub ? stub->token() : 0; }

private:
    VTableCallStub* stub;   //the stub the entry is wrapping
};

/**********************************************************************************************
ResolveCacheEntry wraps a ResolveCacheElem and provides lookup functionality for entries that
were created that may be added to the ResolveCache
*/
class ResolveCacheEntry : public Entry
{
public:
    ResolveCacheEntry(size_t elem)
    {
        LIMITED_METHOD_CONTRACT;
        _ASSERTE(elem != 0);
        pElem = (ResolveCacheElem*) elem;
    }

    //default constructor to allow stack and inline allocation of lookup entries
    ResolveCacheEntry()
    {
        LIMITED_METHOD_CONTRACT;
        pElem = NULL;
    }

    //access and compare the keys of the entry
    virtual BOOL Equals(size_t keyA, size_t keyB)
        { WRAPPER_NO_CONTRACT; return pElem && (keyA == KeyA()) && (keyB == KeyB()); }

    // KeyA is the dispatch token of the wrapped cache element.
    virtual size_t KeyA()
        { LIMITED_METHOD_CONTRACT; return pElem != NULL ? pElem->token : 0; }

    // KeyB is the MethodTable pointer of the wrapped cache element.
    virtual size_t KeyB()
        { LIMITED_METHOD_CONTRACT; return pElem != NULL ? (size_t) pElem->pMT : 0; }

    //contents is the struct or token that the entry exposes
    virtual void SetContents(size_t contents)
    {
        LIMITED_METHOD_CONTRACT;
        pElem = (ResolveCacheElem*) contents;
    }

    inline const BYTE *Target()
    {
        LIMITED_METHOD_CONTRACT;
        return pElem != NULL ? (const BYTE *)pElem->target : NULL;
    }

private:
    ResolveCacheElem *pElem;
};

/**********************************************************************************************
ResolveEntry wraps ResolveStubs and provide the concrete implementation of the abstract class Entry.
Polymorphic call sites and monomorphic calls that fail end up in a ResolveStub.
Resolve stubs are stored in hash tables keyed by token, hence the Equals function
uses the embedded token in the stub for comparison purposes.  Since we are willing to allow
duplicates in the hash table (as long as they are relatively rare) we do use direct comparison
of the tokens rather than extracting the fields from within the tokens, for perf reasons.
*/
class ResolveEntry : public Entry
{
public:
    //Creates an entry that wraps resolve stub s
    ResolveEntry (size_t s)
    {
        LIMITED_METHOD_CONTRACT;
        _ASSERTE(VirtualCallStubManager::isResolvingStubStatic((PCODE)s));
        stub = (ResolveStub*) s;
    }

    //default constructor to allow stack and inline allocation of resolver entries
    ResolveEntry() { LIMITED_METHOD_CONTRACT; stub = CALL_STUB_EMPTY_ENTRY; }

    //implementations of abstract class Entry
    inline BOOL Equals(size_t keyA, size_t keyB)
         { WRAPPER_NO_CONTRACT; return stub && (keyA == KeyA()) && (keyB == KeyB()); }

    inline size_t KeyA() { WRAPPER_NO_CONTRACT; return Token(); }
    inline size_t KeyB() { WRAPPER_NO_CONTRACT; return (size_t)0; }

    void SetContents(size_t contents)
    {
        LIMITED_METHOD_CONTRACT;
        _ASSERTE(VirtualCallStubManager::isResolvingStubStatic((PCODE)contents));
        stub = ResolveHolder::FromResolveEntry((PCODE)contents)->stub();
    }

    //extract the token of the underlying resolve stub
    inline size_t Token()  { WRAPPER_NO_CONTRACT; return stub ? (size_t)(stub->token()) : 0; }

private:
    ResolveStub* stub;   //the stub the entry is wrapping
};

/**********************************************************************************************
DispatchEntry wraps DispatchStubs and provide the concrete implementation of the abstract class Entry.
Monomorphic and mostly monomorphic call sites eventually point to DispatchStubs.
Dispatch stubs are placed in hash and cache tables keyed by the expected Method Table and
token they are built for.

Since we are willing to allow duplicates in the hash table (as long as they are
relatively rare) we do use direct comparison of the tokens rather than extracting
the fields from within the tokens, for perf reasons.*/
class DispatchEntry : public Entry
{
public:
    //Creates an entry that wraps dispatch stub s
    DispatchEntry (size_t s)
    {
        LIMITED_METHOD_CONTRACT;
        _ASSERTE(VirtualCallStubManager::isDispatchingStubStatic((PCODE)s));
        stub = (DispatchStub*) s;
    }

    //default constructor to allow stack and inline allocation of resolver entries
    DispatchEntry() { LIMITED_METHOD_CONTRACT; stub = CALL_STUB_EMPTY_ENTRY; }

    //implementations of abstract class Entry
    inline BOOL Equals(size_t keyA, size_t keyB)
         { WRAPPER_NO_CONTRACT; return stub && (keyA == KeyA()) && (keyB == KeyB()); }

    inline size_t KeyA() { WRAPPER_NO_CONTRACT; return Token(); }
    inline size_t KeyB() { WRAPPER_NO_CONTRACT; return ExpectedMT();}

    void SetContents(size_t contents)
    {
        LIMITED_METHOD_CONTRACT;
        _ASSERTE(VirtualCallStubManager::isDispatchingStubStatic((PCODE)contents));
        stub = DispatchHolder::FromDispatchEntry((PCODE)contents)->stub();
    }

    //extract the fields of the underlying dispatch stub
    inline size_t ExpectedMT()
          { WRAPPER_NO_CONTRACT; return stub ? (size_t)(stub->expectedMT()) : 0; }

    // The token is recovered from the resolve stub that the dispatch stub fails over to.
    size_t Token()
    {
        WRAPPER_NO_CONTRACT;
        if (stub)
        {
            ResolveHolder * resolveHolder = ResolveHolder::FromFailEntry(stub->failTarget());
            size_t token = resolveHolder->stub()->token();
            _ASSERTE(token == VirtualCallStubManager::GetTokenFromStub((PCODE)stub));
            return token;
        }
        else
        {
            return 0;
        }
    }

    inline PCODE Target()
          { WRAPPER_NO_CONTRACT; return stub ? stub->implTarget()  : 0; }

private:
    DispatchStub* stub;
};

/*************************************************************************************************
DispatchCache is the cache table that the resolve stubs use for inline polymorphic resolution
of a call.
The cache entry is logically a triplet of (method table, token, impl address) where method table
is the type of the calling frame's <this>, token identifies the method being invoked,
i.e. is a (type id,slot #) pair, and impl address is the address of the method implementation.
*/
class DispatchCache
{
public:
    static const UINT16 INVALID_HASH = (UINT16)(-1);

    DispatchCache();

    //read and write the cache keyed by (method table,token) pair.
    inline ResolveCacheElem* Lookup(size_t token, void* mt)
        { WRAPPER_NO_CONTRACT; return Lookup(token, INVALID_HASH, mt);}

    ResolveCacheElem* Lookup(size_t token, UINT16 tokenHash, void* mt);

    enum InsertKind {IK_NONE, IK_DISPATCH, IK_RESOLVE, IK_SHARED, IK_EXTERNAL};

    BOOL Insert(ResolveCacheElem* elem, InsertKind insertKind);
#ifdef CHAIN_LOOKUP
    void PromoteChainEntry(ResolveCacheElem* elem);
#endif

    // This is the heavyweight hashing algorithm. Use sparingly.
    static UINT16 HashToken(size_t token);

    // Reports total slot count and how many slots are currently occupied.
    inline void GetLoadFactor(size_t *total, size_t *used)
    {
        LIMITED_METHOD_CONTRACT;

        *total = CALL_STUB_CACHE_SIZE;
        size_t count = 0;
        for (size_t i = 0; i < CALL_STUB_CACHE_SIZE; i++)
            if (cache[i] != empty)
                count++;
        *used = count;
    }

    inline void *GetCacheBaseAddr()
        { LIMITED_METHOD_CONTRACT; return &cache[0]; }
    inline size_t GetCacheCount()
        { LIMITED_METHOD_CONTRACT; return CALL_STUB_CACHE_SIZE; }
    inline ResolveCacheElem *GetCacheEntry(size_t idx)
        { LIMITED_METHOD_CONTRACT; return VolatileLoad(&cache[idx]); }
    inline BOOL IsCacheEntryEmpty(size_t idx)
        { LIMITED_METHOD_CONTRACT; return cache[idx] == empty; }

    inline void SetCacheEntry(size_t idx, ResolveCacheElem *elem)
    {
        LIMITED_METHOD_CONTRACT;
#ifdef STUB_LOGGING
        cacheData[idx].numWrites++;
#endif
#ifdef CHAIN_LOOKUP
        CONSISTENCY_CHECK(m_writeLock.OwnedByCurrentThread());
#endif
        cache[idx] = elem;
    }

    inline void ClearCacheEntry(size_t idx)
    {
        LIMITED_METHOD_CONTRACT;
#ifdef STUB_LOGGING
        cacheData[idx].numClears++;
#endif
        cache[idx] = empty;
    }

    struct
    {
        UINT32 insert_cache_external;   //# of times Insert was called for IK_EXTERNAL
        UINT32 insert_cache_shared;     //# of times Insert was called for IK_SHARED
        UINT32 insert_cache_dispatch;   //# of times Insert was called for IK_DISPATCH
        UINT32 insert_cache_resolve;    //# of times Insert was called for IK_RESOLVE
        UINT32 insert_cache_hit;        //# of times Insert found an empty cache entry
        UINT32 insert_cache_miss;       //# of times Insert already had a matching cache entry
        UINT32 insert_cache_collide;    //# of times Insert found a used cache entry
        UINT32 insert_cache_write;      //# of times Insert wrote a cache entry
    } stats;

    void LogStats();

    // Unlocked iterator of entries. Use only when read/write access to the cache
    // is safe. This would typically be at GC sync points, currently needed during
    // appdomain unloading.
    class Iterator
    {
      public:
        Iterator(DispatchCache *pCache);
        inline BOOL IsValid()
        { WRAPPER_NO_CONTRACT; return (m_curBucket < (INT32)m_pCache->GetCacheCount()); }
        void Next();
        // Unlink the current entry.
        // **NOTE** Using this method implicitly performs a call to Next to move
        //          past the unlinked entry. Thus, one could accidentally skip
        //          entries unless you take this into consideration.
        ResolveCacheElem *UnlinkEntry();
        inline ResolveCacheElem *Entry()
        { LIMITED_METHOD_CONTRACT; CONSISTENCY_CHECK(IsValid()); return *m_ppCurElem; }

      private:
        void NextValidBucket();
        inline void NextBucket()
        { LIMITED_METHOD_CONTRACT; m_curBucket++; m_ppCurElem = &m_pCache->cache[m_curBucket]; }

        DispatchCache *m_pCache;
        INT32          m_curBucket;
        ResolveCacheElem **m_ppCurElem;
    };

private:
#ifdef CHAIN_LOOKUP
    Crst m_writeLock;
#endif

    //the following hash computation is also inlined in the resolve stub in asm (SO NO TOUCHIE)
    inline static UINT16 HashMT(UINT16 tokenHash, void* mt)
    {
        LIMITED_METHOD_CONTRACT;

        UINT16 hash;

        size_t mtHash = (size_t) mt;
        mtHash = (((mtHash >> CALL_STUB_CACHE_NUM_BITS) + mtHash) >> LOG2_PTRSIZE) & CALL_STUB_CACHE_MASK;
        hash  = (UINT16) mtHash;

        hash ^= (tokenHash & CALL_STUB_CACHE_MASK);

        return hash;
    }

    ResolveCacheElem* cache[CALL_STUB_CACHE_SIZE];  //must be first
    ResolveCacheElem* empty;                        //empty entry, initialized to fail all comparisons
#ifdef STUB_LOGGING
public:
    struct CacheEntryData {
        UINT32 numWrites;
        UINT16 numClears;
    };
    CacheEntryData cacheData[CALL_STUB_CACHE_SIZE];
#endif // STUB_LOGGING
};

/**************************************************************************************************
The hash tables are accessed via instances of the Prober.  Prober is a probe into a bucket
of the hash table, and therefore has an index which is the current probe position.
It includes a count of the number of probes done in that bucket so far and a stride
to step thru the bucket with.  To do comparisons, it has a reference to an entry with which
it can do comparisons (Equals(...)) of the entries (stubs) inside the hash table.  It also has the
key pair (keyA, keyB) that it is looking for.

Typically, an entry of the appropriate type is created on the stack and then the prober is created
passing in a reference to the entry.  The prober is used for a complete operation, such as look for
and find an entry (stub), creating and inserting it as necessary.
The initial index and the stride are orthogonal hashes of the key pair, i.e. we are doing a variant
of double hashing.  When we initialize the prober (see FormHash below) we set the initial probe based
on one hash.  The stride (used as a modulo addition of the probe position) is based on a different hash
and is such that it will visit every location in the bucket before repeating.  Hence it is imperative
that the bucket size and the stride be relatively prime wrt each other.  We have chosen to make bucket
sizes a power of 2, so we force stride to be odd.

Note -- it must be assumed that multiple probers are walking the same tables and buckets at the same
time.  Additionally, the counts may not be accurate, and there may be duplicates in the tables.
Since the tables do not allow concurrent deletion, some of the concurrency issues are ameliorated.
*/
class Prober
{
    friend class FastTable;
    friend class BucketTable;
public:
    Prober(Entry* e) {LIMITED_METHOD_CONTRACT; comparer = e;}

    //find the requested entry, if not there return CALL_STUB_EMPTY_ENTRY
    size_t Find();

    //add the entry into the bucket, if it is not already in the bucket.
    //return the entry actually in the bucket (existing or added)
    size_t Add(size_t entry);

private:
    //return the bucket (FastTable*) that the prober is currently walking
    inline size_t* items() {LIMITED_METHOD_CONTRACT; return &base[-CALL_STUB_FIRST_INDEX];}

    //are there more probes possible, or have we probed everything in the bucket
    inline BOOL NoMore() {LIMITED_METHOD_CONTRACT; return probes>mask;} //both probes and mask are (-1)

    //advance the probe to a new place in the bucket
    inline BOOL Next()
    {
        WRAPPER_NO_CONTRACT;

        index = (index + stride) & mask;
        probes++;
        return !NoMore();
    }

    // Atomic (volatile) read of the slot at the current probe position.
    inline size_t Read()
    {
        LIMITED_METHOD_CONTRACT;
        _ASSERTE(base);
        return VolatileLoad(&base[index]);
    }

    //initialize a prober across a bucket (table) for the specified keys.
    void InitProber(size_t key1, size_t key2, size_t* table);

    //set up the initial index and stride and probe count
    inline void FormHash()
    {
        LIMITED_METHOD_CONTRACT;

        probes = 0;

        //these two hash functions have not been formally measured for effectiveness
        //but they are at least orthogonal
        size_t a = ((keyA>>16) + keyA);
        size_t b = ((keyB>>16) ^ keyB);
        index    = (((a*CALL_STUB_HASH_CONST1)>>4)+((b*CALL_STUB_HASH_CONST2)>>4)+CALL_STUB_HASH_CONST1) & mask;
        stride   = ((a+(b*CALL_STUB_HASH_CONST1)+CALL_STUB_HASH_CONST2) | 1) & mask;
    }

    //atomically grab an empty slot so we can insert a new entry into the bucket
    BOOL GrabEntry(size_t entryValue);

    size_t keyA;        //key pair we are looking for
    size_t keyB;

    size_t* base;       //we have our own pointer to the bucket, so races don't matter.
                        //  We won't care if we do the lookup in an
                        //  outdated bucket (has grown out from under us).
                        //  All that will happen is possibly dropping an entry
                        //  on the floor or adding a duplicate.

    size_t index;       //current probe point in the bucket
    size_t stride;      //amount to step on each successive probe, must be relatively prime wrt the bucket size
    size_t mask;        //size of bucket - 1
    size_t probes;      //number probes - 1
    Entry* comparer;    //used to compare an entry against the sought after key pair
};

/********************************************************************************************************
FastTable is used to implement the buckets of a BucketTable, a bucketized hash table.

A FastTable is an array of entries (contents).  The first two slots of contents store the size-1 and
count of entries actually in the FastTable.  Note that the count may be inaccurate and there may be
duplicates.  Careful attention must be paid to eliminate the need for interlocked or serialized or
locked operations in face of concurrency.
*/
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4200)     // disable zero-sized array warning
#endif // _MSC_VER
class FastTable
{
    friend class BucketTable;
public:
private:
    FastTable() { LIMITED_METHOD_CONTRACT; }
    ~FastTable() { LIMITED_METHOD_CONTRACT; }

    //initialize a prober for the specified keys.
    inline BOOL SetUpProber(size_t keyA, size_t keyB, Prober* probe)
    {
        CONTRACTL {
            NOTHROW;
            GC_NOTRIGGER;
            FORBID_FAULT;
        } CONTRACTL_END;

        _ASSERTE(probe);
        _ASSERTE(contents);
        probe->InitProber(keyA, keyB, &contents[0]);
        return TRUE;
    }
    //find the requested entry (keys of prober), if not there return CALL_STUB_EMPTY_ENTRY
    size_t Find(Prober* probe);
    //add the entry, if it is not already there.  Probe is used to search.
    //Return the entry actually containted (existing or added)
    size_t Add(size_t entry, Prober* probe);
    void IncrementCount();

    // Create a FastTable with space for numberOfEntries.  Please note that this method
    // does not throw on OOM. **YOU MUST CHECK FOR NULL RETURN**
    // NOTE(review): the CONTRACTL below declares THROWS/INJECT_FAULT while the comment
    // above promises a NULL return on OOM -- confirm which contract callers rely on.
    static FastTable* MakeTable(size_t numberOfEntries)
    {
        CONTRACTL {
            THROWS;
            GC_TRIGGERS;
            INJECT_FAULT(COMPlusThrowOM(););
        } CONTRACTL_END;

        //round the requested entry count up to the next power of two
        size_t size = CALL_STUB_MIN_ENTRIES;
        while (size < numberOfEntries) {size = size<<1;}
//        if (size == CALL_STUB_MIN_ENTRIES)
//            size += 3;
        //the table is allocated as raw size_t storage and constructed in place, because
        //the entries live in the zero-length 'contents' array at the end of the object
        size_t* bucket = new size_t[(sizeof(FastTable)/sizeof(size_t))+size+CALL_STUB_FIRST_INDEX];
        FastTable* table = new (bucket) FastTable();
        table->InitializeContents(size);
        return table;
    }
    //Initialize as empty
    void InitializeContents(size_t size)
    {
        LIMITED_METHOD_CONTRACT;
        memset(&contents[0], CALL_STUB_EMPTY_ENTRY, (size+CALL_STUB_FIRST_INDEX)*sizeof(BYTE*));
        contents[CALL_STUB_MASK_INDEX] = size-1;
    }
    inline size_t tableMask() {LIMITED_METHOD_CONTRACT; return (size_t) (contents[CALL_STUB_MASK_INDEX]);}
    inline size_t tableSize() {LIMITED_METHOD_CONTRACT; return tableMask()+1;}
    inline size_t tableCount() {LIMITED_METHOD_CONTRACT; return (size_t) (contents[CALL_STUB_COUNT_INDEX]);}
    //TRUE once the occupancy (which may be an under-count, see class comment) crosses the load factor
    inline BOOL isFull()
    {
        LIMITED_METHOD_CONTRACT;
        return (tableCount()+1) * 100 / CALL_STUB_LOAD_FACTOR >= tableSize();
    }
    //we store (size-1) in bucket[CALL_STUB_MASK_INDEX==0],
    //we store the used count in bucket[CALL_STUB_COUNT_INDEX==1],
    //we have an unused cell to use as a temp at bucket[CALL_STUB_DEAD_LINK==2],
    //and the table starts at bucket[CALL_STUB_FIRST_INDEX==3],
    size_t contents[0];
};
#ifdef _MSC_VER
#pragma warning(pop)
#endif

/******************************************************************************************************
BucketTable is a bucketized hash table.  It uses FastTables for its buckets.  The hash tables
used by the VirtualCallStubManager are BucketTables.  The number of buckets is fixed at the time
the table is created.  The actual buckets are allocated as needed, and grow as necessary.  The reason
for using buckets is primarily to reduce the cost of growing, since only a single bucket is actually
grown at any given time.  Since the hash tables are accessed infrequently, the load factor that
controls growth is quite high (90%).  Since we use hashing to pick the bucket, and we use hashing to
lookup inside the bucket, it is important that the hashing function used here is orthogonal to the ones
used in the buckets themselves (see FastTable::FormHash).
*/
class BucketTable
{
public:
    BucketTable(size_t numberOfBuckets)
    {
        WRAPPER_NO_CONTRACT;
        //round the requested bucket count up to the next power of two
        size_t size = CALL_STUB_MIN_BUCKETS;
        while (size < numberOfBuckets) {size = size<<1;}
        buckets = AllocateBuckets(size);
        // Initialize statistics counters
        memset(&stats, 0, sizeof(stats));
    }

    ~BucketTable()
    {
        LIMITED_METHOD_CONTRACT;
        if(buckets != NULL)
        {
            size_t size = bucketCount()+CALL_STUB_FIRST_INDEX;
            for(size_t ix = CALL_STUB_FIRST_INDEX; ix < size; ix++) delete (FastTable*)(buckets[ix]);
            delete buckets;
        }
    }

    //initialize a prober for the specified keys.
    BOOL SetUpProber(size_t keyA, size_t keyB, Prober *prober);
    //find the requested entry (keys of prober), if not there return CALL_STUB_EMPTY_ENTRY
    inline size_t Find(Prober* probe) {WRAPPER_NO_CONTRACT; return probe->Find();}
    //add the entry, if it is not already there.  Probe is used to search.
    size_t Add(size_t entry, Prober* probe);
    //reclaim abandoned buckets.  Buckets are abandoned when they need to grow.
    //needs to be called inside a gc sync point.
    static void Reclaim();

    struct
    {
        UINT32 bucket_space;        //# of bytes in caches and tables, not including the stubs themselves
        UINT32 bucket_space_dead;   //# of bytes of abandoned buckets not yet recycled.
    } stats;

    void LogStats();

private:
    inline size_t bucketMask()  {LIMITED_METHOD_CONTRACT; return (size_t) (buckets[CALL_STUB_MASK_INDEX]);}
    inline size_t bucketCount() {LIMITED_METHOD_CONTRACT; return bucketMask()+1;}
    //hash the key pair down to a bucket index; deliberately uses the hash constants in the
    //opposite roles from Prober::FormHash so the two hashes stay orthogonal (see class comment)
    inline size_t ComputeBucketIndex(size_t keyA, size_t keyB)
    {
        LIMITED_METHOD_CONTRACT;
        size_t a = ((keyA>>16) + keyA);
        size_t b = ((keyB>>16) ^ keyB);
        return CALL_STUB_FIRST_INDEX+(((((a*CALL_STUB_HASH_CONST2)>>5)^((b*CALL_STUB_HASH_CONST1)>>5))+CALL_STUB_HASH_CONST2) & bucketMask());
    }
    //grows the bucket referenced by probe.
    BOOL GetMoreSpace(const Prober* probe);
    //creates storage in which to store references to the buckets
    static size_t* AllocateBuckets(size_t size)
    {
        LIMITED_METHOD_CONTRACT;
        size_t* buckets = new size_t[size+CALL_STUB_FIRST_INDEX];
        if (buckets != NULL)
        {
            memset(&buckets[0], CALL_STUB_EMPTY_ENTRY, (size+CALL_STUB_FIRST_INDEX)*sizeof(void*));
            buckets[CALL_STUB_MASK_INDEX] = size-1;
        }
        return buckets;
    }
    //volatile read of a bucket slot -- buckets are published/replaced while probers race on them
    inline size_t Read(size_t index)
    {
        LIMITED_METHOD_CONTRACT;
        CONSISTENCY_CHECK(index <= bucketMask()+CALL_STUB_FIRST_INDEX);
        return VolatileLoad(&buckets[index]);
    }

#ifdef _MSC_VER
#pragma warning(disable: 4267) //work-around for the compiler
#endif
    //volatile write of a bucket slot, paired with Read() above
    inline void Write(size_t index, size_t value)
    {
        LIMITED_METHOD_CONTRACT;
        CONSISTENCY_CHECK(index <= bucketMask()+CALL_STUB_FIRST_INDEX);
        VolatileStore(&buckets[index], value);
    }
#ifdef _MSC_VER
#pragma warning(default: 4267)
#endif

    // We store (#buckets-1) in    bucket[CALL_STUB_MASK_INDEX  ==0]
    // We have two unused cells at bucket[CALL_STUB_COUNT_INDEX ==1]
    //                         and bucket[CALL_STUB_DEAD_LINK   ==2]
    // and the table starts at     bucket[CALL_STUB_FIRST_INDEX ==3]
    // the number of elements is   bucket[CALL_STUB_MASK_INDEX]+CALL_STUB_FIRST_INDEX
    size_t* buckets;
    static FastTable* dead;             //linked list head of to be deleted (abandoned) buckets
};

#endif // !_VIRTUAL_CALL_STUB_H
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // File: VirtualCallStub.h // // // See code:VirtualCallStubManager for details // // ============================================================================ #ifndef _VIRTUAL_CALL_STUB_H #define _VIRTUAL_CALL_STUB_H #define CHAIN_LOOKUP #if defined(TARGET_X86) // If this is uncommented, leaves a file "StubLog_<pid>.log" with statistics on the behavior // of stub-based interface dispatch. //#define STUB_LOGGING #endif #include "stubmgr.h" ///////////////////////////////////////////////////////////////////////////////////// // Forward class declarations class FastTable; class BucketTable; class Entry; class Prober; class VirtualCallStubManager; class VirtualCallStubManagerManager; struct LookupHolder; struct DispatchHolder; struct ResolveHolder; struct VTableCallHolder; ///////////////////////////////////////////////////////////////////////////////////// // Forward function declarations extern "C" void InContextTPQuickDispatchAsmStub(); extern "C" PCODE STDCALL VSD_ResolveWorker(TransitionBlock * pTransitionBlock, TADDR siteAddrForRegisterIndirect, size_t token #ifndef TARGET_X86 , UINT_PTR flags #endif ); ///////////////////////////////////////////////////////////////////////////////////// #if defined(TARGET_X86) || defined(TARGET_AMD64) typedef INT32 DISPL; #endif ///////////////////////////////////////////////////////////////////////////////////// // Represents the struct that is added to the resolve cache // NOTE: If you change the layout of this struct, you'll need to update various // ASM helpers in VirtualCallStubCpu that rely on offsets of members. 
// struct ResolveCacheElem { void *pMT; size_t token; // DispatchToken void *target; // These are used for chaining ResolveCacheElem *pNext; ResolveCacheElem *Next() { LIMITED_METHOD_CONTRACT; return VolatileLoad(&pNext); } #ifdef _DEBUG UINT16 debug_hash; UINT16 debug_index; #endif // _DEBUG BOOL Equals(size_t token, void *pMT) { LIMITED_METHOD_CONTRACT; return (this->pMT == pMT && this->token == token); } BOOL Equals(ResolveCacheElem *pElem) { WRAPPER_NO_CONTRACT; return Equals(pElem->token, pElem->pMT); } }; enum { e_resolveCacheElem_sizeof_mt = sizeof(void *), e_resolveCacheElem_sizeof_token = sizeof(size_t), e_resolveCacheElem_sizeof_target = sizeof(void *), e_resolveCacheElem_sizeof_next = sizeof(ResolveCacheElem *), e_resolveCacheElem_offset_mt = 0, e_resolveCacheElem_offset_token = e_resolveCacheElem_offset_mt + e_resolveCacheElem_sizeof_mt, e_resolveCacheElem_offset_target = e_resolveCacheElem_offset_token + e_resolveCacheElem_sizeof_token, e_resolveCacheElem_offset_next = e_resolveCacheElem_offset_target + e_resolveCacheElem_sizeof_target, }; ///////////////////////////////////////////////////////////////////////////////////// // A utility class to help manipulate a call site struct StubCallSite { friend class VirtualCallStubManager; private: // On x86 are four possible kinds of callsites when you take into account all features // Relative: direct call, e.g. "call addr". Not used currently. // RelativeIndirect (JmpRel): indirect call through a relative address, e.g. "call [addr]" // RegisterIndirect: indirect call through a register, e.g. "call [eax]" // DelegateCallSite: anything else, tail called through a register by shuffle thunk, e.g. "jmp [eax]" // // On all other platforms we always use an indirect call through an indirection cell // In these cases all calls are made by the platform equivalent of "call [addr]". 
// // DelegateCallSite are particular in that they can come in a variety of forms: // a direct delegate call has a sequence defined by the jit but a multicast or wrapper delegate // are defined in a stub and have a different shape // PTR_PCODE m_siteAddr; // Stores the address of an indirection cell PCODE m_returnAddr; public: #if defined(TARGET_X86) StubCallSite(TADDR siteAddrForRegisterIndirect, PCODE returnAddr); PCODE GetCallerAddress(); #else // !defined(TARGET_X86) // On platforms where we always use an indirection cell things // are much simpler - the siteAddr always stores a pointer to a // value that in turn points to the indirection cell. StubCallSite(TADDR siteAddr, PCODE returnAddr) { LIMITED_METHOD_CONTRACT; m_siteAddr = dac_cast<PTR_PCODE>(siteAddr); m_returnAddr = returnAddr; } PCODE GetCallerAddress() { LIMITED_METHOD_CONTRACT; return m_returnAddr; } #endif // !defined(TARGET_X86) PCODE GetSiteTarget() { WRAPPER_NO_CONTRACT; return *(GetIndirectCell()); } void SetSiteTarget(PCODE newTarget); PTR_PCODE GetIndirectCell() { LIMITED_METHOD_CONTRACT; return dac_cast<PTR_PCODE>(m_siteAddr); } PTR_PCODE * GetIndirectCellAddress() { LIMITED_METHOD_CONTRACT; return &m_siteAddr; } PCODE GetReturnAddress() { LIMITED_METHOD_CONTRACT; return m_returnAddr; } }; // These are the assembly language entry points that the stubs use when they want to go into the EE extern "C" void ResolveWorkerAsmStub(); // resolve a token and transfer control to that method extern "C" void ResolveWorkerChainLookupAsmStub(); // for chaining of entries in the cache #ifdef TARGET_X86 extern "C" void BackPatchWorkerAsmStub(); // backpatch a call site to point to a different stub #ifdef TARGET_UNIX extern "C" void BackPatchWorkerStaticStub(PCODE returnAddr, TADDR siteAddrForRegisterIndirect); #endif // TARGET_UNIX #endif // TARGET_X86 typedef VPTR(class VirtualCallStubManager) PTR_VirtualCallStubManager; // VirtualCallStubManager is the heart of the stub dispatch logic. 
See the book of the runtime entry // // file:../../doc/BookOfTheRuntime/ClassLoader/VirtualStubDispatchDesign.doc // // The basic idea is that a call to an interface (it could also be used for virtual calls in general, but we // do not do this), is simply the code // // call [DispatchCell] // // Where we make sure 'DispatchCell' points at stubs that will do the right thing. DispatchCell is writable // so we can udpate the code over time. There are three basic types of stubs that the dispatch cell can point // to. // * Lookup: The intial stub that has no 'fast path' and simply pushes a ID for interface being called // and calls into the runtime at code:VirtualCallStubManager.ResolveWorkerStatic. // * Dispatch: Lookup stubs are patched to this stub which has a fast path that checks for a particular // Method Table and if that fails jumps to code that // * Decrements a 'missCount' (starts out as code:STUB_MISS_COUNT_VALUE). If this count goes to zero // code:VirtualCallStubManager.BackPatchWorkerStatic is called, morphs it into a resolve stub // (however since this decrementing logic is SHARED among all dispatch stubs, it may take // multiples of code:STUB_MISS_COUNT_VALUE if mulitple call sites are actively polymorphic (this // seems unlikley). // * Calls a resolve stub (Whenever a dispatch stub is created, it always has a cooresponding resolve // stub (but the resolve stubs are shared among many dispatch stubs). // * Resolve: see code:ResolveStub. This looks up the Method table in a process wide cache (see // code:ResolveCacheElem, and if found, jumps to it. This code path is about 17 instructions long (so // pretty fast, but certainly much slower than a normal call). If the method table is not found in // the cache, it calls into the runtime code:VirtualCallStubManager.ResolveWorkerStatic, which // populates it. // So the general progression is call site's cells // * start out life pointing to a lookup stub // * On first call they get updated into a dispatch stub. 
When this misses, it calls a resolve stub, // which populates a resovle stub's cache, but does not update the call site' cell (thus it is still // pointing at the dispatch cell. // * After code:STUB_MISS_COUNT_VALUE misses, we update the call site's cell to point directly at the // resolve stub (thus avoiding the overhead of the quick check that always seems to be failing and // the miss count update). // // QUESTION: What is the lifetimes of the various stubs and hash table entries? // // QUESTION: There does not seem to be any logic that will change a call site's cell once it becomes a // Resolve stub. Thus once a particular call site becomes a Resolve stub we live with the Resolve stub's // (in)efficiency forever. // // see code:#StubDispatchNotes for more class VirtualCallStubManager : public StubManager { friend class VirtualCallStubManagerManager; friend class VirtualCallStubManagerIterator; #if defined(DACCESS_COMPILE) friend class ClrDataAccess; friend class DacDbiInterfaceImpl; #endif // DACCESS_COMPILE VPTR_VTABLE_CLASS(VirtualCallStubManager, StubManager) public: #ifdef _DEBUG virtual const char * DbgGetName() { LIMITED_METHOD_CONTRACT; return "VirtualCallStubManager"; } #endif // The reason for our existence, return a callstub for type id and slot number // where type id = 0 for the class contract (i.e. a virtual call), and type id > 0 for an // interface invoke where the id indicates which interface it is. // // The function is idempotent, i.e. // you'll get the same callstub twice if you call it with identical inputs. PCODE GetCallStub(TypeHandle ownerType, MethodDesc *pMD); PCODE GetCallStub(TypeHandle ownerType, DWORD slot); // Stubs for vtable-based virtual calls with no lookups PCODE GetVTableCallStub(DWORD slot); // Generate an fresh indirection cell. 
BYTE* GenerateStubIndirection(PCODE stub, BOOL fUseRecycledCell = FALSE); // Set up static data structures - called during EEStartup static void InitStatic(); static void UninitStatic(); // Per instance initialization - called during AppDomain::Init and ::Uninit and for collectible loader allocators void Init(BaseDomain* pDomain, LoaderAllocator *pLoaderAllocator); void Uninit(); //@TODO: the logging should be tied into the VMs normal loggin mechanisms, //@TODO: for now we just always write a short log file called "StubLog_<pid>.log" static void StartupLogging(); static void LoggingDump(); static void FinishLogging(); static void ResetCache(); // Reclaim/rearrange any structures that can only be done during a gc sync point. // This is the mechanism we are using to avoid synchronization of alot of our // cache and hash table accesses. We are requiring that during a gc sync point we are not // executing any stub code at all, hence at this time we are serialized on a single thread (gc) // and no other thread is accessing the data structures. 
static void ReclaimAll(); void Reclaim(); #ifndef DACCESS_COMPILE VirtualCallStubManager() : StubManager(), lookup_rangeList(), resolve_rangeList(), dispatch_rangeList(), cache_entry_rangeList(), vtable_rangeList(), parentDomain(NULL), m_loaderAllocator(NULL), m_initialReservedMemForHeaps(NULL), m_FreeIndCellList(NULL), m_RecycledIndCellList(NULL), indcell_heap(NULL), cache_entry_heap(NULL), lookup_heap(NULL), dispatch_heap(NULL), resolve_heap(NULL), #ifdef TARGET_AMD64 m_fShouldAllocateLongJumpDispatchStubs(FALSE), #endif lookups(NULL), cache_entries(NULL), dispatchers(NULL), resolvers(NULL), m_counters(NULL), m_cur_counter_block(NULL), m_cur_counter_block_for_reclaim(NULL), m_cur_counter_block_for_reclaim_index(NULL), m_pNext(NULL) { LIMITED_METHOD_CONTRACT; ZeroMemory(&stats, sizeof(stats)); } ~VirtualCallStubManager(); #endif // !DACCESS_COMPILE enum StubKind { SK_UNKNOWN, SK_LOOKUP, // Lookup Stubs are SLOW stubs that simply call into the runtime to do all work. SK_DISPATCH, // Dispatch Stubs have a fast check for one type otherwise jumps to runtime. Works for monomorphic sites SK_RESOLVE, // Resolve Stubs do a hash lookup before fallling back to the runtime. Works for polymorphic sites. SK_VTABLECALL, // Stub that jumps to a target method using vtable-based indirections. Works for non-interface calls. SK_BREAKPOINT }; // peek at the assembly code and predict which kind of a stub we have StubKind predictStubKind(PCODE stubStartAddress); /* know thine own stubs. It is possible that when multiple virtualcallstub managers are built that these may need to become non-static, and the callers modified accordingly */ StubKind getStubKind(PCODE stubStartAddress, BOOL usePredictStubKind = TRUE) { WRAPPER_NO_CONTRACT; SUPPORTS_DAC; // This method can called with stubStartAddress==NULL, e.g. when handling null reference exceptions // caused by IP=0. Early out for this case to avoid confusing handled access violations inside predictStubKind. 
if (PCODEToPINSTR(stubStartAddress) == NULL) return SK_UNKNOWN; // Rather than calling IsInRange(stubStartAddress) for each possible stub kind // we can peek at the assembly code and predict which kind of a stub we have StubKind predictedKind = (usePredictStubKind) ? predictStubKind(stubStartAddress) : SK_UNKNOWN; if (predictedKind == SK_DISPATCH) { if (isDispatchingStub(stubStartAddress)) return SK_DISPATCH; } else if (predictedKind == SK_LOOKUP) { if (isLookupStub(stubStartAddress)) return SK_LOOKUP; } else if (predictedKind == SK_RESOLVE) { if (isResolvingStub(stubStartAddress)) return SK_RESOLVE; } else if (predictedKind == SK_VTABLECALL) { if (isVTableCallStub(stubStartAddress)) return SK_VTABLECALL; } // This is the slow case. If the predict returned SK_UNKNOWN, SK_BREAKPOINT, // or the predict was found to be incorrect when checked against the RangeLists // (isXXXStub), then we'll check each stub heap in sequence. if (isDispatchingStub(stubStartAddress)) return SK_DISPATCH; else if (isLookupStub(stubStartAddress)) return SK_LOOKUP; else if (isResolvingStub(stubStartAddress)) return SK_RESOLVE; else if (isVTableCallStub(stubStartAddress)) return SK_VTABLECALL; return SK_UNKNOWN; } inline BOOL isStub(PCODE stubStartAddress) { WRAPPER_NO_CONTRACT; SUPPORTS_DAC; return (getStubKind(stubStartAddress) != SK_UNKNOWN); } BOOL isDispatchingStub(PCODE stubStartAddress) { WRAPPER_NO_CONTRACT; SUPPORTS_DAC; return GetDispatchRangeList()->IsInRange(stubStartAddress); } BOOL isResolvingStub(PCODE stubStartAddress) { WRAPPER_NO_CONTRACT; SUPPORTS_DAC; return GetResolveRangeList()->IsInRange(stubStartAddress); } BOOL isLookupStub(PCODE stubStartAddress) { WRAPPER_NO_CONTRACT; SUPPORTS_DAC; return GetLookupRangeList()->IsInRange(stubStartAddress); } BOOL isVTableCallStub(PCODE stubStartAddress) { WRAPPER_NO_CONTRACT; SUPPORTS_DAC; return GetVTableCallRangeList()->IsInRange(stubStartAddress); } static BOOL isDispatchingStubStatic(PCODE addr) { WRAPPER_NO_CONTRACT; StubKind 
stubKind; FindStubManager(addr, &stubKind); return stubKind == SK_DISPATCH; } static BOOL isResolvingStubStatic(PCODE addr) { WRAPPER_NO_CONTRACT; StubKind stubKind; FindStubManager(addr, &stubKind); return stubKind == SK_RESOLVE; } static BOOL isLookupStubStatic(PCODE addr) { WRAPPER_NO_CONTRACT; StubKind stubKind; FindStubManager(addr, &stubKind); return stubKind == SK_LOOKUP; } static BOOL isVtableCallStubStatic(PCODE addr) { WRAPPER_NO_CONTRACT; StubKind stubKind; FindStubManager(addr, &stubKind); return stubKind == SK_VTABLECALL; } //use range lists to track the chunks of memory that are part of each heap LockedRangeList lookup_rangeList; LockedRangeList resolve_rangeList; LockedRangeList dispatch_rangeList; LockedRangeList cache_entry_rangeList; LockedRangeList vtable_rangeList; // Get dac-ized pointers to rangelist. RangeList* GetLookupRangeList() { SUPPORTS_DAC; TADDR addr = PTR_HOST_MEMBER_TADDR(VirtualCallStubManager, this, lookup_rangeList); return PTR_RangeList(addr); } RangeList* GetResolveRangeList() { SUPPORTS_DAC; TADDR addr = PTR_HOST_MEMBER_TADDR(VirtualCallStubManager, this, resolve_rangeList); return PTR_RangeList(addr); } RangeList* GetDispatchRangeList() { SUPPORTS_DAC; TADDR addr = PTR_HOST_MEMBER_TADDR(VirtualCallStubManager, this, dispatch_rangeList); return PTR_RangeList(addr); } RangeList* GetCacheEntryRangeList() { SUPPORTS_DAC; TADDR addr = PTR_HOST_MEMBER_TADDR(VirtualCallStubManager, this, cache_entry_rangeList); return PTR_RangeList(addr); } RangeList* GetVTableCallRangeList() { SUPPORTS_DAC; TADDR addr = PTR_HOST_MEMBER_TADDR(VirtualCallStubManager, this, vtable_rangeList); return PTR_RangeList(addr); } private: //allocate and initialize a stub of the desired kind DispatchHolder *GenerateDispatchStub(PCODE addrOfCode, PCODE addrOfFail, void *pMTExpected, size_t dispatchToken, bool *pMayHaveReenteredCooperativeGCMode); #ifdef TARGET_AMD64 // Used to allocate a long jump dispatch stub. 
See comment around // m_fShouldAllocateLongJumpDispatchStubs for explaination. DispatchHolder *GenerateDispatchStubLong(PCODE addrOfCode, PCODE addrOfFail, void *pMTExpected, size_t dispatchToken, bool *pMayHaveReenteredCooperativeGCMode); #endif ResolveHolder *GenerateResolveStub(PCODE addrOfResolver, PCODE addrOfPatcher, size_t dispatchToken #if defined(TARGET_X86) && !defined(UNIX_X86_ABI) , size_t stackArgumentsSize #endif ); LookupHolder *GenerateLookupStub(PCODE addrOfResolver, size_t dispatchToken); VTableCallHolder* GenerateVTableCallStub(DWORD slot); template <typename STUB_HOLDER> void AddToCollectibleVSDRangeList(STUB_HOLDER *holder) { if (m_loaderAllocator->IsCollectible()) { parentDomain->GetCollectibleVSDRanges()->AddRange(reinterpret_cast<BYTE *>(holder->stub()), reinterpret_cast<BYTE *>(holder->stub()) + holder->stub()->size(), this); } } // The resolve cache is static across all AppDomains ResolveCacheElem *GenerateResolveCacheElem(void *addrOfCode, void *pMTExpected, size_t token, bool *pMayHaveReenteredCooperativeGCMode); ResolveCacheElem *GetResolveCacheElem(void *pMT, size_t token, void *target); //Given a dispatch token, an object and a method table, determine the //target address to go to. The return value (BOOL) states whether this address //is cacheable or not. static BOOL Resolver(MethodTable * pMT, DispatchToken token, OBJECTREF * protectedObj, PCODE * ppTarget, BOOL throwOnConflict); // This can be used to find a target without needing the ability to throw static BOOL TraceResolver(Object *pObj, DispatchToken token, TraceDestination *trace); public: // Return the MethodDesc corresponding to this token. 
static MethodDesc *GetRepresentativeMethodDescFromToken(DispatchToken token, MethodTable *pMT);
static MethodDesc *GetInterfaceMethodDescFromToken(DispatchToken token);
static MethodTable *GetTypeFromToken(DispatchToken token);

// This is used to get the token out of a stub
static size_t GetTokenFromStub(PCODE stub);

// This is used to get the token out of a stub when we already know the stub manager and stub kind
static size_t GetTokenFromStubQuick(VirtualCallStubManager * pMgr, PCODE stub, StubKind kind);

// General utility functions

// Quick lookup in the cache. NOTHROW, GC_NOTRIGGER
static PCODE CacheLookup(size_t token, UINT16 tokenHash, MethodTable *pMT);

// Full exhaustive lookup. THROWS, GC_TRIGGERS
static PCODE GetTarget(DispatchToken token, MethodTable *pMT, BOOL throwOnConflict);

private:
// Given a dispatch token, return true if the token represents an interface, false if just a slot.
static BOOL IsInterfaceToken(DispatchToken token);

// Given a dispatch token, return true if the token represents a slot on the target.
static BOOL IsClassToken(DispatchToken token);

#ifdef CHAIN_LOOKUP
static ResolveCacheElem* __fastcall PromoteChainEntry(ResolveCacheElem *pElem);
#endif

// Flags used by the non-x86 versions of VSD_ResolveWorker
#define SDF_ResolveBackPatch    (0x01)
#define SDF_ResolvePromoteChain (0x02)
#define SDF_ResolveFlags        (0x03)

// These methods need to call the instance methods.
friend PCODE VSD_ResolveWorker(TransitionBlock * pTransitionBlock,
                               TADDR siteAddrForRegisterIndirect,
                               size_t token
#ifndef TARGET_X86
                               , UINT_PTR flags
#endif
                               );

#if defined(TARGET_X86) && defined(TARGET_UNIX)
friend void BackPatchWorkerStaticStub(PCODE returnAddr, TADDR siteAddrForRegisterIndirect);
#endif

// These are the entrypoints that the stubs actually end up calling via the
// xxxAsmStub methods above
static void STDCALL BackPatchWorkerStatic(PCODE returnAddr, TADDR siteAddrForRegisterIndirect);

public:
PCODE ResolveWorker(StubCallSite* pCallSite, OBJECTREF *protectedObj, DispatchToken token, StubKind stubKind);
void BackPatchWorker(StubCallSite* pCallSite);

// Change the callsite to point to stub
void BackPatchSite(StubCallSite* pCallSite, PCODE stub);

public:
/* the following two public functions are to support tracing or
   stepping thru stubs via the debugger. */
virtual BOOL CheckIsStub_Internal(PCODE stubStartAddress);
virtual BOOL TraceManager(Thread *thread,
                          TraceDestination *trace,
                          T_CONTEXT *pContext,
                          BYTE **pRetAddr);

// Total bytes currently held by this manager's stub heaps (heaps may not all exist yet).
size_t GetSize()
{
    LIMITED_METHOD_CONTRACT;
    size_t retval=0;
    if(indcell_heap)     retval+=indcell_heap->GetSize();
    if(cache_entry_heap) retval+=cache_entry_heap->GetSize();
    if(lookup_heap)      retval+=lookup_heap->GetSize();
    if(dispatch_heap)    retval+=dispatch_heap->GetSize();
    if(resolve_heap)     retval+=resolve_heap->GetSize();
    return retval;
};

private:
/* the following two private functions are to support tracing or
   stepping thru stubs via the debugger. */
virtual BOOL DoTraceStub(PCODE stubStartAddress, TraceDestination *trace);

private:
// The parent domain of this manager
PTR_BaseDomain      parentDomain;

PTR_LoaderAllocator m_loaderAllocator;

BYTE *              m_initialReservedMemForHeaps;

static const UINT32 INDCELLS_PER_BLOCK = 32;    // 32 indirection cells per block.

CrstExplicitInit    m_indCellLock;

// List of free indirection cells. The cells were directly allocated from the loader heap
// (code:VirtualCallStubManager::GenerateStubIndirection)
BYTE * m_FreeIndCellList;

// List of recycled indirection cells. The cells were recycled from finalized dynamic methods
// (code:LCGMethodResolver::RecycleIndCells).
BYTE * m_RecycledIndCellList;

#ifndef DACCESS_COMPILE
// This method returns a free cell from m_FreeIndCellList. It returns NULL if the list is empty.
BYTE * GetOneFreeIndCell()
{
    WRAPPER_NO_CONTRACT;
    return GetOneIndCell(&m_FreeIndCellList);
}

// This method returns a recycled cell from m_RecycledIndCellList. It returns NULL if the list is empty.
BYTE * GetOneRecycledIndCell()
{
    WRAPPER_NO_CONTRACT;
    return GetOneIndCell(&m_RecycledIndCellList);
}

// This method returns a cell from ppList. It returns NULL if the list is empty.
// Caller must hold m_indCellLock (see PRECONDITION).
BYTE * GetOneIndCell(BYTE ** ppList)
{
    CONTRACT (BYTE*) {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
        PRECONDITION(CheckPointer(ppList));
        PRECONDITION(m_indCellLock.OwnedByCurrentThread());
    } CONTRACT_END;

    BYTE * temp = *ppList;

    if (temp)
    {
        // Pop the head; each cell's first pointer-sized slot links to the next cell.
        BYTE * pNext = *((BYTE **)temp);
        *ppList = pNext;
        RETURN temp;
    }

    RETURN NULL;
}

// insert a linked list of indirection cells at the beginning of m_FreeIndCellList
void InsertIntoFreeIndCellList(BYTE * head, BYTE * tail)
{
    WRAPPER_NO_CONTRACT;
    InsertIntoIndCellList(&m_FreeIndCellList, head, tail);
}

// insert a linked list of indirection cells at the beginning of ppList
// Caller must hold m_indCellLock (see PRECONDITION).
void InsertIntoIndCellList(BYTE ** ppList, BYTE * head, BYTE * tail)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        PRECONDITION(CheckPointer(ppList));
        PRECONDITION(CheckPointer(head));
        PRECONDITION(CheckPointer(tail));
        PRECONDITION(m_indCellLock.OwnedByCurrentThread());
    } CONTRACTL_END;

    // Splice the [head..tail] chain in front of the current list head.
    BYTE * temphead = *ppList;
    *((BYTE**)tail) = temphead;
    *ppList = head;
}
#endif // !DACCESS_COMPILE

PTR_LoaderHeap  indcell_heap;       // indirection cells go here
PTR_LoaderHeap  cache_entry_heap;   // resolve cache elem entries go here
PTR_LoaderHeap  lookup_heap;        // lookup stubs go here
PTR_LoaderHeap dispatch_heap; // dispatch stubs go here PTR_LoaderHeap resolve_heap; // resolve stubs go here PTR_LoaderHeap vtable_heap; // vtable-based jump stubs go here #ifdef TARGET_AMD64 // When we layout the stub heaps, we put them close together in a sequential order // so that we maximize performance with respect to branch predictions. On AMD64, // dispatch stubs use a rel32 jump on failure to the resolve stub. This works for // a while because of the ordering, but as soon as we have to start allocating more // memory for either the dispatch or resolve heaps we have a chance that we'll be // further away than a rel32 jump can reach, because we're in a 64-bit address // space. As such, this flag will indicate when we allocate the first dispatch stub // that cannot reach a resolve stub, and when this happens we'll switch over to // allocating the larger version of the dispatch stub which contains an abs64 jump. //@TODO: This is a bit of a workaround, but the limitations of LoaderHeap require that we //@TODO: take this approach. Hopefully in Orcas we'll have a chance to rewrite LoaderHeap. BOOL m_fShouldAllocateLongJumpDispatchStubs; // Defaults to FALSE. #endif BucketTable * lookups; // hash table of lookups keyed by tokens BucketTable * cache_entries; // hash table of dispatch token/target structs for dispatch cache BucketTable * dispatchers; // hash table of dispatching stubs keyed by tokens/actualtype BucketTable * resolvers; // hash table of resolvers keyed by tokens/resolverstub BucketTable * vtableCallers; // hash table of vtable call stubs keyed by slot values // This structure is used to keep track of the fail counters. // We only need one fail counter per ResolveStub, // and most programs use less than 250 ResolveStubs // We allocate these on the main heap using "new counter block" struct counter_block { static const UINT32 MAX_COUNTER_ENTRIES = 256-2; // 254 counters should be enough for most cases. 
    counter_block * next;                       // the next block
    UINT32          used;                       // the index of the next free entry
    INT32           block[MAX_COUNTER_ENTRIES]; // the counters
};

counter_block *m_counters;                      // linked list of counter blocks of failure counters
counter_block *m_cur_counter_block;             // current block for updating counts
counter_block *m_cur_counter_block_for_reclaim; // current block for updating
UINT32         m_cur_counter_block_for_reclaim_index;   // index into the current block for updating

// Used to keep track of all the VCSManager objects in the system.
PTR_VirtualCallStubManager m_pNext;             // Linked list pointer

public:
// Given a stub address, find the VCSManager that owns it.
static VirtualCallStubManager *FindStubManager(PCODE addr,
                                               StubKind* wbStubKind = NULL,
                                               BOOL usePredictStubKind = TRUE);

#ifndef DACCESS_COMPILE
// insert a linked list of indirection cells at the beginning of m_RecycledIndCellList
// (unlike the helpers above, this one acquires m_indCellLock itself)
void InsertIntoRecycledIndCellList_Locked(BYTE * head, BYTE * tail)
{
    CONTRACTL {
        NOTHROW;
        GC_TRIGGERS;
        MODE_ANY;
    } CONTRACTL_END;

    CrstHolder lh(&m_indCellLock);

    InsertIntoIndCellList(&m_RecycledIndCellList, head, tail);
}
#endif // !DACCESS_COMPILE

// These are the counters for keeping statistics
struct
{
    UINT32 site_counter;            //# of call sites
    UINT32 stub_lookup_counter;     //# of lookup stubs
    UINT32 stub_poly_counter;       //# of resolve stubs
    UINT32 stub_mono_counter;       //# of dispatch stubs
    UINT32 stub_vtable_counter;     //# of vtable call stubs
    UINT32 site_write;              //# of call site backpatch writes
    UINT32 site_write_poly;         //# of call site backpatch writes to point to resolve stubs
    UINT32 site_write_mono;         //# of call site backpatch writes to point to dispatch stubs
    UINT32 worker_call;             //# of calls into ResolveWorker
    UINT32 worker_call_no_patch;    //# of times call_worker resulted in no patch
    UINT32 worker_collide_to_mono;  //# of times we converted a poly stub to a mono stub instead of writing the cache entry
    UINT32 stub_space;              //# of bytes of stubs
    UINT32 cache_entry_counter;     //# of cache structs
    UINT32 cache_entry_space;       //# of bytes used by cache lookup structs
} stats;

void LogStats();

#ifdef DACCESS_COMPILE
protected:
virtual void DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags);
// Debugger-side name for the stub at 'addr', derived from its stub kind.
virtual LPCWSTR GetStubManagerName(PCODE addr)
{
    WRAPPER_NO_CONTRACT;
    CONSISTENCY_CHECK(isStub(addr));

    if (isLookupStub(addr))
    {
        return W("VSD_LookupStub");
    }
    else if (isDispatchingStub(addr))
    {
        return W("VSD_DispatchStub");
    }
    else
    {
        CONSISTENCY_CHECK(isResolvingStub(addr));
        return W("VSD_ResolveStub");
    }
}
#endif
};

/********************************************************************************************************
********************************************************************************************************/
typedef VPTR(class VirtualCallStubManagerManager) PTR_VirtualCallStubManagerManager;

class VirtualCallStubManagerIterator;

// Tracks every VirtualCallStubManager in the process in a linked list and routes
// StubManager queries to the manager that owns a given stub address.
class VirtualCallStubManagerManager : public StubManager
{
    VPTR_VTABLE_CLASS(VirtualCallStubManagerManager, StubManager)

    friend class StubManager;
    friend class VirtualCallStubManager;
    friend class VirtualCallStubManagerIterator;
    friend class StubManagerIterator;

  public:
    virtual BOOL TraceManager(Thread *thread, TraceDestination *trace,
                              T_CONTEXT *pContext, BYTE **pRetAddr);

    virtual BOOL CheckIsStub_Internal(PCODE stubStartAddress);

    virtual BOOL DoTraceStub(PCODE stubStartAddress, TraceDestination *trace);

    static MethodDesc *Entry2MethodDesc(PCODE stubStartAddress, MethodTable *pMT);

#ifdef DACCESS_COMPILE
    virtual void DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags);
    // Delegates to the owning manager's GetStubManagerName.
    virtual LPCWSTR GetStubManagerName(PCODE addr)
    {
        WRAPPER_NO_CONTRACT;
        return FindVirtualCallStubManager(addr)->GetStubManagerName(addr);
    }
#endif

  private:
    // Used to keep track of all the VCSManager objects in the system.
    PTR_VirtualCallStubManager m_pManagers;     // Head of the linked list

#ifndef DACCESS_COMPILE
    // Ctor. This is only used by StaticInit.
    VirtualCallStubManagerManager();
#endif

    // A cache element to quickly check the last matched manager.
Volatile<VirtualCallStubManager*> m_pCacheElem; // RW lock for reading entries and removing them. SimpleRWLock m_RWLock; // This will look through all the managers in an intelligent fashion to // find the manager that owns the address. VirtualCallStubManager *FindVirtualCallStubManager(PCODE stubAddress); protected: // Add a VCSManager to the linked list. void AddStubManager(VirtualCallStubManager *pMgr); // Remove a VCSManager from the linked list. void RemoveStubManager(VirtualCallStubManager *pMgr); VirtualCallStubManager *FirstManager() { WRAPPER_NO_CONTRACT; return m_pManagers; } #ifndef DACCESS_COMPILE static void InitStatic(); #endif public: SPTR_DECL(VirtualCallStubManagerManager, g_pManager); static VirtualCallStubManagerManager *GlobalManager() { LIMITED_METHOD_DAC_CONTRACT; CONSISTENCY_CHECK(CheckPointer(g_pManager)); return g_pManager; } VirtualCallStubManagerIterator IterateVirtualCallStubManagers(); #ifdef _DEBUG // Debug helper to help identify stub-managers. virtual const char * DbgGetName() { LIMITED_METHOD_CONTRACT; return "VirtualCallStubManagerManager"; } #endif }; /******************************************************************************************************** ********************************************************************************************************/ class VirtualCallStubManagerIterator { friend class VirtualCallStubManagerManager; public: BOOL Next(); VirtualCallStubManager *Current(); // Copy ctor inline VirtualCallStubManagerIterator(const VirtualCallStubManagerIterator &it); protected: inline VirtualCallStubManagerIterator(VirtualCallStubManagerManager *pMgr); BOOL m_fIsStart; VirtualCallStubManager *m_pCurMgr; }; ///////////////////////////////////////////////////////////////////////////////////////////// // Ctor inline VirtualCallStubManagerIterator::VirtualCallStubManagerIterator(VirtualCallStubManagerManager *pMgr) : m_fIsStart(TRUE), m_pCurMgr(pMgr->m_pManagers) { LIMITED_METHOD_DAC_CONTRACT; 
CONSISTENCY_CHECK(CheckPointer(pMgr)); } ///////////////////////////////////////////////////////////////////////////////////////////// // Copy ctor inline VirtualCallStubManagerIterator::VirtualCallStubManagerIterator(const VirtualCallStubManagerIterator &it) : m_fIsStart(it.m_fIsStart), m_pCurMgr(it.m_pCurMgr) { LIMITED_METHOD_DAC_CONTRACT; } /******************************************************************************************************** #StubDispatchNotes A note on approach. The cache and hash tables used by the stub and lookup mechanism are designed with an eye to minimizing interlocking and/or syncing and/or locking operations. They are intended to run in a highly concurrent environment. Since there is no magic, some tradeoffs and and some implementation constraints are required. The basic notion is that if all reads and writes are atomic and if all functions and operations operate correctly in the face of commutative reorderings of the visibility of all reads and writes across threads, then we don't have to interlock, sync, or serialize. Our approximation of this is: 1. All reads and all writes to tables must be atomic. This effectively limits the actual entry size in a table to be a pointer or a pointer sized thing. 2. All functions, like comparisons for equality or computation of hash values must function correctly in the face of concurrent updating of the underlying table. This is accomplished by making the underlying structures/entries effectively immutable, if concurrency is in anyway possible. By effectively immutatable, we mean that the stub or token structure is either immutable or that if it is ever written, all possibley concurrent writes are attempting to write the same value (atomically) or that the competing (atomic) values do not affect correctness, and that the function operates correctly whether or not any of the writes have taken place (is visible yet). 
The constraint we maintain is that all competing updates (and their visibility or lack thereof) do not alter the correctness of the program.

3. All tables are inexact. The counts they hold (e.g. number of contained entries) may be inaccurate, but that inaccuracy cannot affect their correctness. Table modifications, such as insertion of a new entry, may not succeed, but such failures cannot affect correctness. This implies that just because a stub/entry is not present in a table, e.g. has been removed, that does not mean that it is not in use. It also implies that internal table structures, such as discarded hash table buckets, cannot be freely recycled since another concurrent thread may still be walking through it.

4. Occasionally it is necessary to pick up the pieces that have been dropped on the floor so to speak, e.g. actually recycle hash buckets that aren't in use. Since we have a natural sync point already in the GC, we use that to provide cleanup points. We need to make sure that code that is walking our structures is not a GC safe point. Hence if the GC calls back into us inside the GC sync point, we know that nobody is inside our structures and we can safely rearrange and recycle things.
********************************************************************************************************/ //initial and increment value for fail stub counters #ifdef STUB_LOGGING extern UINT32 STUB_MISS_COUNT_VALUE; extern UINT32 STUB_COLLIDE_WRITE_PCT; extern UINT32 STUB_COLLIDE_MONO_PCT; #else // !STUB_LOGGING #define STUB_MISS_COUNT_VALUE 100 #define STUB_COLLIDE_WRITE_PCT 100 #define STUB_COLLIDE_MONO_PCT 0 #endif // !STUB_LOGGING //size and mask of the cache used by resolve stubs // CALL_STUB_CACHE_SIZE must be equal to 2^CALL_STUB_CACHE_NUM_BITS #define CALL_STUB_CACHE_NUM_BITS 12 //10 #define CALL_STUB_CACHE_SIZE 4096 //1024 #define CALL_STUB_CACHE_MASK (CALL_STUB_CACHE_SIZE-1) #define CALL_STUB_CACHE_PROBES 5 //min sizes for BucketTable and buckets and the growth and hashing constants #define CALL_STUB_MIN_BUCKETS 32 #define CALL_STUB_MIN_ENTRIES 4 //this is so that the very first growth will jump from 4 to 32 entries, then double from there. #define CALL_STUB_SECONDARY_ENTRIES 8 #define CALL_STUB_GROWTH_FACTOR 2 #define CALL_STUB_LOAD_FACTOR 90 #define CALL_STUB_HASH_CONST1 1327 #define CALL_STUB_HASH_CONST2 43627 #define LARGE_PRIME 7199369 //internal layout of buckets=size-1,count,entries.... #define CALL_STUB_MASK_INDEX 0 #define CALL_STUB_COUNT_INDEX 1 #define CALL_STUB_DEAD_LINK 2 #define CALL_STUB_FIRST_INDEX 3 //marker entries in cache and hash tables #define CALL_STUB_EMPTY_ENTRY 0 // number of successes for a chained element before it gets moved to the front #define CALL_STUB_CACHE_INITIAL_SUCCESS_COUNT (0x100) /******************************************************************************************************* Entry is an abstract class. We will make specific subclasses for each kind of entry. Entries hold references to stubs or tokens. The principle thing they provide is a virtual Equals function that is used by the caching and hashing tables within which the stubs and tokens are stored. 
Entries are typically stack allocated by the routines that call into the hash and caching functions,
and the functions stuff stubs into the entry to do the comparisons. Essentially specific entry
subclasses supply a vtable to a stub as and when needed. This means we don't have to have vtables
attached to stubs.

Summarizing so far, there is a struct for each kind of stub or token of the form XXXXStub.
They provide the actual storage layouts.
There is a struct in which each stub which has code is contained of the form XXXXHolder.
They provide alignment and ancillary storage for the stub code.
There is a subclass of Entry for each kind of stub or token, of the form XXXXEntry.
They provide the specific implementations of the virtual functions declared in Entry. */
class Entry
{
  public:
    // access and compare the keys of the entry
    virtual BOOL Equals(size_t keyA, size_t keyB)=0;
    virtual size_t KeyA()=0;
    virtual size_t KeyB()=0;

    // contents is the struct or token that the entry exposes
    virtual void SetContents(size_t contents)=0;
};

/* define the platform specific Stubs and stub holders */
#include <virtualcallstubcpu.hpp>

#if USES_LOOKUP_STUBS
/**********************************************************************************************
LookupEntry wraps LookupStubs and provides the concrete implementation of the abstract class Entry.
Virtual and interface call sites when they are first jitted point to LookupStubs.
The hash table that contains lookup stubs is keyed by token, hence the Equals function uses the
embedded token in the stub for comparison purposes. Since we are willing to allow duplicates in
the hash table (as long as they are relatively rare) we do use direct comparison of the tokens
rather than extracting the fields from within the tokens, for perf reasons. */
class LookupEntry : public Entry
{
  public:
    // Creates an entry that wraps lookup stub s
    LookupEntry(size_t s)
    {
        LIMITED_METHOD_CONTRACT;
        _ASSERTE(VirtualCallStubManager::isLookupStubStatic((PCODE)s));
        stub = (LookupStub*) s;
    }

    // default constructor to allow stack and inline allocation of lookup entries
    LookupEntry() {LIMITED_METHOD_CONTRACT; stub = NULL;}

    // implementations of abstract class Entry
    BOOL Equals(size_t keyA, size_t keyB)
        { WRAPPER_NO_CONTRACT; return stub && (keyA == KeyA()) && (keyB == KeyB()); }

    size_t KeyA() { WRAPPER_NO_CONTRACT; return Token(); }
    size_t KeyB() { WRAPPER_NO_CONTRACT; return (size_t)0; }

    void SetContents(size_t contents)
    {
        LIMITED_METHOD_CONTRACT;
        _ASSERTE(VirtualCallStubManager::isLookupStubStatic((PCODE)contents));
        stub = LookupHolder::FromLookupEntry((PCODE)contents)->stub();
    }

    // extract the token of the underlying lookup stub
    inline size_t Token() { LIMITED_METHOD_CONTRACT; return stub ? stub->token() : 0; }

  private:
    LookupStub* stub;   // the stub the entry is wrapping
};
#endif // USES_LOOKUP_STUBS

// Entry subclass wrapping a VTableCallStub; keyed by token only (KeyB is unused).
class VTableCallEntry : public Entry
{
  public:
    // Creates an entry that wraps vtable call stub s
    VTableCallEntry(size_t s)
    {
        LIMITED_METHOD_CONTRACT;
        _ASSERTE(VirtualCallStubManager::isVtableCallStubStatic((PCODE)s));
        stub = (VTableCallStub*)s;
    }

    // default constructor to allow stack and inline allocation of vtable call entries
    VTableCallEntry() { LIMITED_METHOD_CONTRACT; stub = NULL; }

    // implementations of abstract class Entry
    BOOL Equals(size_t keyA, size_t keyB)
        { WRAPPER_NO_CONTRACT; return stub && (keyA == KeyA()) && (keyB == KeyB()); }

    size_t KeyA() { WRAPPER_NO_CONTRACT; return Token(); }
    size_t KeyB() { WRAPPER_NO_CONTRACT; return (size_t)0; }

    void SetContents(size_t contents)
    {
        LIMITED_METHOD_CONTRACT;
        _ASSERTE(VirtualCallStubManager::isVtableCallStubStatic((PCODE)contents));
        stub = VTableCallHolder::FromVTableCallEntry((PCODE)contents)->stub();
    }

    // extract the token of the underlying vtable call stub
    inline size_t Token() {
LIMITED_METHOD_CONTRACT; return stub ? stub->token() : 0; } private: VTableCallStub* stub; //the stub the entry wrapping }; /********************************************************************************************** ResolveCacheEntry wraps a ResolveCacheElem and provides lookup functionality for entries that were created that may be added to the ResolveCache */ class ResolveCacheEntry : public Entry { public: ResolveCacheEntry(size_t elem) { LIMITED_METHOD_CONTRACT; _ASSERTE(elem != 0); pElem = (ResolveCacheElem*) elem; } //default contructor to allow stack and inline allocation of lookup entries ResolveCacheEntry() { LIMITED_METHOD_CONTRACT; pElem = NULL; } //access and compare the keys of the entry virtual BOOL Equals(size_t keyA, size_t keyB) { WRAPPER_NO_CONTRACT; return pElem && (keyA == KeyA()) && (keyB == KeyB()); } virtual size_t KeyA() { LIMITED_METHOD_CONTRACT; return pElem != NULL ? pElem->token : 0; } virtual size_t KeyB() { LIMITED_METHOD_CONTRACT; return pElem != NULL ? (size_t) pElem->pMT : 0; } //contents is the struct or token that the entry exposes virtual void SetContents(size_t contents) { LIMITED_METHOD_CONTRACT; pElem = (ResolveCacheElem*) contents; } inline const BYTE *Target() { LIMITED_METHOD_CONTRACT; return pElem != NULL ? (const BYTE *)pElem->target : NULL; } private: ResolveCacheElem *pElem; }; /********************************************************************************************** ResolveEntry wraps ResolveStubs and provide the concrete implementation of the abstract class Entry. Polymorphic call sites and monomorphic calls that fail end up in a ResolveStub. Resolve stubs are stored in hash tables keyed by token, hence the Equals function uses the embedded token in the stub for comparison purposes. Since we are willing to allow duplicates in the hash table (as long as they are relatively rare) we do use direct comparison of the tokens rather than extracting the fields from within the tokens, for perf reasons. 
*/ class ResolveEntry : public Entry { public: //Creates an entry that wraps resolve stub s ResolveEntry (size_t s) { LIMITED_METHOD_CONTRACT; _ASSERTE(VirtualCallStubManager::isResolvingStubStatic((PCODE)s)); stub = (ResolveStub*) s; } //default contructor to allow stack and inline allocation of resovler entries ResolveEntry() { LIMITED_METHOD_CONTRACT; stub = CALL_STUB_EMPTY_ENTRY; } //implementations of abstract class Entry inline BOOL Equals(size_t keyA, size_t keyB) { WRAPPER_NO_CONTRACT; return stub && (keyA == KeyA()) && (keyB == KeyB()); } inline size_t KeyA() { WRAPPER_NO_CONTRACT; return Token(); } inline size_t KeyB() { WRAPPER_NO_CONTRACT; return (size_t)0; } void SetContents(size_t contents) { LIMITED_METHOD_CONTRACT; _ASSERTE(VirtualCallStubManager::isResolvingStubStatic((PCODE)contents)); stub = ResolveHolder::FromResolveEntry((PCODE)contents)->stub(); } //extract the token of the underlying resolve stub inline size_t Token() { WRAPPER_NO_CONTRACT; return stub ? (size_t)(stub->token()) : 0; } private: ResolveStub* stub; //the stub the entry is wrapping }; /********************************************************************************************** DispatchEntry wraps DispatchStubs and provide the concrete implementation of the abstract class Entry. Monomorphic and mostly monomorphic call sites eventually point to DispatchStubs. Dispatch stubs are placed in hash and cache tables keyed by the expected Method Table and token they are built for. 
Since we are willing to allow duplicates in the hash table (as long as they are relatively rare) we do use direct comparison of the tokens rather than extracting the fields from within the tokens, for perf reasons.*/ class DispatchEntry : public Entry { public: //Creates an entry that wraps dispatch stub s DispatchEntry (size_t s) { LIMITED_METHOD_CONTRACT; _ASSERTE(VirtualCallStubManager::isDispatchingStubStatic((PCODE)s)); stub = (DispatchStub*) s; } //default contructor to allow stack and inline allocation of resovler entries DispatchEntry() { LIMITED_METHOD_CONTRACT; stub = CALL_STUB_EMPTY_ENTRY; } //implementations of abstract class Entry inline BOOL Equals(size_t keyA, size_t keyB) { WRAPPER_NO_CONTRACT; return stub && (keyA == KeyA()) && (keyB == KeyB()); } inline size_t KeyA() { WRAPPER_NO_CONTRACT; return Token(); } inline size_t KeyB() { WRAPPER_NO_CONTRACT; return ExpectedMT();} void SetContents(size_t contents) { LIMITED_METHOD_CONTRACT; _ASSERTE(VirtualCallStubManager::isDispatchingStubStatic((PCODE)contents)); stub = DispatchHolder::FromDispatchEntry((PCODE)contents)->stub(); } //extract the fields of the underlying dispatch stub inline size_t ExpectedMT() { WRAPPER_NO_CONTRACT; return stub ? (size_t)(stub->expectedMT()) : 0; } size_t Token() { WRAPPER_NO_CONTRACT; if (stub) { ResolveHolder * resolveHolder = ResolveHolder::FromFailEntry(stub->failTarget()); size_t token = resolveHolder->stub()->token(); _ASSERTE(token == VirtualCallStubManager::GetTokenFromStub((PCODE)stub)); return token; } else { return 0; } } inline PCODE Target() { WRAPPER_NO_CONTRACT; return stub ? stub->implTarget() : 0; } private: DispatchStub* stub; }; /************************************************************************************************* DispatchCache is the cache table that the resolve stubs use for inline polymorphic resolution of a call. 
The cache entry is logically a triplet of (method table, token, impl address) where
method table is the type of the calling frame's <this>, token identifies the method being
invoked, i.e. is a (type id,slot #) pair, and impl address is the address of the method
implementation.
*/
class DispatchCache
{
  public:
    static const UINT16 INVALID_HASH = (UINT16)(-1);

    DispatchCache();

    // read and write the cache keyed by (method table,token) pair.
    inline ResolveCacheElem* Lookup(size_t token, void* mt)
        { WRAPPER_NO_CONTRACT; return Lookup(token, INVALID_HASH, mt);}

    ResolveCacheElem* Lookup(size_t token, UINT16 tokenHash, void* mt);

    enum InsertKind {IK_NONE, IK_DISPATCH, IK_RESOLVE, IK_SHARED, IK_EXTERNAL};

    BOOL Insert(ResolveCacheElem* elem, InsertKind insertKind);
#ifdef CHAIN_LOOKUP
    void PromoteChainEntry(ResolveCacheElem* elem);
#endif

    // This is the heavyweight hashing algorithm. Use sparingly.
    static UINT16 HashToken(size_t token);

    // Report the total slot count and the number of occupied slots.
    inline void GetLoadFactor(size_t *total, size_t *used)
    {
        LIMITED_METHOD_CONTRACT;

        *total = CALL_STUB_CACHE_SIZE;
        size_t count = 0;
        for (size_t i = 0; i < CALL_STUB_CACHE_SIZE; i++)
            if (cache[i] != empty)
                count++;

        *used = count;
    }

    inline void *GetCacheBaseAddr()
        { LIMITED_METHOD_CONTRACT; return &cache[0]; }
    inline size_t GetCacheCount()
        { LIMITED_METHOD_CONTRACT; return CALL_STUB_CACHE_SIZE; }
    inline ResolveCacheElem *GetCacheEntry(size_t idx)
        { LIMITED_METHOD_CONTRACT; return VolatileLoad(&cache[idx]); }
    inline BOOL IsCacheEntryEmpty(size_t idx)
        { LIMITED_METHOD_CONTRACT; return cache[idx] == empty; }

    inline void SetCacheEntry(size_t idx, ResolveCacheElem *elem)
    {
        LIMITED_METHOD_CONTRACT;
#ifdef STUB_LOGGING
        cacheData[idx].numWrites++;
#endif
#ifdef CHAIN_LOOKUP
        CONSISTENCY_CHECK(m_writeLock.OwnedByCurrentThread());
#endif
        cache[idx] = elem;
    }

    inline void ClearCacheEntry(size_t idx)
    {
        LIMITED_METHOD_CONTRACT;
#ifdef STUB_LOGGING
        cacheData[idx].numClears++;
#endif
        cache[idx] = empty;
    }

    struct
    {
        UINT32 insert_cache_external;   //# of times Insert was called for IK_EXTERNAL
        UINT32 insert_cache_shared;     //# of times Insert was called for IK_SHARED
        UINT32 insert_cache_dispatch;   //# of times Insert was called for IK_DISPATCH
        UINT32 insert_cache_resolve;    //# of times Insert was called for IK_RESOLVE
        UINT32 insert_cache_hit;        //# of times Insert found an empty cache entry
        UINT32 insert_cache_miss;       //# of times Insert already had a matching cache entry
        UINT32 insert_cache_collide;    //# of times Insert found a used cache entry
        UINT32 insert_cache_write;      //# of times Insert wrote a cache entry
    } stats;

    void LogStats();

    // Unlocked iterator of entries. Use only when read/write access to the cache
    // is safe. This would typically be at GC sync points, currently needed during
    // appdomain unloading.
    class Iterator
    {
      public:
        Iterator(DispatchCache *pCache);
        inline BOOL IsValid()
            { WRAPPER_NO_CONTRACT; return (m_curBucket < (INT32)m_pCache->GetCacheCount()); }
        void Next();

        // Unlink the current entry.
        // **NOTE** Using this method implicitly performs a call to Next to move
        //          past the unlinked entry. Thus, one could accidentally skip
        //          entries unless you take this into consideration.
ResolveCacheElem *UnlinkEntry(); inline ResolveCacheElem *Entry() { LIMITED_METHOD_CONTRACT; CONSISTENCY_CHECK(IsValid()); return *m_ppCurElem; } private: void NextValidBucket(); inline void NextBucket() { LIMITED_METHOD_CONTRACT; m_curBucket++; m_ppCurElem = &m_pCache->cache[m_curBucket]; } DispatchCache *m_pCache; INT32 m_curBucket; ResolveCacheElem **m_ppCurElem; }; private: #ifdef CHAIN_LOOKUP Crst m_writeLock; #endif //the following hash computation is also inlined in the resolve stub in asm (SO NO TOUCHIE) inline static UINT16 HashMT(UINT16 tokenHash, void* mt) { LIMITED_METHOD_CONTRACT; UINT16 hash; size_t mtHash = (size_t) mt; mtHash = (((mtHash >> CALL_STUB_CACHE_NUM_BITS) + mtHash) >> LOG2_PTRSIZE) & CALL_STUB_CACHE_MASK; hash = (UINT16) mtHash; hash ^= (tokenHash & CALL_STUB_CACHE_MASK); return hash; } ResolveCacheElem* cache[CALL_STUB_CACHE_SIZE]; //must be first ResolveCacheElem* empty; //empty entry, initialized to fail all comparisons #ifdef STUB_LOGGING public: struct CacheEntryData { UINT32 numWrites; UINT16 numClears; }; CacheEntryData cacheData[CALL_STUB_CACHE_SIZE]; #endif // STUB_LOGGING }; /************************************************************************************************** The hash tables are accessed via instances of the Prober. Prober is a probe into a bucket of the hash table, and therefore has an index which is the current probe position. It includes a count of the number of probes done in that bucket so far and a stride to step thru the bucket with. To do comparisons, it has a reference to an entry with which it can do comparisons (Equals(...)) of the entries (stubs) inside the hash table. It also has the key pair (keyA, keyB) that it is looking for. Typically, an entry of the appropriate type is created on the stack and then the prober is created passing in a reference to the entry. The prober is used for a complete operation, such as look for and find an entry (stub), creating and inserting it as necessary. 
The initial index and the stride are orthogonal hashes of the key pair, i.e. we are doing a varient of double hashing. When we initialize the prober (see FormHash below) we set the initial probe based on one hash. The stride (used as a modulo addition of the probe position) is based on a different hash and is such that it will vist every location in the bucket before repeating. Hence it is imperative that the bucket size and the stride be relative prime wrt each other. We have chosen to make bucket sizes a power of 2, so we force stride to be odd. Note -- it must be assumed that multiple probers are walking the same tables and buckets at the same time. Additionally, the counts may not be accurate, and there may be duplicates in the tables. Since the tables do not allow concurrrent deletion, some of the concurrency issues are ameliorated. */ class Prober { friend class FastTable; friend class BucketTable; public: Prober(Entry* e) {LIMITED_METHOD_CONTRACT; comparer = e;} //find the requested entry, if not there return CALL_STUB_EMPTY_ENTRY size_t Find(); //add the entry into the bucket, if it is not already in the bucket. //return the entry actually in the bucket (existing or added) size_t Add(size_t entry); private: //return the bucket (FastTable*) that the prober is currently walking inline size_t* items() {LIMITED_METHOD_CONTRACT; return &base[-CALL_STUB_FIRST_INDEX];} //are there more probes possible, or have we probed everything in the bucket inline BOOL NoMore() {LIMITED_METHOD_CONTRACT; return probes>mask;} //both probes and mask are (-1) //advance the probe to a new place in the bucket inline BOOL Next() { WRAPPER_NO_CONTRACT; index = (index + stride) & mask; probes++; return !NoMore(); } inline size_t Read() { LIMITED_METHOD_CONTRACT; _ASSERTE(base); return VolatileLoad(&base[index]); } //initialize a prober across a bucket (table) for the specified keys. 
void InitProber(size_t key1, size_t key2, size_t* table); //set up the initial index and stride and probe count inline void FormHash() { LIMITED_METHOD_CONTRACT; probes = 0; //these two hash functions have not been formally measured for effectiveness //but they are at least orthogonal size_t a = ((keyA>>16) + keyA); size_t b = ((keyB>>16) ^ keyB); index = (((a*CALL_STUB_HASH_CONST1)>>4)+((b*CALL_STUB_HASH_CONST2)>>4)+CALL_STUB_HASH_CONST1) & mask; stride = ((a+(b*CALL_STUB_HASH_CONST1)+CALL_STUB_HASH_CONST2) | 1) & mask; } //atomically grab an empty slot so we can insert a new entry into the bucket BOOL GrabEntry(size_t entryValue); size_t keyA; //key pair we are looking for size_t keyB; size_t* base; //we have our own pointer to the bucket, so races don't matter. // We won't care if we do the lookup in an // outdated bucket (has grown out from under us). // All that will happen is possibly dropping an entry // on the floor or adding a duplicate. size_t index; //current probe point in the bucket size_t stride; //amount to step on each successive probe, must be relatively prime wrt the bucket size size_t mask; //size of bucket - 1 size_t probes; //number probes - 1 Entry* comparer;//used to compare an entry against the sought after key pair }; /******************************************************************************************************** FastTable is used to implement the buckets of a BucketTable, a bucketized hash table. A FastTable is an array of entries (contents). The first two slots of contents store the size-1 and count of entries actually in the FastTable. Note that the count may be inaccurate and there may be duplicates. Careful attention must be paid to eliminate the need for interlocked or serialized or locked operations in face of concurrency. 
*/ #ifdef _MSC_VER #pragma warning(push) #pragma warning(disable : 4200) // disable zero-sized array warning #endif // _MSC_VER class FastTable { friend class BucketTable; public: private: FastTable() { LIMITED_METHOD_CONTRACT; } ~FastTable() { LIMITED_METHOD_CONTRACT; } //initialize a prober for the specified keys. inline BOOL SetUpProber(size_t keyA, size_t keyB, Prober* probe) { CONTRACTL { NOTHROW; GC_NOTRIGGER; FORBID_FAULT; } CONTRACTL_END; _ASSERTE(probe); _ASSERTE(contents); probe->InitProber(keyA, keyB, &contents[0]); return TRUE; } //find the requested entry (keys of prober), if not there return CALL_STUB_EMPTY_ENTRY size_t Find(Prober* probe); //add the entry, if it is not already there. Probe is used to search. //Return the entry actually containted (existing or added) size_t Add(size_t entry, Prober* probe); void IncrementCount(); // Create a FastTable with space for numberOfEntries. Please note that this method // does not throw on OOM. **YOU MUST CHECK FOR NULL RETURN** static FastTable* MakeTable(size_t numberOfEntries) { CONTRACTL { THROWS; GC_TRIGGERS; INJECT_FAULT(COMPlusThrowOM();); } CONTRACTL_END; size_t size = CALL_STUB_MIN_ENTRIES; while (size < numberOfEntries) {size = size<<1;} // if (size == CALL_STUB_MIN_ENTRIES) // size += 3; size_t* bucket = new size_t[(sizeof(FastTable)/sizeof(size_t))+size+CALL_STUB_FIRST_INDEX]; FastTable* table = new (bucket) FastTable(); table->InitializeContents(size); return table; } //Initialize as empty void InitializeContents(size_t size) { LIMITED_METHOD_CONTRACT; memset(&contents[0], CALL_STUB_EMPTY_ENTRY, (size+CALL_STUB_FIRST_INDEX)*sizeof(BYTE*)); contents[CALL_STUB_MASK_INDEX] = size-1; } inline size_t tableMask() {LIMITED_METHOD_CONTRACT; return (size_t) (contents[CALL_STUB_MASK_INDEX]);} inline size_t tableSize() {LIMITED_METHOD_CONTRACT; return tableMask()+1;} inline size_t tableCount() {LIMITED_METHOD_CONTRACT; return (size_t) (contents[CALL_STUB_COUNT_INDEX]);} inline BOOL isFull() { 
LIMITED_METHOD_CONTRACT; return (tableCount()+1) * 100 / CALL_STUB_LOAD_FACTOR >= tableSize(); } //we store (size-1) in bucket[CALL_STUB_MASK_INDEX==0], //we store the used count in bucket[CALL_STUB_COUNT_INDEX==1], //we have an unused cell to use as a temp at bucket[CALL_STUB_DEAD_LINK==2], //and the table starts at bucket[CALL_STUB_FIRST_INDEX==3], size_t contents[0]; }; #ifdef _MSC_VER #pragma warning(pop) #endif /****************************************************************************************************** BucketTable is a bucketized hash table. It uses FastTables for its buckets. The hash tables used by the VirtualCallStubManager are BucketTables. The number of buckets is fixed at the time the table is created. The actual buckets are allocated as needed, and grow as necessary. The reason for using buckets is primarily to reduce the cost of growing, since only a single bucket is actually grown at any given time. Since the hash tables are accessed infrequently, the load factor that controls growth is quite high (90%). Since we use hashing to pick the bucket, and we use hashing to lookup inside the bucket, it is important that the hashing function used here is orthogonal to the ones used in the buckets themselves (see FastTable::FormHash). */ class BucketTable { public: BucketTable(size_t numberOfBuckets) { WRAPPER_NO_CONTRACT; size_t size = CALL_STUB_MIN_BUCKETS; while (size < numberOfBuckets) {size = size<<1;} buckets = AllocateBuckets(size); // Initialize statistics counters memset(&stats, 0, sizeof(stats)); } ~BucketTable() { LIMITED_METHOD_CONTRACT; if(buckets != NULL) { size_t size = bucketCount()+CALL_STUB_FIRST_INDEX; for(size_t ix = CALL_STUB_FIRST_INDEX; ix < size; ix++) delete (FastTable*)(buckets[ix]); delete buckets; } } //initialize a prober for the specified keys. 
BOOL SetUpProber(size_t keyA, size_t keyB, Prober *prober); //find the requested entry (keys of prober), if not there return CALL_STUB_EMPTY_ENTRY inline size_t Find(Prober* probe) {WRAPPER_NO_CONTRACT; return probe->Find();} //add the entry, if it is not already there. Probe is used to search. size_t Add(size_t entry, Prober* probe); //reclaim abandoned buckets. Buckets are abaondoned when they need to grow. //needs to be called inside a gc sync point. static void Reclaim(); struct { UINT32 bucket_space; //# of bytes in caches and tables, not including the stubs themselves UINT32 bucket_space_dead; //# of bytes of abandoned buckets not yet recycled. } stats; void LogStats(); private: inline size_t bucketMask() {LIMITED_METHOD_CONTRACT; return (size_t) (buckets[CALL_STUB_MASK_INDEX]);} inline size_t bucketCount() {LIMITED_METHOD_CONTRACT; return bucketMask()+1;} inline size_t ComputeBucketIndex(size_t keyA, size_t keyB) { LIMITED_METHOD_CONTRACT; size_t a = ((keyA>>16) + keyA); size_t b = ((keyB>>16) ^ keyB); return CALL_STUB_FIRST_INDEX+(((((a*CALL_STUB_HASH_CONST2)>>5)^((b*CALL_STUB_HASH_CONST1)>>5))+CALL_STUB_HASH_CONST2) & bucketMask()); } //grows the bucket referenced by probe. 
BOOL GetMoreSpace(const Prober* probe); //creates storage in which to store references to the buckets static size_t* AllocateBuckets(size_t size) { LIMITED_METHOD_CONTRACT; size_t* buckets = new size_t[size+CALL_STUB_FIRST_INDEX]; if (buckets != NULL) { memset(&buckets[0], CALL_STUB_EMPTY_ENTRY, (size+CALL_STUB_FIRST_INDEX)*sizeof(void*)); buckets[CALL_STUB_MASK_INDEX] = size-1; } return buckets; } inline size_t Read(size_t index) { LIMITED_METHOD_CONTRACT; CONSISTENCY_CHECK(index <= bucketMask()+CALL_STUB_FIRST_INDEX); return VolatileLoad(&buckets[index]); } inline void Write(size_t index, size_t value) { LIMITED_METHOD_CONTRACT; CONSISTENCY_CHECK(index <= bucketMask()+CALL_STUB_FIRST_INDEX); VolatileStore(&buckets[index], value); } // We store (#buckets-1) in bucket[CALL_STUB_MASK_INDEX ==0] // We have two unused cells at bucket[CALL_STUB_COUNT_INDEX ==1] // and bucket[CALL_STUB_DEAD_LINK ==2] // and the table starts at bucket[CALL_STUB_FIRST_INDEX ==3] // the number of elements is bucket[CALL_STUB_MASK_INDEX]+CALL_STUB_FIRST_INDEX size_t* buckets; static FastTable* dead; //linked list head of to be deleted (abandoned) buckets }; #endif // !_VIRTUAL_CALL_STUB_H
1
dotnet/runtime
66,234
Remove compiler warning suppression
Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
AaronRobinsonMSFT
2022-03-05T03:45:49Z
2022-03-09T00:57:12Z
220e67755ae6323a01011b83070dff6f84b02519
853e494abd1c1e12d28a76829b4680af7f694afa
Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
./src/mono/mono/metadata/abi.c
#include <config.h> #include <mono/utils/mono-memory-model.h> #include <mono/metadata/abi-details.h> typedef struct { const char alignment [MONO_ALIGN_COUNT]; } AbiDetails; #define DECLARE_ABI_DETAILS(I8, I16, I32, I64, F32, F64, PTR) \ const static AbiDetails mono_abi_details = { \ { I8, I16, I32, I64, F32, F64, PTR } \ }; \ #ifdef MONO_CROSS_COMPILE #if TARGET_WASM DECLARE_ABI_DETAILS (1, 2, 4, 8, 4, 8, 4) #elif TARGET_S390X DECLARE_ABI_DETAILS (1, 2, 4, 8, 4, 8, 8) #else #define DECL_OFFSET(struct,field) #define DECL_OFFSET2(struct,field,offset) #define DECL_ALIGN2(type,size) MONO_ALIGN_value_ ##type = size, #define DECL_SIZE(type) #define DECL_SIZE2(type,size) enum { #include "object-offsets.h" }; DECLARE_ABI_DETAILS ( MONO_ALIGN_value_gint8, MONO_ALIGN_value_gint16, MONO_ALIGN_value_gint32, MONO_ALIGN_value_gint64, MONO_ALIGN_value_float, MONO_ALIGN_value_double, MONO_ALIGN_value_gpointer) #endif #else #define MONO_CURRENT_ABI_ALIGNOF_TYPEDEF(type) typedef struct { char c; type x; } Mono_Align_Struct_ ##type; #define MONO_CURRENT_ABI_ALIGNOF(type) ((int)G_STRUCT_OFFSET(Mono_Align_Struct_ ##type, x)) /* Needed by MONO_CURRENT_ABI_ALIGNOF */ MONO_CURRENT_ABI_ALIGNOF_TYPEDEF(gint8) MONO_CURRENT_ABI_ALIGNOF_TYPEDEF(gint16) MONO_CURRENT_ABI_ALIGNOF_TYPEDEF(gint32) MONO_CURRENT_ABI_ALIGNOF_TYPEDEF(gint64) MONO_CURRENT_ABI_ALIGNOF_TYPEDEF(float) MONO_CURRENT_ABI_ALIGNOF_TYPEDEF(double) MONO_CURRENT_ABI_ALIGNOF_TYPEDEF(gpointer) DECLARE_ABI_DETAILS ( MONO_CURRENT_ABI_ALIGNOF (gint8), MONO_CURRENT_ABI_ALIGNOF (gint16), MONO_CURRENT_ABI_ALIGNOF (gint32), MONO_CURRENT_ABI_ALIGNOF (gint64), MONO_CURRENT_ABI_ALIGNOF (float), MONO_CURRENT_ABI_ALIGNOF (double), MONO_CURRENT_ABI_ALIGNOF (gpointer)) #endif int mono_abi_alignment (CoreTypeAlign type) { return mono_abi_details.alignment [type]; }
#include <config.h> #include <mono/utils/mono-memory-model.h> #include <mono/metadata/abi-details.h> typedef struct { const char alignment [MONO_ALIGN_COUNT]; } AbiDetails; #define DECLARE_ABI_DETAILS(I8, I16, I32, I64, F32, F64, PTR) \ const static AbiDetails mono_abi_details = { \ { I8, I16, I32, I64, F32, F64, PTR } \ }; \ #ifdef MONO_CROSS_COMPILE #if TARGET_WASM DECLARE_ABI_DETAILS (1, 2, 4, 8, 4, 8, 4) #elif TARGET_S390X DECLARE_ABI_DETAILS (1, 2, 4, 8, 4, 8, 8) #else #define DECL_OFFSET(struct,field) #define DECL_OFFSET2(struct,field,offset) #define DECL_ALIGN2(type,size) MONO_ALIGN_value_ ##type = size, #define DECL_SIZE(type) #define DECL_SIZE2(type,size) enum { #include "object-offsets.h" }; DECLARE_ABI_DETAILS ( MONO_ALIGN_value_gint8, MONO_ALIGN_value_gint16, MONO_ALIGN_value_gint32, MONO_ALIGN_value_gint64, MONO_ALIGN_value_float, MONO_ALIGN_value_double, MONO_ALIGN_value_gpointer) #endif #else #define MONO_CURRENT_ABI_ALIGNOF_TYPEDEF(type) typedef struct { char c; type x; } Mono_Align_Struct_ ##type; #define MONO_CURRENT_ABI_ALIGNOF(type) ((int)G_STRUCT_OFFSET(Mono_Align_Struct_ ##type, x)) /* Needed by MONO_CURRENT_ABI_ALIGNOF */ MONO_CURRENT_ABI_ALIGNOF_TYPEDEF(gint8) MONO_CURRENT_ABI_ALIGNOF_TYPEDEF(gint16) MONO_CURRENT_ABI_ALIGNOF_TYPEDEF(gint32) MONO_CURRENT_ABI_ALIGNOF_TYPEDEF(gint64) MONO_CURRENT_ABI_ALIGNOF_TYPEDEF(float) MONO_CURRENT_ABI_ALIGNOF_TYPEDEF(double) MONO_CURRENT_ABI_ALIGNOF_TYPEDEF(gpointer) DECLARE_ABI_DETAILS ( MONO_CURRENT_ABI_ALIGNOF (gint8), MONO_CURRENT_ABI_ALIGNOF (gint16), MONO_CURRENT_ABI_ALIGNOF (gint32), MONO_CURRENT_ABI_ALIGNOF (gint64), MONO_CURRENT_ABI_ALIGNOF (float), MONO_CURRENT_ABI_ALIGNOF (double), MONO_CURRENT_ABI_ALIGNOF (gpointer)) #endif int mono_abi_alignment (CoreTypeAlign type) { return mono_abi_details.alignment [type]; }
-1
dotnet/runtime
66,234
Remove compiler warning suppression
Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
AaronRobinsonMSFT
2022-03-05T03:45:49Z
2022-03-09T00:57:12Z
220e67755ae6323a01011b83070dff6f84b02519
853e494abd1c1e12d28a76829b4680af7f694afa
Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
./src/coreclr/CMakeLists.txt
cmake_minimum_required(VERSION 3.6.2) cmake_policy(SET CMP0042 NEW) # MACOSX_RPATH is enabled by default. if (CMAKE_VERSION VERSION_GREATER 3.7 OR CMAKE_VERSION VERSION_EQUAL 3.7) cmake_policy(SET CMP0066 NEW) # Honor per-config flags in try_compile() source-file signature. endif() if (CMAKE_VERSION VERSION_GREATER 3.8 OR CMAKE_VERSION VERSION_EQUAL 3.8) cmake_policy(SET CMP0067 NEW) # Honor language standard in try_compile() source-file signature endif() if (CMAKE_VERSION VERSION_GREATER 3.15 OR CMAKE_VERSION VERSION_EQUAL 3.15) cmake_policy(SET CMP0091 NEW) # MSVC runtime library flags are selected by an abstraction. endif() # Set the project name project(CoreCLR) include(../../eng/native/configurepaths.cmake) include(${CLR_ENG_NATIVE_DIR}/configurecompiler.cmake) include_directories("${CLR_SRC_NATIVE_DIR}") if(MSVC) set(CMAKE_CXX_STANDARD_LIBRARIES "") # do not link against standard win32 libs i.e. kernel32, uuid, user32, etc. endif (MSVC) # Set commonly used directory names set(CLR_DIR ${CMAKE_CURRENT_SOURCE_DIR}) set(VM_DIR ${CMAKE_CURRENT_SOURCE_DIR}/vm) set(GENERATED_INCLUDE_DIR ${CMAKE_CURRENT_BINARY_DIR}/inc) set(GENERATED_EVENTING_DIR ${CMAKE_CURRENT_BINARY_DIR}/Eventing) set(PAL_REDEFINES_FILE ${CMAKE_CURRENT_SOURCE_DIR}/dlls/mscordac/palredefines.S) # Avoid logging when skipping up-to-date copies set(CMAKE_INSTALL_MESSAGE LAZY) set(CORECLR_SET_RPATH ON) if(CORECLR_SET_RPATH) # Enable @rpath support for shared libraries. 
set(MACOSX_RPATH ON) endif(CORECLR_SET_RPATH) OPTION(CLR_CMAKE_ENABLE_CODE_COVERAGE "Enable code coverage" OFF) #---------------------------------------------------- # Cross target Component build specific configuration #---------------------------------------------------- if(CLR_CROSS_COMPONENTS_BUILD) add_definitions(-DCROSS_COMPILE) if(CLR_CMAKE_HOST_ARCH_AMD64 AND (CLR_CMAKE_TARGET_ARCH_ARM OR CLR_CMAKE_TARGET_ARCH_I386)) set(FEATURE_CROSSBITNESS 1) endif(CLR_CMAKE_HOST_ARCH_AMD64 AND (CLR_CMAKE_TARGET_ARCH_ARM OR CLR_CMAKE_TARGET_ARCH_I386)) endif(CLR_CROSS_COMPONENTS_BUILD) #------------------- # Enable PGO support #------------------- include(pgosupport.cmake) #--------------------------------------------------- # Define sub-component targets for the build #--------------------------------------------------- include(components.cmake) #--------------------------- # Build the single file host #--------------------------- if(NOT CLR_CROSS_COMPONENTS_BUILD) set(CLR_SINGLE_FILE_HOST_ONLY 1) add_subdirectory(${CLR_SRC_NATIVE_DIR}/corehost/apphost/static Corehost.Static) add_dependencies(runtime singlefilehost) endif() #------------------------- # Enable C++ EH with SEH #------------------------- if (MSVC) string(REPLACE "/EHsc" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") add_compile_options($<$<COMPILE_LANGUAGE:CXX>:/EHa>) # enable C++ EH (w/ SEH exceptions) endif() #------------------------------- # Include libraries native shims #------------------------------- if(NOT CLR_CROSS_COMPONENTS_BUILD) set(STATIC_LIBS_ONLY 1) add_subdirectory(${CLR_SRC_NATIVE_DIR}/libs libs-native) endif(NOT CLR_CROSS_COMPONENTS_BUILD) #----------------------------------------- # Add Projects # - project which require platform header not clr's # - do not depend on clr's compile definitions #----------------------------------------- if(CLR_CMAKE_HOST_UNIX) if(CLR_CMAKE_TARGET_ANDROID) find_library(LZMA NAMES lzma) if(LZMA STREQUAL LZMA-NOTFOUND) message(FATAL_ERROR "Cannot find liblzma.") 
endif(LZMA STREQUAL LZMA-NOTFOUND) find_library(ANDROID_GLOB NAMES android-glob) if(ANDROID_GLOB STREQUAL ANDROID_GLOB-NOTFOUND) message(FATAL_ERROR "Cannot find android-glob.") endif() endif() add_subdirectory(pal) add_subdirectory(hosts) else(CLR_CMAKE_HOST_UNIX) if(CLR_CMAKE_TARGET_UNIX) add_subdirectory(pal/src/libunwind) endif(CLR_CMAKE_TARGET_UNIX) endif(CLR_CMAKE_HOST_UNIX) # Add this subdir. We install the headers for the jit. add_subdirectory(pal/prebuilt/inc) # These need to happen before the VM and debug-pal includes. set(EP_GENERATED_HEADER_PATH "${GENERATED_INCLUDE_DIR}") include (${CLR_SRC_NATIVE_DIR}/eventpipe/configure.cmake) add_subdirectory(debug/debug-pal) add_subdirectory(minipal) if(CLR_CMAKE_TARGET_WIN32) add_subdirectory(gc/sample) endif() #------------------------------------- # Include directory directives #------------------------------------- # Include the basic prebuilt headers - required for getting fileversion resource details. include_directories("pal/prebuilt/inc") include_directories(${CLR_ARTIFACTS_OBJ_DIR}) add_subdirectory(tools/aot/jitinterface) if(NOT CLR_CROSS_COMPONENTS_BUILD) # NativeAOT only buildable for a subset of CoreCLR-supported configurations if((CLR_CMAKE_HOST_LINUX OR CLR_CMAKE_HOST_OSX OR CLR_CMAKE_HOST_WIN32) AND (CLR_CMAKE_HOST_ARCH_ARM64 OR CLR_CMAKE_HOST_ARCH_AMD64) AND NOT (CLR_CMAKE_HOST_OSX AND CLR_CMAKE_HOST_ARCH_ARM64)) add_subdirectory(nativeaot) endif() endif(NOT CLR_CROSS_COMPONENTS_BUILD) # Above projects do not build with these compile options # All of the compiler options are specified in file compileoptions.cmake # Do not add any new options here. 
They should be added in compileoptions.cmake if(CLR_CMAKE_HOST_WIN32) add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/Zl>) # omit default library name in .OBJ endif(CLR_CMAKE_HOST_WIN32) #-------------------------------- # Definition directives # - all clr specific compile definitions should be included in this file # - all clr specific feature variable should also be added in this file #---------------------------------- include(clrdefinitions.cmake) if(FEATURE_STANDALONE_GC) add_definitions(-DFEATURE_STANDALONE_GC) add_subdirectory(gc) endif(FEATURE_STANDALONE_GC) if (CLR_CMAKE_HOST_UNIX) include_directories("pal/inc") include_directories("pal/inc/rt") include_directories("pal/src/safecrt") endif (CLR_CMAKE_HOST_UNIX) #------------------------------ # Add Product Directories #------------------------------ include_directories("inc") include_directories("debug/inc") include_directories("debug/inc/${ARCH_SOURCES_DIR}") include_directories("debug/inc/dump") include_directories("md/inc") include_directories("classlibnative/bcltype") include_directories("classlibnative/cryptography") include_directories("classlibnative/inc") include_directories("${GENERATED_INCLUDE_DIR}") include_directories("hosts/inc") include_directories("minipal") if(CLR_CMAKE_TARGET_WIN32 AND FEATURE_EVENT_TRACE) include_directories("${GENERATED_INCLUDE_DIR}/etw") endif(CLR_CMAKE_TARGET_WIN32 AND FEATURE_EVENT_TRACE) add_subdirectory(debug/dbgutil) if(CLR_CMAKE_HOST_UNIX) if(CLR_CMAKE_HOST_OSX OR (CLR_CMAKE_HOST_LINUX AND NOT CLR_CMAKE_HOST_UNIX_X86 AND NOT CLR_CMAKE_HOST_ANDROID)) add_subdirectory(debug/createdump) endif(CLR_CMAKE_HOST_OSX OR (CLR_CMAKE_HOST_LINUX AND NOT CLR_CMAKE_HOST_UNIX_X86 AND NOT CLR_CMAKE_HOST_ANDROID)) # Include the dummy c++ include files include_directories("pal/inc/rt/cpp") # This prevents inclusion of standard C compiler headers add_compile_options(-nostdinc) set (NATIVE_RESOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/nativeresources) 
include_directories(${NATIVE_RESOURCE_DIR}) set (PROCESS_RC_SCRIPT ${NATIVE_RESOURCE_DIR}/processrc.sh) set (RESOURCE_STRING_HEADER_DIR ${NATIVE_RESOURCE_DIR}) # Create a command to create a C++ source file containing an array of # NativeStringResource structs which represent the information from a # given Windows .rc file. The target C++ file path is returned in the # variable specified by the TARGET_FILE parameter. function(build_resources SOURCE TARGET_NAME TARGET_FILE) set(PREPROCESSED_SOURCE ${CMAKE_CURRENT_BINARY_DIR}/${TARGET_NAME}.rc.i) preprocess_file(${SOURCE} ${PREPROCESSED_SOURCE}) set(RESOURCE_ENTRY_ARRAY_CPP ${CMAKE_CURRENT_BINARY_DIR}/${TARGET_NAME}.cpp) add_custom_command( OUTPUT ${RESOURCE_ENTRY_ARRAY_CPP} # Convert the preprocessed .rc file to a C++ file which will be used to make a static lib. COMMAND ${PROCESS_RC_SCRIPT} ${PREPROCESSED_SOURCE} ${TARGET_NAME} >${RESOURCE_ENTRY_ARRAY_CPP} DEPENDS ${PREPROCESSED_SOURCE} ${PROCESS_RC_SCRIPT} ) include_directories(${RESOURCE_STRING_HEADER_DIR}) set(${TARGET_FILE} ${RESOURCE_ENTRY_ARRAY_CPP} PARENT_SCOPE) endfunction() add_subdirectory(nativeresources) endif(CLR_CMAKE_HOST_UNIX) add_subdirectory(utilcode) add_subdirectory(inc) if(CLR_CMAKE_HOST_UNIX) add_subdirectory(palrt) endif(CLR_CMAKE_HOST_UNIX) add_subdirectory(ilasm) add_subdirectory(ildasm) add_subdirectory(gcinfo) add_subdirectory(jit) add_subdirectory(vm) add_subdirectory(md) add_subdirectory(debug) add_subdirectory(binder) add_subdirectory(classlibnative) add_subdirectory(dlls) add_subdirectory(tools) add_subdirectory(unwinder) add_subdirectory(interop) if(CLR_CMAKE_HOST_WIN32) add_subdirectory(hosts) endif(CLR_CMAKE_HOST_WIN32) #---------------------------------------------------- # Cross target Component install configuration #---------------------------------------------------- if(CLR_CROSS_COMPONENTS_BUILD) include(crosscomponents.cmake) endif(CLR_CROSS_COMPONENTS_BUILD)
cmake_minimum_required(VERSION 3.6.2) cmake_policy(SET CMP0042 NEW) # MACOSX_RPATH is enabled by default. if (CMAKE_VERSION VERSION_GREATER 3.7 OR CMAKE_VERSION VERSION_EQUAL 3.7) cmake_policy(SET CMP0066 NEW) # Honor per-config flags in try_compile() source-file signature. endif() if (CMAKE_VERSION VERSION_GREATER 3.8 OR CMAKE_VERSION VERSION_EQUAL 3.8) cmake_policy(SET CMP0067 NEW) # Honor language standard in try_compile() source-file signature endif() if (CMAKE_VERSION VERSION_GREATER 3.15 OR CMAKE_VERSION VERSION_EQUAL 3.15) cmake_policy(SET CMP0091 NEW) # MSVC runtime library flags are selected by an abstraction. endif() # Set the project name project(CoreCLR) include(../../eng/native/configurepaths.cmake) include(${CLR_ENG_NATIVE_DIR}/configurecompiler.cmake) include_directories("${CLR_SRC_NATIVE_DIR}") if(MSVC) set(CMAKE_CXX_STANDARD_LIBRARIES "") # do not link against standard win32 libs i.e. kernel32, uuid, user32, etc. endif (MSVC) # Set commonly used directory names set(CLR_DIR ${CMAKE_CURRENT_SOURCE_DIR}) set(VM_DIR ${CMAKE_CURRENT_SOURCE_DIR}/vm) set(GENERATED_INCLUDE_DIR ${CMAKE_CURRENT_BINARY_DIR}/inc) set(GENERATED_EVENTING_DIR ${CMAKE_CURRENT_BINARY_DIR}/Eventing) set(PAL_REDEFINES_FILE ${CMAKE_CURRENT_SOURCE_DIR}/dlls/mscordac/palredefines.S) # Avoid logging when skipping up-to-date copies set(CMAKE_INSTALL_MESSAGE LAZY) set(CORECLR_SET_RPATH ON) if(CORECLR_SET_RPATH) # Enable @rpath support for shared libraries. 
set(MACOSX_RPATH ON) endif(CORECLR_SET_RPATH) OPTION(CLR_CMAKE_ENABLE_CODE_COVERAGE "Enable code coverage" OFF) #---------------------------------------------------- # Cross target Component build specific configuration #---------------------------------------------------- if(CLR_CROSS_COMPONENTS_BUILD) add_definitions(-DCROSS_COMPILE) if(CLR_CMAKE_HOST_ARCH_AMD64 AND (CLR_CMAKE_TARGET_ARCH_ARM OR CLR_CMAKE_TARGET_ARCH_I386)) set(FEATURE_CROSSBITNESS 1) endif(CLR_CMAKE_HOST_ARCH_AMD64 AND (CLR_CMAKE_TARGET_ARCH_ARM OR CLR_CMAKE_TARGET_ARCH_I386)) endif(CLR_CROSS_COMPONENTS_BUILD) #------------------- # Enable PGO support #------------------- include(pgosupport.cmake) #--------------------------------------------------- # Define sub-component targets for the build #--------------------------------------------------- include(components.cmake) #--------------------------- # Build the single file host #--------------------------- if(NOT CLR_CROSS_COMPONENTS_BUILD) set(CLR_SINGLE_FILE_HOST_ONLY 1) add_subdirectory(${CLR_SRC_NATIVE_DIR}/corehost/apphost/static Corehost.Static) add_dependencies(runtime singlefilehost) endif() #------------------------- # Enable C++ EH with SEH #------------------------- if (MSVC) string(REPLACE "/EHsc" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") add_compile_options($<$<COMPILE_LANGUAGE:CXX>:/EHa>) # enable C++ EH (w/ SEH exceptions) endif() #------------------------------- # Include libraries native shims #------------------------------- if(NOT CLR_CROSS_COMPONENTS_BUILD) set(STATIC_LIBS_ONLY 1) add_subdirectory(${CLR_SRC_NATIVE_DIR}/libs libs-native) endif(NOT CLR_CROSS_COMPONENTS_BUILD) #----------------------------------------- # Add Projects # - project which require platform header not clr's # - do not depend on clr's compile definitions #----------------------------------------- if(CLR_CMAKE_HOST_UNIX) if(CLR_CMAKE_TARGET_ANDROID) find_library(LZMA NAMES lzma) if(LZMA STREQUAL LZMA-NOTFOUND) message(FATAL_ERROR "Cannot find liblzma.") 
endif(LZMA STREQUAL LZMA-NOTFOUND) find_library(ANDROID_GLOB NAMES android-glob) if(ANDROID_GLOB STREQUAL ANDROID_GLOB-NOTFOUND) message(FATAL_ERROR "Cannot find android-glob.") endif() endif() add_subdirectory(pal) add_subdirectory(hosts) else(CLR_CMAKE_HOST_UNIX) if(CLR_CMAKE_TARGET_UNIX) add_subdirectory(pal/src/libunwind) endif(CLR_CMAKE_TARGET_UNIX) endif(CLR_CMAKE_HOST_UNIX) # Add this subdir. We install the headers for the jit. add_subdirectory(pal/prebuilt/inc) # These need to happen before the VM and debug-pal includes. set(EP_GENERATED_HEADER_PATH "${GENERATED_INCLUDE_DIR}") include (${CLR_SRC_NATIVE_DIR}/eventpipe/configure.cmake) add_subdirectory(debug/debug-pal) add_subdirectory(minipal) if(CLR_CMAKE_TARGET_WIN32) add_subdirectory(gc/sample) endif() #------------------------------------- # Include directory directives #------------------------------------- # Include the basic prebuilt headers - required for getting fileversion resource details. include_directories("pal/prebuilt/inc") include_directories(${CLR_ARTIFACTS_OBJ_DIR}) add_subdirectory(tools/aot/jitinterface) if(NOT CLR_CROSS_COMPONENTS_BUILD) # NativeAOT only buildable for a subset of CoreCLR-supported configurations if((CLR_CMAKE_HOST_LINUX OR CLR_CMAKE_HOST_OSX OR CLR_CMAKE_HOST_WIN32) AND (CLR_CMAKE_HOST_ARCH_ARM64 OR CLR_CMAKE_HOST_ARCH_AMD64) AND NOT (CLR_CMAKE_HOST_OSX AND CLR_CMAKE_HOST_ARCH_ARM64)) add_subdirectory(nativeaot) endif() endif(NOT CLR_CROSS_COMPONENTS_BUILD) # Above projects do not build with these compile options # All of the compiler options are specified in file compileoptions.cmake # Do not add any new options here. 
They should be added in compileoptions.cmake if(CLR_CMAKE_HOST_WIN32) add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/Zl>) # omit default library name in .OBJ endif(CLR_CMAKE_HOST_WIN32) #-------------------------------- # Definition directives # - all clr specific compile definitions should be included in this file # - all clr specific feature variable should also be added in this file #---------------------------------- include(clrdefinitions.cmake) if(FEATURE_STANDALONE_GC) add_definitions(-DFEATURE_STANDALONE_GC) add_subdirectory(gc) endif(FEATURE_STANDALONE_GC) if (CLR_CMAKE_HOST_UNIX) include_directories("pal/inc") include_directories("pal/inc/rt") include_directories("pal/src/safecrt") endif (CLR_CMAKE_HOST_UNIX) #------------------------------ # Add Product Directories #------------------------------ include_directories("inc") include_directories("debug/inc") include_directories("debug/inc/${ARCH_SOURCES_DIR}") include_directories("debug/inc/dump") include_directories("md/inc") include_directories("classlibnative/bcltype") include_directories("classlibnative/cryptography") include_directories("classlibnative/inc") include_directories("${GENERATED_INCLUDE_DIR}") include_directories("hosts/inc") include_directories("minipal") if(CLR_CMAKE_TARGET_WIN32 AND FEATURE_EVENT_TRACE) include_directories("${GENERATED_INCLUDE_DIR}/etw") endif(CLR_CMAKE_TARGET_WIN32 AND FEATURE_EVENT_TRACE) add_subdirectory(debug/dbgutil) if(CLR_CMAKE_HOST_UNIX) if(CLR_CMAKE_HOST_OSX OR (CLR_CMAKE_HOST_LINUX AND NOT CLR_CMAKE_HOST_UNIX_X86 AND NOT CLR_CMAKE_HOST_ANDROID)) add_subdirectory(debug/createdump) endif(CLR_CMAKE_HOST_OSX OR (CLR_CMAKE_HOST_LINUX AND NOT CLR_CMAKE_HOST_UNIX_X86 AND NOT CLR_CMAKE_HOST_ANDROID)) # Include the dummy c++ include files include_directories("pal/inc/rt/cpp") # This prevents inclusion of standard C compiler headers add_compile_options(-nostdinc) set (NATIVE_RESOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/nativeresources) 
include_directories(${NATIVE_RESOURCE_DIR}) set (PROCESS_RC_SCRIPT ${NATIVE_RESOURCE_DIR}/processrc.sh) set (RESOURCE_STRING_HEADER_DIR ${NATIVE_RESOURCE_DIR}) # Create a command to create a C++ source file containing an array of # NativeStringResource structs which represent the information from a # given Windows .rc file. The target C++ file path is returned in the # variable specified by the TARGET_FILE parameter. function(build_resources SOURCE TARGET_NAME TARGET_FILE) set(PREPROCESSED_SOURCE ${CMAKE_CURRENT_BINARY_DIR}/${TARGET_NAME}.rc.i) preprocess_file(${SOURCE} ${PREPROCESSED_SOURCE}) set(RESOURCE_ENTRY_ARRAY_CPP ${CMAKE_CURRENT_BINARY_DIR}/${TARGET_NAME}.cpp) add_custom_command( OUTPUT ${RESOURCE_ENTRY_ARRAY_CPP} # Convert the preprocessed .rc file to a C++ file which will be used to make a static lib. COMMAND ${PROCESS_RC_SCRIPT} ${PREPROCESSED_SOURCE} ${TARGET_NAME} >${RESOURCE_ENTRY_ARRAY_CPP} DEPENDS ${PREPROCESSED_SOURCE} ${PROCESS_RC_SCRIPT} ) include_directories(${RESOURCE_STRING_HEADER_DIR}) set(${TARGET_FILE} ${RESOURCE_ENTRY_ARRAY_CPP} PARENT_SCOPE) endfunction() add_subdirectory(nativeresources) endif(CLR_CMAKE_HOST_UNIX) add_subdirectory(utilcode) add_subdirectory(inc) if(CLR_CMAKE_HOST_UNIX) add_subdirectory(palrt) endif(CLR_CMAKE_HOST_UNIX) add_subdirectory(ilasm) add_subdirectory(ildasm) add_subdirectory(gcinfo) add_subdirectory(jit) add_subdirectory(vm) add_subdirectory(md) add_subdirectory(debug) add_subdirectory(binder) add_subdirectory(classlibnative) add_subdirectory(dlls) add_subdirectory(tools) add_subdirectory(unwinder) add_subdirectory(interop) if(CLR_CMAKE_HOST_WIN32) add_subdirectory(hosts) endif(CLR_CMAKE_HOST_WIN32) #---------------------------------------------------- # Cross target Component install configuration #---------------------------------------------------- if(CLR_CROSS_COMPONENTS_BUILD) include(crosscomponents.cmake) endif(CLR_CROSS_COMPONENTS_BUILD)
-1
dotnet/runtime
66,234
Remove compiler warning suppression
Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
AaronRobinsonMSFT
2022-03-05T03:45:49Z
2022-03-09T00:57:12Z
220e67755ae6323a01011b83070dff6f84b02519
853e494abd1c1e12d28a76829b4680af7f694afa
Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
./src/coreclr/pal/src/libunwind/src/aarch64/Lstash_frame.c
#define UNW_LOCAL_ONLY #include <libunwind.h> #if defined(UNW_LOCAL_ONLY) && !defined(UNW_REMOTE_ONLY) #include "Gstash_frame.c" #endif
#define UNW_LOCAL_ONLY #include <libunwind.h> #if defined(UNW_LOCAL_ONLY) && !defined(UNW_REMOTE_ONLY) #include "Gstash_frame.c" #endif
-1
dotnet/runtime
66,234
Remove compiler warning suppression
Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
AaronRobinsonMSFT
2022-03-05T03:45:49Z
2022-03-09T00:57:12Z
220e67755ae6323a01011b83070dff6f84b02519
853e494abd1c1e12d28a76829b4680af7f694afa
Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
./src/coreclr/tools/superpmi/superpmi-shared/spmiutil.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. //---------------------------------------------------------- // SPMIUtil.cpp - General utility functions //---------------------------------------------------------- #include "standardpch.h" #include "logging.h" #include "spmiutil.h" static bool breakOnDebugBreakorAV = false; bool BreakOnDebugBreakorAV() { return breakOnDebugBreakorAV; } void SetBreakOnDebugBreakOrAV(bool value) { breakOnDebugBreakorAV = value; } static bool breakOnException = false; bool BreakOnException() { return breakOnException; } void SetBreakOnException(bool value) { breakOnException = value; } void DebugBreakorAV(int val) { if (IsDebuggerPresent()) { if (val == 0) __debugbreak(); if (BreakOnDebugBreakorAV()) __debugbreak(); } int exception_code = EXCEPTIONCODE_DebugBreakorAV + val; // assert((EXCEPTIONCODE_DebugBreakorAV <= exception_code) && (exception_code < EXCEPTIONCODE_DebugBreakorAV_MAX)) LogException(exception_code, "DebugBreak or AV Exception %d", val); } char* GetEnvironmentVariableWithDefaultA(const char* envVarName, const char* defaultValue) { char* retString = nullptr; // Figure out how much space we need to allocate DWORD dwRetVal = ::GetEnvironmentVariableA(envVarName, nullptr, 0); if (dwRetVal != 0) { retString = new char[dwRetVal]; dwRetVal = ::GetEnvironmentVariableA(envVarName, retString, dwRetVal); } else { if (defaultValue != nullptr) { dwRetVal = (DWORD)strlen(defaultValue) + 1; // add one for null terminator retString = new char[dwRetVal]; memcpy_s(retString, dwRetVal, defaultValue, dwRetVal); } } return retString; } WCHAR* GetEnvironmentVariableWithDefaultW(const WCHAR* envVarName, const WCHAR* defaultValue) { WCHAR* retString = nullptr; // Figure out how much space we need to allocate DWORD dwRetVal = ::GetEnvironmentVariableW(envVarName, nullptr, 0); if (dwRetVal != 0) { retString = new WCHAR[dwRetVal]; dwRetVal = 
::GetEnvironmentVariableW(envVarName, retString, dwRetVal); } else { if (defaultValue != nullptr) { dwRetVal = (DWORD)wcslen(defaultValue) + 1; // add one for null terminator retString = new WCHAR[dwRetVal]; memcpy_s(retString, dwRetVal * sizeof(WCHAR), defaultValue, dwRetVal * sizeof(WCHAR)); } } return retString; } #ifdef TARGET_UNIX // For some reason, the PAL doesn't have GetCommandLineA(). So write it. LPSTR GetCommandLineA() { LPSTR pCmdLine = nullptr; LPWSTR pwCmdLine = GetCommandLineW(); if (pwCmdLine != nullptr) { // Convert to ASCII int n = WideCharToMultiByte(CP_ACP, 0, pwCmdLine, -1, nullptr, 0, nullptr, nullptr); if (n == 0) { LogError("MultiByteToWideChar failed %d", GetLastError()); return nullptr; } pCmdLine = new char[n]; int n2 = WideCharToMultiByte(CP_ACP, 0, pwCmdLine, -1, pCmdLine, n, nullptr, nullptr); if ((n2 == 0) || (n2 != n)) { LogError("MultiByteToWideChar failed %d", GetLastError()); return nullptr; } } return pCmdLine; } #endif // TARGET_UNIX bool LoadRealJitLib(HMODULE& jitLib, WCHAR* jitLibPath) { // Load Library if (jitLib == NULL) { if (jitLibPath == nullptr) { LogError("LoadRealJitLib - No real jit path"); return false; } jitLib = ::LoadLibraryW(jitLibPath); if (jitLib == NULL) { LogError("LoadRealJitLib - LoadLibrary failed to load '%ws' (0x%08x)", jitLibPath, ::GetLastError()); return false; } } return true; } void ReplaceIllegalCharacters(WCHAR* fileName) { WCHAR* quote = nullptr; // If there are any quotes in the file name convert them to spaces. while ((quote = wcsstr(fileName, W("\""))) != nullptr) { *quote = W(' '); } // Convert non-ASCII to ASCII for simplicity. for (quote = fileName; *quote != '\0'; quote++) { WCHAR ch = *quote; if ((ch <= 32) || (ch >= 127)) // Only allow textual ASCII characters { *quote = W('_'); } } // Remove any illegal or annoying characters from the file name by converting them to underscores. while ((quote = wcspbrk(fileName, W("()=<>:\"/\\|?! 
*.,"))) != nullptr) { *quote = W('_'); } } // All lengths in this function exclude the terminal NULL. WCHAR* GetResultFileName(const WCHAR* folderPath, const WCHAR* fileName, const WCHAR* extension) { const size_t extensionLength = wcslen(extension); const size_t fileNameLength = wcslen(fileName); const size_t randomStringLength = 8; const size_t maxPathLength = MAX_PATH - 50; // See how long the folder part is, and start building the file path with the folder part. // WCHAR* fullPath = new WCHAR[MAX_PATH]; fullPath[0] = W('\0'); const size_t folderPathLength = GetFullPathNameW(folderPath, MAX_PATH, (LPWSTR)fullPath, NULL); if (folderPathLength == 0) { LogError("GetResultFileName - can't resolve folder path '%ws'", folderPath); return nullptr; } // Account for the folder, directory separator and extension. // size_t fullPathLength = folderPathLength + 1 + extensionLength; // If we won't have room for a minimal file name part, bail. // if ((fullPathLength + randomStringLength) > maxPathLength) { LogError("GetResultFileName - folder path '%ws' length + minimal file name exceeds limit %d", fullPath, maxPathLength); return nullptr; } // Now figure out the file name part. // const size_t maxFileNameLength = maxPathLength - fullPathLength; size_t usableFileNameLength = min(fileNameLength, maxFileNameLength - randomStringLength); fullPathLength += usableFileNameLength + randomStringLength; // Append the file name part // wcsncat_s(fullPath, fullPathLength + 1, DIRECTORY_SEPARATOR_STR_W, 1); wcsncat_s(fullPath, fullPathLength + 1, fileName, usableFileNameLength); // Clean up anything in the file part that can't be in a file name. // ReplaceIllegalCharacters(fullPath + folderPathLength + 1); // Append a random string to improve uniqueness. 
// unsigned randomNumber = 0; #ifdef TARGET_UNIX PAL_Random(&randomNumber, sizeof(randomNumber)); #else // !TARGET_UNIX rand_s(&randomNumber); #endif // !TARGET_UNIX WCHAR randomString[randomStringLength + 1]; swprintf_s(randomString, randomStringLength + 1, W("%08X"), randomNumber); wcsncat_s(fullPath, fullPathLength + 1, randomString, randomStringLength); // Append extension // wcsncat_s(fullPath, fullPathLength + 1, extension, extensionLength); return fullPath; } #ifdef TARGET_AMD64 static SPMI_TARGET_ARCHITECTURE SpmiTargetArchitecture = SPMI_TARGET_ARCHITECTURE_AMD64; #elif defined(TARGET_X86) static SPMI_TARGET_ARCHITECTURE SpmiTargetArchitecture = SPMI_TARGET_ARCHITECTURE_X86; #elif defined(TARGET_ARM) static SPMI_TARGET_ARCHITECTURE SpmiTargetArchitecture = SPMI_TARGET_ARCHITECTURE_ARM; #elif defined(TARGET_ARM64) static SPMI_TARGET_ARCHITECTURE SpmiTargetArchitecture = SPMI_TARGET_ARCHITECTURE_ARM64; #else #error Unsupported architecture #endif SPMI_TARGET_ARCHITECTURE GetSpmiTargetArchitecture() { return SpmiTargetArchitecture; } void SetSpmiTargetArchitecture(SPMI_TARGET_ARCHITECTURE spmiTargetArchitecture) { SpmiTargetArchitecture = spmiTargetArchitecture; } // The following functions are used for arm64/arm32 relocation processing. // They are copies of the code in src\coreclr\utilcode\util.cpp. // We decided to copy them instead of linking with utilcode library // to avoid introducing additional runtime dependencies. 
void PutArm64Rel28(UINT32* pCode, INT32 imm28) { UINT32 branchInstr = *pCode; branchInstr &= 0xFC000000; branchInstr |= ((imm28 >> 2) & 0x03FFFFFF); *pCode = branchInstr; } void PutArm64Rel21(UINT32* pCode, INT32 imm21) { UINT32 adrpInstr = *pCode; adrpInstr &= 0x9F00001F; INT32 immlo = imm21 & 0x03; INT32 immhi = (imm21 & 0x1FFFFC) >> 2; adrpInstr |= ((immlo << 29) | (immhi << 5)); *pCode = adrpInstr; } void PutArm64Rel12(UINT32* pCode, INT32 imm12) { UINT32 addInstr = *pCode; addInstr &= 0xFFC003FF; addInstr |= (imm12 << 10); *pCode = addInstr; } void PutThumb2Imm16(UINT16* p, UINT16 imm16) { USHORT Opcode0 = p[0]; USHORT Opcode1 = p[1]; Opcode0 &= ~((0xf000 >> 12) | (0x0800 >> 1)); Opcode1 &= ~((0x0700 << 4) | (0x00ff << 0)); Opcode0 |= (imm16 & 0xf000) >> 12; Opcode0 |= (imm16 & 0x0800) >> 1; Opcode1 |= (imm16 & 0x0700) << 4; Opcode1 |= (imm16 & 0x00ff) << 0; p[0] = Opcode0; p[1] = Opcode1; } void PutThumb2Mov32(UINT16* p, UINT32 imm32) { PutThumb2Imm16(p, (UINT16)imm32); PutThumb2Imm16(p + 2, (UINT16)(imm32 >> 16)); } void PutThumb2BlRel24(UINT16* p, INT32 imm24) { USHORT Opcode0 = p[0]; USHORT Opcode1 = p[1]; Opcode0 &= 0xF800; Opcode1 &= 0xD000; UINT32 S = (imm24 & 0x1000000) >> 24; UINT32 J1 = ((imm24 & 0x0800000) >> 23) ^ S ^ 1; UINT32 J2 = ((imm24 & 0x0400000) >> 22) ^ S ^ 1; Opcode0 |= ((imm24 & 0x03FF000) >> 12) | (S << 10); Opcode1 |= ((imm24 & 0x0000FFE) >> 1) | (J1 << 13) | (J2 << 11); p[0] = Opcode0; p[1] = Opcode1; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. //---------------------------------------------------------- // SPMIUtil.cpp - General utility functions //---------------------------------------------------------- #include "standardpch.h" #include "logging.h" #include "spmiutil.h" static bool breakOnDebugBreakorAV = false; bool BreakOnDebugBreakorAV() { return breakOnDebugBreakorAV; } void SetBreakOnDebugBreakOrAV(bool value) { breakOnDebugBreakorAV = value; } static bool breakOnException = false; bool BreakOnException() { return breakOnException; } void SetBreakOnException(bool value) { breakOnException = value; } void DebugBreakorAV(int val) { if (IsDebuggerPresent()) { if (val == 0) __debugbreak(); if (BreakOnDebugBreakorAV()) __debugbreak(); } int exception_code = EXCEPTIONCODE_DebugBreakorAV + val; // assert((EXCEPTIONCODE_DebugBreakorAV <= exception_code) && (exception_code < EXCEPTIONCODE_DebugBreakorAV_MAX)) LogException(exception_code, "DebugBreak or AV Exception %d", val); } char* GetEnvironmentVariableWithDefaultA(const char* envVarName, const char* defaultValue) { char* retString = nullptr; // Figure out how much space we need to allocate DWORD dwRetVal = ::GetEnvironmentVariableA(envVarName, nullptr, 0); if (dwRetVal != 0) { retString = new char[dwRetVal]; dwRetVal = ::GetEnvironmentVariableA(envVarName, retString, dwRetVal); } else { if (defaultValue != nullptr) { dwRetVal = (DWORD)strlen(defaultValue) + 1; // add one for null terminator retString = new char[dwRetVal]; memcpy_s(retString, dwRetVal, defaultValue, dwRetVal); } } return retString; } WCHAR* GetEnvironmentVariableWithDefaultW(const WCHAR* envVarName, const WCHAR* defaultValue) { WCHAR* retString = nullptr; // Figure out how much space we need to allocate DWORD dwRetVal = ::GetEnvironmentVariableW(envVarName, nullptr, 0); if (dwRetVal != 0) { retString = new WCHAR[dwRetVal]; dwRetVal = 
::GetEnvironmentVariableW(envVarName, retString, dwRetVal); } else { if (defaultValue != nullptr) { dwRetVal = (DWORD)wcslen(defaultValue) + 1; // add one for null terminator retString = new WCHAR[dwRetVal]; memcpy_s(retString, dwRetVal * sizeof(WCHAR), defaultValue, dwRetVal * sizeof(WCHAR)); } } return retString; } #ifdef TARGET_UNIX // For some reason, the PAL doesn't have GetCommandLineA(). So write it. LPSTR GetCommandLineA() { LPSTR pCmdLine = nullptr; LPWSTR pwCmdLine = GetCommandLineW(); if (pwCmdLine != nullptr) { // Convert to ASCII int n = WideCharToMultiByte(CP_ACP, 0, pwCmdLine, -1, nullptr, 0, nullptr, nullptr); if (n == 0) { LogError("MultiByteToWideChar failed %d", GetLastError()); return nullptr; } pCmdLine = new char[n]; int n2 = WideCharToMultiByte(CP_ACP, 0, pwCmdLine, -1, pCmdLine, n, nullptr, nullptr); if ((n2 == 0) || (n2 != n)) { LogError("MultiByteToWideChar failed %d", GetLastError()); return nullptr; } } return pCmdLine; } #endif // TARGET_UNIX bool LoadRealJitLib(HMODULE& jitLib, WCHAR* jitLibPath) { // Load Library if (jitLib == NULL) { if (jitLibPath == nullptr) { LogError("LoadRealJitLib - No real jit path"); return false; } jitLib = ::LoadLibraryW(jitLibPath); if (jitLib == NULL) { LogError("LoadRealJitLib - LoadLibrary failed to load '%ws' (0x%08x)", jitLibPath, ::GetLastError()); return false; } } return true; } void ReplaceIllegalCharacters(WCHAR* fileName) { WCHAR* quote = nullptr; // If there are any quotes in the file name convert them to spaces. while ((quote = wcsstr(fileName, W("\""))) != nullptr) { *quote = W(' '); } // Convert non-ASCII to ASCII for simplicity. for (quote = fileName; *quote != '\0'; quote++) { WCHAR ch = *quote; if ((ch <= 32) || (ch >= 127)) // Only allow textual ASCII characters { *quote = W('_'); } } // Remove any illegal or annoying characters from the file name by converting them to underscores. while ((quote = wcspbrk(fileName, W("()=<>:\"/\\|?! 
*.,"))) != nullptr) { *quote = W('_'); } } // All lengths in this function exclude the terminal NULL. WCHAR* GetResultFileName(const WCHAR* folderPath, const WCHAR* fileName, const WCHAR* extension) { const size_t extensionLength = wcslen(extension); const size_t fileNameLength = wcslen(fileName); const size_t randomStringLength = 8; const size_t maxPathLength = MAX_PATH - 50; // See how long the folder part is, and start building the file path with the folder part. // WCHAR* fullPath = new WCHAR[MAX_PATH]; fullPath[0] = W('\0'); const size_t folderPathLength = GetFullPathNameW(folderPath, MAX_PATH, (LPWSTR)fullPath, NULL); if (folderPathLength == 0) { LogError("GetResultFileName - can't resolve folder path '%ws'", folderPath); return nullptr; } // Account for the folder, directory separator and extension. // size_t fullPathLength = folderPathLength + 1 + extensionLength; // If we won't have room for a minimal file name part, bail. // if ((fullPathLength + randomStringLength) > maxPathLength) { LogError("GetResultFileName - folder path '%ws' length + minimal file name exceeds limit %d", fullPath, maxPathLength); return nullptr; } // Now figure out the file name part. // const size_t maxFileNameLength = maxPathLength - fullPathLength; size_t usableFileNameLength = min(fileNameLength, maxFileNameLength - randomStringLength); fullPathLength += usableFileNameLength + randomStringLength; // Append the file name part // wcsncat_s(fullPath, fullPathLength + 1, DIRECTORY_SEPARATOR_STR_W, 1); wcsncat_s(fullPath, fullPathLength + 1, fileName, usableFileNameLength); // Clean up anything in the file part that can't be in a file name. // ReplaceIllegalCharacters(fullPath + folderPathLength + 1); // Append a random string to improve uniqueness. 
// unsigned randomNumber = 0; #ifdef TARGET_UNIX PAL_Random(&randomNumber, sizeof(randomNumber)); #else // !TARGET_UNIX rand_s(&randomNumber); #endif // !TARGET_UNIX WCHAR randomString[randomStringLength + 1]; swprintf_s(randomString, randomStringLength + 1, W("%08X"), randomNumber); wcsncat_s(fullPath, fullPathLength + 1, randomString, randomStringLength); // Append extension // wcsncat_s(fullPath, fullPathLength + 1, extension, extensionLength); return fullPath; } #ifdef TARGET_AMD64 static SPMI_TARGET_ARCHITECTURE SpmiTargetArchitecture = SPMI_TARGET_ARCHITECTURE_AMD64; #elif defined(TARGET_X86) static SPMI_TARGET_ARCHITECTURE SpmiTargetArchitecture = SPMI_TARGET_ARCHITECTURE_X86; #elif defined(TARGET_ARM) static SPMI_TARGET_ARCHITECTURE SpmiTargetArchitecture = SPMI_TARGET_ARCHITECTURE_ARM; #elif defined(TARGET_ARM64) static SPMI_TARGET_ARCHITECTURE SpmiTargetArchitecture = SPMI_TARGET_ARCHITECTURE_ARM64; #else #error Unsupported architecture #endif SPMI_TARGET_ARCHITECTURE GetSpmiTargetArchitecture() { return SpmiTargetArchitecture; } void SetSpmiTargetArchitecture(SPMI_TARGET_ARCHITECTURE spmiTargetArchitecture) { SpmiTargetArchitecture = spmiTargetArchitecture; } // The following functions are used for arm64/arm32 relocation processing. // They are copies of the code in src\coreclr\utilcode\util.cpp. // We decided to copy them instead of linking with utilcode library // to avoid introducing additional runtime dependencies. 
void PutArm64Rel28(UINT32* pCode, INT32 imm28) { UINT32 branchInstr = *pCode; branchInstr &= 0xFC000000; branchInstr |= ((imm28 >> 2) & 0x03FFFFFF); *pCode = branchInstr; } void PutArm64Rel21(UINT32* pCode, INT32 imm21) { UINT32 adrpInstr = *pCode; adrpInstr &= 0x9F00001F; INT32 immlo = imm21 & 0x03; INT32 immhi = (imm21 & 0x1FFFFC) >> 2; adrpInstr |= ((immlo << 29) | (immhi << 5)); *pCode = adrpInstr; } void PutArm64Rel12(UINT32* pCode, INT32 imm12) { UINT32 addInstr = *pCode; addInstr &= 0xFFC003FF; addInstr |= (imm12 << 10); *pCode = addInstr; } void PutThumb2Imm16(UINT16* p, UINT16 imm16) { USHORT Opcode0 = p[0]; USHORT Opcode1 = p[1]; Opcode0 &= ~((0xf000 >> 12) | (0x0800 >> 1)); Opcode1 &= ~((0x0700 << 4) | (0x00ff << 0)); Opcode0 |= (imm16 & 0xf000) >> 12; Opcode0 |= (imm16 & 0x0800) >> 1; Opcode1 |= (imm16 & 0x0700) << 4; Opcode1 |= (imm16 & 0x00ff) << 0; p[0] = Opcode0; p[1] = Opcode1; } void PutThumb2Mov32(UINT16* p, UINT32 imm32) { PutThumb2Imm16(p, (UINT16)imm32); PutThumb2Imm16(p + 2, (UINT16)(imm32 >> 16)); } void PutThumb2BlRel24(UINT16* p, INT32 imm24) { USHORT Opcode0 = p[0]; USHORT Opcode1 = p[1]; Opcode0 &= 0xF800; Opcode1 &= 0xD000; UINT32 S = (imm24 & 0x1000000) >> 24; UINT32 J1 = ((imm24 & 0x0800000) >> 23) ^ S ^ 1; UINT32 J2 = ((imm24 & 0x0400000) >> 22) ^ S ^ 1; Opcode0 |= ((imm24 & 0x03FF000) >> 12) | (S << 10); Opcode1 |= ((imm24 & 0x0000FFE) >> 1) | (J1 << 13) | (J2 << 11); p[0] = Opcode0; p[1] = Opcode1; }
-1
dotnet/runtime
66,234
Remove compiler warning suppression
Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
AaronRobinsonMSFT
2022-03-05T03:45:49Z
2022-03-09T00:57:12Z
220e67755ae6323a01011b83070dff6f84b02519
853e494abd1c1e12d28a76829b4680af7f694afa
Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
./src/coreclr/vm/generics.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // File: generics.cpp // // // Helper functions for generics prototype // // // ============================================================================ #ifndef _GENERICS_H #define _GENERICS_H #include "typehandle.h" #include "arraylist.h" class CrawlFrame; class DictionaryEntryLayout; // Generics helper functions namespace Generics { // Part of the recursive inheritance graph as defined by ECMA part.II Section 9.2. // // code:MethodTable.DoFullyLoad and code:TypeDesc.DoFullyLoad declare local variable of // this type and initialize it with: // - pointer to the previous (in terms of callstack) RecursionGraph instance, // - the type handle representing the type that is being fully-loaded. // // By walking the RecursionGraph chain, it is possible to tell whether the same type is // not being fully-loaded already. So far this could as well be a description of the // code:TypeHandleList. But aside from the "owner type", RecursionGraph can also hold // part of a directed graph in which nodes are generic variables and edges represent the // is-substituted-by relation. In particular, one RecursionGraph instance maintains nodes // corresponding to generic variables declared by the owner type, and all edges going // out of these nodes. // // As an example consider: // class B<U> { } // class A<T> : B<T> { } // // B's RecursionGraph has one node (U) and no edges. A's RecursionGraph has one node (T) // and one edge from T to U. // // This is how it looks like on the stack: // // A's DoFullyLoad activation - RecursionGraph(NULL, A<>) -> [T] // ^-------- | // | v // B's DoFullyLoad activation - RecursionGraph( | , B<>) -> [U] // // The edges are obviously not real pointers because the destination may not yet be // present on the stack when the edge is being added. 
Instead the edge end points are // identified by TypeVarTypeDesc pointers. Edges come in two flavors - non-expanding // and expanding, please see ECMA for detailed explanation. Finding an expanding cycle // (i.e. cycle with at least one expanding edge) in the graph means that the types // currently on stack are defined recursively and should be refused by the loader. // Reliable detection of this condition is the ultimate purpose of this class. // // We do not always see all dependencies of a type on the stack. If the dependencies // have been loaded earlier, loading stops there and part of the graph may be missing. // However, this is of no concern because we are only interested in types with cyclic // dependencies, and loading any type from such a closure will cause loading the rest // of it. If part of the rest had been loaded earlier, it would have triggered loading // the current type so there's really no way how expanding cycles can go undetected. // // Observation: if there is a cycle in type dependencies, there will be a moment when // we'll have all the types participating in the cycle on the stack. // // Note that having a cycle in type dependencies is OK as long as it is not an expanding // cycle. The simplest example of a cycle that is not expanding is A<T> : B<A<T>>. That // is a perfectly valid type. // // The most interesting methods in this class are: // * code:RecursionGraph.AddDependency - adds edges according to a type's instantiation. // * code:RecursionGraph.HasExpandingCycle - looks for expanding cycles in the graph. // class RecursionGraph { public: // Just records the owner and links to the previous graph. To actually construct the // graph, call CheckForIllegalRecursion. Without CheckForIllegalRecursion, the // functionality is limited to that of code:TypeHandleList. 
RecursionGraph(RecursionGraph *pPrev, TypeHandle thOwner); ~RecursionGraph(); // Adds edges generated by the parent and implemented interfaces; returns TRUE iff // an expanding cycle was found after adding the edges. BOOL CheckForIllegalRecursion(); // Returns TRUE iff the given type is already on the stack (in fact an analogue of // code:TypeHandleList.Exists). This is to prevent recursively loading exactly the // same type. static BOOL HasSeenType(RecursionGraph *pDepGraph, TypeHandle thType); #ifndef DACCESS_COMPILE protected: // Adds the specified MT as a dependency (parent or interface) of the owner. // pExpansionVars used internally. void AddDependency(MethodTable *pMT, TypeHandleList *pExpansionVars = NULL); // Adds an edge from pFromVar to pToVar, non-expanding or expanding. void AddEdge(TypeVarTypeDesc *pFromVar, TypeVarTypeDesc *pToVar, BOOL fExpanding); // Represents a node (a generic variable). class Node { friend class RecursionGraph; union { TypeVarTypeDesc *m_pFromVar; // The generic variable represented by this node. ULONG_PTR m_pFromVarAsPtr; // The lowest bit determines the is-visited state. }; ArrayList m_edges; // The outgoing edges (pointers to TypeVarTypeDesc). enum { NODE_VISITED_FLAG = 0x1, // ORed with m_pFromVar if this node is currently being visited. EDGE_EXPANDING_FLAG = 0x1 // ORed with an m_edges element if the edge is expanding. 
}; public: Node() : m_pFromVar(NULL) { LIMITED_METHOD_CONTRACT; } inline ArrayList *GetEdges() { LIMITED_METHOD_CONTRACT; return &m_edges; } inline TypeVarTypeDesc *GetSourceVar() { LIMITED_METHOD_CONTRACT; return ((TypeVarTypeDesc *)(m_pFromVarAsPtr & ~NODE_VISITED_FLAG)); } inline void SetSourceVar(TypeVarTypeDesc *pVar) { LIMITED_METHOD_CONTRACT; _ASSERTE(!m_pFromVar); m_pFromVar = pVar; } inline BOOL IsVisited() { LIMITED_METHOD_CONTRACT; return (m_pFromVarAsPtr & NODE_VISITED_FLAG); } inline void SetVisited() { LIMITED_METHOD_CONTRACT; m_pFromVarAsPtr |= NODE_VISITED_FLAG; } inline void ClearVisited() { LIMITED_METHOD_CONTRACT; m_pFromVarAsPtr &= ~NODE_VISITED_FLAG; } }; // Recursive worker that checks whether a node is part of an expanding cycle. BOOL HasExpandingCycle(Node *pCurrentNode, Node *pStartNode, BOOL fExpanded = FALSE); protected: // Array of nodes, each representing a generic variable owned by m_thOwner. The // number of nodes is m_thOwner.GetNumGenericArgs() and the order corresponds // to m_thOwner's instantiation. Node *m_pNodes; #endif // !DACCESS_COMPILE protected: RecursionGraph *m_pPrev; TypeHandle m_thOwner; }; // Check for legal instantiations. Returns true if the instantiation is legal. BOOL CheckInstantiation(Instantiation inst); BOOL GetExactInstantiationsOfMethodAndItsClassFromCallInformation( /* in */ MethodDesc *pRepMethod, /* in */ OBJECTREF pThis, /* in */ PTR_VOID pParamTypeArg, /* out*/ TypeHandle *pSpecificClass, /* out*/ MethodDesc** pSpecificMethod); BOOL GetExactInstantiationsOfMethodAndItsClassFromCallInformation( /* in */ MethodDesc *pRepMethod, /* in */ PTR_VOID pExactGenericArgsToken, /* out*/ TypeHandle *pSpecificClass, /* out*/ MethodDesc** pSpecificMethod); }; #endif
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // File: generics.cpp // // // Helper functions for generics prototype // // // ============================================================================ #ifndef _GENERICS_H #define _GENERICS_H #include "typehandle.h" #include "arraylist.h" class CrawlFrame; class DictionaryEntryLayout; // Generics helper functions namespace Generics { // Part of the recursive inheritance graph as defined by ECMA part.II Section 9.2. // // code:MethodTable.DoFullyLoad and code:TypeDesc.DoFullyLoad declare local variable of // this type and initialize it with: // - pointer to the previous (in terms of callstack) RecursionGraph instance, // - the type handle representing the type that is being fully-loaded. // // By walking the RecursionGraph chain, it is possible to tell whether the same type is // not being fully-loaded already. So far this could as well be a description of the // code:TypeHandleList. But aside from the "owner type", RecursionGraph can also hold // part of a directed graph in which nodes are generic variables and edges represent the // is-substituted-by relation. In particular, one RecursionGraph instance maintains nodes // corresponding to generic variables declared by the owner type, and all edges going // out of these nodes. // // As an example consider: // class B<U> { } // class A<T> : B<T> { } // // B's RecursionGraph has one node (U) and no edges. A's RecursionGraph has one node (T) // and one edge from T to U. // // This is how it looks like on the stack: // // A's DoFullyLoad activation - RecursionGraph(NULL, A<>) -> [T] // ^-------- | // | v // B's DoFullyLoad activation - RecursionGraph( | , B<>) -> [U] // // The edges are obviously not real pointers because the destination may not yet be // present on the stack when the edge is being added. 
Instead the edge end points are // identified by TypeVarTypeDesc pointers. Edges come in two flavors - non-expanding // and expanding, please see ECMA for detailed explanation. Finding an expanding cycle // (i.e. cycle with at least one expanding edge) in the graph means that the types // currently on stack are defined recursively and should be refused by the loader. // Reliable detection of this condition is the ultimate purpose of this class. // // We do not always see all dependencies of a type on the stack. If the dependencies // have been loaded earlier, loading stops there and part of the graph may be missing. // However, this is of no concern because we are only interested in types with cyclic // dependencies, and loading any type from such a closure will cause loading the rest // of it. If part of the rest had been loaded earlier, it would have triggered loading // the current type so there's really no way how expanding cycles can go undetected. // // Observation: if there is a cycle in type dependencies, there will be a moment when // we'll have all the types participating in the cycle on the stack. // // Note that having a cycle in type dependencies is OK as long as it is not an expanding // cycle. The simplest example of a cycle that is not expanding is A<T> : B<A<T>>. That // is a perfectly valid type. // // The most interesting methods in this class are: // * code:RecursionGraph.AddDependency - adds edges according to a type's instantiation. // * code:RecursionGraph.HasExpandingCycle - looks for expanding cycles in the graph. // class RecursionGraph { public: // Just records the owner and links to the previous graph. To actually construct the // graph, call CheckForIllegalRecursion. Without CheckForIllegalRecursion, the // functionality is limited to that of code:TypeHandleList. 
RecursionGraph(RecursionGraph *pPrev, TypeHandle thOwner); ~RecursionGraph(); // Adds edges generated by the parent and implemented interfaces; returns TRUE iff // an expanding cycle was found after adding the edges. BOOL CheckForIllegalRecursion(); // Returns TRUE iff the given type is already on the stack (in fact an analogue of // code:TypeHandleList.Exists). This is to prevent recursively loading exactly the // same type. static BOOL HasSeenType(RecursionGraph *pDepGraph, TypeHandle thType); #ifndef DACCESS_COMPILE protected: // Adds the specified MT as a dependency (parent or interface) of the owner. // pExpansionVars used internally. void AddDependency(MethodTable *pMT, TypeHandleList *pExpansionVars = NULL); // Adds an edge from pFromVar to pToVar, non-expanding or expanding. void AddEdge(TypeVarTypeDesc *pFromVar, TypeVarTypeDesc *pToVar, BOOL fExpanding); // Represents a node (a generic variable). class Node { friend class RecursionGraph; union { TypeVarTypeDesc *m_pFromVar; // The generic variable represented by this node. ULONG_PTR m_pFromVarAsPtr; // The lowest bit determines the is-visited state. }; ArrayList m_edges; // The outgoing edges (pointers to TypeVarTypeDesc). enum { NODE_VISITED_FLAG = 0x1, // ORed with m_pFromVar if this node is currently being visited. EDGE_EXPANDING_FLAG = 0x1 // ORed with an m_edges element if the edge is expanding. 
}; public: Node() : m_pFromVar(NULL) { LIMITED_METHOD_CONTRACT; } inline ArrayList *GetEdges() { LIMITED_METHOD_CONTRACT; return &m_edges; } inline TypeVarTypeDesc *GetSourceVar() { LIMITED_METHOD_CONTRACT; return ((TypeVarTypeDesc *)(m_pFromVarAsPtr & ~NODE_VISITED_FLAG)); } inline void SetSourceVar(TypeVarTypeDesc *pVar) { LIMITED_METHOD_CONTRACT; _ASSERTE(!m_pFromVar); m_pFromVar = pVar; } inline BOOL IsVisited() { LIMITED_METHOD_CONTRACT; return (m_pFromVarAsPtr & NODE_VISITED_FLAG); } inline void SetVisited() { LIMITED_METHOD_CONTRACT; m_pFromVarAsPtr |= NODE_VISITED_FLAG; } inline void ClearVisited() { LIMITED_METHOD_CONTRACT; m_pFromVarAsPtr &= ~NODE_VISITED_FLAG; } }; // Recursive worker that checks whether a node is part of an expanding cycle. BOOL HasExpandingCycle(Node *pCurrentNode, Node *pStartNode, BOOL fExpanded = FALSE); protected: // Array of nodes, each representing a generic variable owned by m_thOwner. The // number of nodes is m_thOwner.GetNumGenericArgs() and the order corresponds // to m_thOwner's instantiation. Node *m_pNodes; #endif // !DACCESS_COMPILE protected: RecursionGraph *m_pPrev; TypeHandle m_thOwner; }; // Check for legal instantiations. Returns true if the instantiation is legal. BOOL CheckInstantiation(Instantiation inst); BOOL GetExactInstantiationsOfMethodAndItsClassFromCallInformation( /* in */ MethodDesc *pRepMethod, /* in */ OBJECTREF pThis, /* in */ PTR_VOID pParamTypeArg, /* out*/ TypeHandle *pSpecificClass, /* out*/ MethodDesc** pSpecificMethod); BOOL GetExactInstantiationsOfMethodAndItsClassFromCallInformation( /* in */ MethodDesc *pRepMethod, /* in */ PTR_VOID pExactGenericArgsToken, /* out*/ TypeHandle *pSpecificClass, /* out*/ MethodDesc** pSpecificMethod); }; #endif
-1
dotnet/runtime
66,234
Remove compiler warning suppression
Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
AaronRobinsonMSFT
2022-03-05T03:45:49Z
2022-03-09T00:57:12Z
220e67755ae6323a01011b83070dff6f84b02519
853e494abd1c1e12d28a76829b4680af7f694afa
Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
./src/tests/Interop/PInvoke/Vector2_3_4/CMakeLists.txt
include ("${CLR_INTEROP_TEST_ROOT}/Interop.cmake") set(SOURCES Vector2_3_4TestNative.cpp ) add_library (Vector2_3_4TestNative SHARED ${SOURCES}) target_link_libraries(Vector2_3_4TestNative ${LINK_LIBRARIES_ADDITIONAL}) install (TARGETS Vector2_3_4TestNative DESTINATION bin)
include ("${CLR_INTEROP_TEST_ROOT}/Interop.cmake") set(SOURCES Vector2_3_4TestNative.cpp ) add_library (Vector2_3_4TestNative SHARED ${SOURCES}) target_link_libraries(Vector2_3_4TestNative ${LINK_LIBRARIES_ADDITIONAL}) install (TARGETS Vector2_3_4TestNative DESTINATION bin)
-1
dotnet/runtime
66,234
Remove compiler warning suppression
Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
AaronRobinsonMSFT
2022-03-05T03:45:49Z
2022-03-09T00:57:12Z
220e67755ae6323a01011b83070dff6f84b02519
853e494abd1c1e12d28a76829b4680af7f694afa
Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
./src/coreclr/pal/src/include/pal/corunix.inl
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*++ Module Name: --*/ #ifndef _CORUNIX_INL #define _CORUNIX_INL #include "corunix.hpp" #include "dbgmsg.h" namespace CorUnix { bool CAllowedObjectTypes::IsTypeAllowed(PalObjectTypeId eTypeId) { _ASSERTE(eTypeId != ObjectTypeIdCount); return m_rgfAllowedTypes[eTypeId]; }; CAllowedObjectTypes::CAllowedObjectTypes( PalObjectTypeId rgAllowedTypes[], DWORD dwAllowedTypeCount ) { ZeroMemory(m_rgfAllowedTypes, sizeof(m_rgfAllowedTypes)); for (DWORD dw = 0; dw < dwAllowedTypeCount; dw += 1) { _ASSERTE(rgAllowedTypes[dw] != ObjectTypeIdCount); m_rgfAllowedTypes[rgAllowedTypes[dw]] = TRUE; } }; CAllowedObjectTypes::CAllowedObjectTypes( PalObjectTypeId eAllowedType ) { ZeroMemory(m_rgfAllowedTypes, sizeof(m_rgfAllowedTypes)); _ASSERTE(eAllowedType != ObjectTypeIdCount); m_rgfAllowedTypes[eAllowedType] = TRUE; }; } #endif // _CORUNIX_H
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*++ Module Name: --*/ #ifndef _CORUNIX_INL #define _CORUNIX_INL #include "corunix.hpp" #include "dbgmsg.h" namespace CorUnix { bool CAllowedObjectTypes::IsTypeAllowed(PalObjectTypeId eTypeId) { _ASSERTE(eTypeId != ObjectTypeIdCount); return m_rgfAllowedTypes[eTypeId]; }; CAllowedObjectTypes::CAllowedObjectTypes( PalObjectTypeId rgAllowedTypes[], DWORD dwAllowedTypeCount ) { ZeroMemory(m_rgfAllowedTypes, sizeof(m_rgfAllowedTypes)); for (DWORD dw = 0; dw < dwAllowedTypeCount; dw += 1) { _ASSERTE(rgAllowedTypes[dw] != ObjectTypeIdCount); m_rgfAllowedTypes[rgAllowedTypes[dw]] = TRUE; } }; CAllowedObjectTypes::CAllowedObjectTypes( PalObjectTypeId eAllowedType ) { ZeroMemory(m_rgfAllowedTypes, sizeof(m_rgfAllowedTypes)); _ASSERTE(eAllowedType != ObjectTypeIdCount); m_rgfAllowedTypes[eAllowedType] = TRUE; }; } #endif // _CORUNIX_H
-1
dotnet/runtime
66,234
Remove compiler warning suppression
Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
AaronRobinsonMSFT
2022-03-05T03:45:49Z
2022-03-09T00:57:12Z
220e67755ae6323a01011b83070dff6f84b02519
853e494abd1c1e12d28a76829b4680af7f694afa
Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
./src/native/public/mono/metadata/details/image-functions.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // This file does not have ifdef guards, it is meant to be included multiple times with different definitions of MONO_API_FUNCTION #ifndef MONO_API_FUNCTION #error "MONO_API_FUNCTION(ret,name,args) macro not defined before including function declaration header" #endif MONO_API_FUNCTION(void, mono_images_init, (void)) MONO_API_FUNCTION(MONO_RT_EXTERNAL_ONLY void, mono_images_cleanup, (void)) MONO_API_FUNCTION(MonoImage *, mono_image_open, (const char *fname, MonoImageOpenStatus *status)) MONO_API_FUNCTION(MONO_RT_EXTERNAL_ONLY MonoImage *, mono_image_open_full, (const char *fname, MonoImageOpenStatus *status, mono_bool refonly)) MONO_API_FUNCTION(MONO_RT_EXTERNAL_ONLY MonoImage *, mono_pe_file_open, (const char *fname, MonoImageOpenStatus *status)) MONO_API_FUNCTION(MONO_RT_EXTERNAL_ONLY MonoImage *, mono_image_open_from_data, (char *data, uint32_t data_len, mono_bool need_copy, MonoImageOpenStatus *status)) MONO_API_FUNCTION(MONO_RT_EXTERNAL_ONLY MonoImage *, mono_image_open_from_data_full, (char *data, uint32_t data_len, mono_bool need_copy, MonoImageOpenStatus *status, mono_bool refonly)) MONO_API_FUNCTION(MONO_RT_EXTERNAL_ONLY MonoImage *, mono_image_open_from_data_with_name, (char *data, uint32_t data_len, mono_bool need_copy, MonoImageOpenStatus *status, mono_bool refonly, const char *name)) MONO_API_FUNCTION(void, mono_image_fixup_vtable, (MonoImage *image)) MONO_API_FUNCTION(MONO_RT_EXTERNAL_ONLY MonoImage *, mono_image_loaded, (const char *name)) MONO_API_FUNCTION(MONO_RT_EXTERNAL_ONLY MonoImage *, mono_image_loaded_full, (const char *name, mono_bool refonly)) MONO_API_FUNCTION(MONO_RT_EXTERNAL_ONLY MonoImage *, mono_image_loaded_by_guid, (const char *guid)) MONO_API_FUNCTION(MONO_RT_EXTERNAL_ONLY MonoImage *, mono_image_loaded_by_guid_full, (const char *guid, mono_bool refonly)) MONO_API_FUNCTION(void, mono_image_init, 
(MonoImage *image)) MONO_API_FUNCTION(void, mono_image_close, (MonoImage *image)) MONO_API_FUNCTION(void, mono_image_addref, (MonoImage *image)) MONO_API_FUNCTION(const char *, mono_image_strerror, (MonoImageOpenStatus status)) MONO_API_FUNCTION(int, mono_image_ensure_section, (MonoImage *image, const char *section)) MONO_API_FUNCTION(int, mono_image_ensure_section_idx, (MonoImage *image, int section)) MONO_API_FUNCTION(uint32_t, mono_image_get_entry_point, (MonoImage *image)) MONO_API_FUNCTION(const char *, mono_image_get_resource, (MonoImage *image, uint32_t offset, uint32_t *size)) MONO_API_FUNCTION(MONO_RT_EXTERNAL_ONLY MonoImage*, mono_image_load_file_for_image, (MonoImage *image, int fileidx)) MONO_API_FUNCTION(MONO_RT_EXTERNAL_ONLY MonoImage*, mono_image_load_module, (MonoImage *image, int idx)) MONO_API_FUNCTION(const char*, mono_image_get_name, (MonoImage *image)) MONO_API_FUNCTION(const char*, mono_image_get_filename, (MonoImage *image)) MONO_API_FUNCTION(const char*, mono_image_get_guid, (MonoImage *image)) MONO_API_FUNCTION(MonoAssembly*, mono_image_get_assembly, (MonoImage *image)) MONO_API_FUNCTION(mono_bool, mono_image_is_dynamic, (MonoImage *image)) MONO_API_FUNCTION(char*, mono_image_rva_map, (MonoImage *image, uint32_t rva)) MONO_API_FUNCTION(const MonoTableInfo *, mono_image_get_table_info, (MonoImage *image, int table_id)) MONO_API_FUNCTION(int, mono_image_get_table_rows, (MonoImage *image, int table_id)) MONO_API_FUNCTION(int, mono_table_info_get_rows, (const MonoTableInfo *table)) /* This actually returns a MonoPEResourceDataEntry *, but declaring it * causes an include file loop. 
*/ MONO_API_FUNCTION(void*, mono_image_lookup_resource, (MonoImage *image, uint32_t res_id, uint32_t lang_id, mono_unichar2 *name)) MONO_API_FUNCTION(const char*, mono_image_get_public_key, (MonoImage *image, uint32_t *size)) MONO_API_FUNCTION(const char*, mono_image_get_strong_name, (MonoImage *image, uint32_t *size)) MONO_API_FUNCTION(uint32_t, mono_image_strong_name_position, (MonoImage *image, uint32_t *size)) MONO_API_FUNCTION(void, mono_image_add_to_name_cache, (MonoImage *image, const char *nspace, const char *name, uint32_t idx)) MONO_API_FUNCTION(mono_bool, mono_image_has_authenticode_entry, (MonoImage *image))
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // This file does not have ifdef guards, it is meant to be included multiple times with different definitions of MONO_API_FUNCTION #ifndef MONO_API_FUNCTION #error "MONO_API_FUNCTION(ret,name,args) macro not defined before including function declaration header" #endif MONO_API_FUNCTION(void, mono_images_init, (void)) MONO_API_FUNCTION(MONO_RT_EXTERNAL_ONLY void, mono_images_cleanup, (void)) MONO_API_FUNCTION(MonoImage *, mono_image_open, (const char *fname, MonoImageOpenStatus *status)) MONO_API_FUNCTION(MONO_RT_EXTERNAL_ONLY MonoImage *, mono_image_open_full, (const char *fname, MonoImageOpenStatus *status, mono_bool refonly)) MONO_API_FUNCTION(MONO_RT_EXTERNAL_ONLY MonoImage *, mono_pe_file_open, (const char *fname, MonoImageOpenStatus *status)) MONO_API_FUNCTION(MONO_RT_EXTERNAL_ONLY MonoImage *, mono_image_open_from_data, (char *data, uint32_t data_len, mono_bool need_copy, MonoImageOpenStatus *status)) MONO_API_FUNCTION(MONO_RT_EXTERNAL_ONLY MonoImage *, mono_image_open_from_data_full, (char *data, uint32_t data_len, mono_bool need_copy, MonoImageOpenStatus *status, mono_bool refonly)) MONO_API_FUNCTION(MONO_RT_EXTERNAL_ONLY MonoImage *, mono_image_open_from_data_with_name, (char *data, uint32_t data_len, mono_bool need_copy, MonoImageOpenStatus *status, mono_bool refonly, const char *name)) MONO_API_FUNCTION(void, mono_image_fixup_vtable, (MonoImage *image)) MONO_API_FUNCTION(MONO_RT_EXTERNAL_ONLY MonoImage *, mono_image_loaded, (const char *name)) MONO_API_FUNCTION(MONO_RT_EXTERNAL_ONLY MonoImage *, mono_image_loaded_full, (const char *name, mono_bool refonly)) MONO_API_FUNCTION(MONO_RT_EXTERNAL_ONLY MonoImage *, mono_image_loaded_by_guid, (const char *guid)) MONO_API_FUNCTION(MONO_RT_EXTERNAL_ONLY MonoImage *, mono_image_loaded_by_guid_full, (const char *guid, mono_bool refonly)) MONO_API_FUNCTION(void, mono_image_init, 
(MonoImage *image)) MONO_API_FUNCTION(void, mono_image_close, (MonoImage *image)) MONO_API_FUNCTION(void, mono_image_addref, (MonoImage *image)) MONO_API_FUNCTION(const char *, mono_image_strerror, (MonoImageOpenStatus status)) MONO_API_FUNCTION(int, mono_image_ensure_section, (MonoImage *image, const char *section)) MONO_API_FUNCTION(int, mono_image_ensure_section_idx, (MonoImage *image, int section)) MONO_API_FUNCTION(uint32_t, mono_image_get_entry_point, (MonoImage *image)) MONO_API_FUNCTION(const char *, mono_image_get_resource, (MonoImage *image, uint32_t offset, uint32_t *size)) MONO_API_FUNCTION(MONO_RT_EXTERNAL_ONLY MonoImage*, mono_image_load_file_for_image, (MonoImage *image, int fileidx)) MONO_API_FUNCTION(MONO_RT_EXTERNAL_ONLY MonoImage*, mono_image_load_module, (MonoImage *image, int idx)) MONO_API_FUNCTION(const char*, mono_image_get_name, (MonoImage *image)) MONO_API_FUNCTION(const char*, mono_image_get_filename, (MonoImage *image)) MONO_API_FUNCTION(const char*, mono_image_get_guid, (MonoImage *image)) MONO_API_FUNCTION(MonoAssembly*, mono_image_get_assembly, (MonoImage *image)) MONO_API_FUNCTION(mono_bool, mono_image_is_dynamic, (MonoImage *image)) MONO_API_FUNCTION(char*, mono_image_rva_map, (MonoImage *image, uint32_t rva)) MONO_API_FUNCTION(const MonoTableInfo *, mono_image_get_table_info, (MonoImage *image, int table_id)) MONO_API_FUNCTION(int, mono_image_get_table_rows, (MonoImage *image, int table_id)) MONO_API_FUNCTION(int, mono_table_info_get_rows, (const MonoTableInfo *table)) /* This actually returns a MonoPEResourceDataEntry *, but declaring it * causes an include file loop. 
*/ MONO_API_FUNCTION(void*, mono_image_lookup_resource, (MonoImage *image, uint32_t res_id, uint32_t lang_id, mono_unichar2 *name)) MONO_API_FUNCTION(const char*, mono_image_get_public_key, (MonoImage *image, uint32_t *size)) MONO_API_FUNCTION(const char*, mono_image_get_strong_name, (MonoImage *image, uint32_t *size)) MONO_API_FUNCTION(uint32_t, mono_image_strong_name_position, (MonoImage *image, uint32_t *size)) MONO_API_FUNCTION(void, mono_image_add_to_name_cache, (MonoImage *image, const char *nspace, const char *name, uint32_t idx)) MONO_API_FUNCTION(mono_bool, mono_image_has_authenticode_entry, (MonoImage *image))
-1
dotnet/runtime
66,234
Remove compiler warning suppression
Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
AaronRobinsonMSFT
2022-03-05T03:45:49Z
2022-03-09T00:57:12Z
220e67755ae6323a01011b83070dff6f84b02519
853e494abd1c1e12d28a76829b4680af7f694afa
Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
./src/mono/mono/eglib/test/dir.c
#include <config.h> #include <glib.h> #include <string.h> #include <stdio.h> #ifdef HAVE_UNISTD_H #include <unistd.h> #endif #ifdef G_OS_UNIX #include <pthread.h> #endif #include "test.h" /* This test is just to be used with valgrind */ static RESULT test_dir (void) { GDir *dir; GError *gerror; const gchar *name; /* dir = g_dir_open (NULL, 0, NULL); */ dir = g_dir_open ("", 0, NULL); if (dir != NULL) return FAILED ("1 Should be an error"); dir = g_dir_open ("", 9, NULL); if (dir != NULL) return FAILED ("2 Should be an error"); gerror = NULL; dir = g_dir_open (".ljasdslakjd", 9, &gerror); if (dir != NULL) return FAILED ("3 opendir should fail"); if (gerror == NULL) return FAILED ("4 got no error"); g_error_free (gerror); gerror = NULL; dir = g_dir_open (g_get_tmp_dir (), 9, &gerror); if (dir == NULL) return FAILED ("5 opendir should succeed"); if (gerror != NULL) return FAILED ("6 got an error"); name = NULL; name = g_dir_read_name (dir); if (name == NULL) return FAILED ("7 didn't read a file name"); while ((name = g_dir_read_name (dir)) != NULL) { if (strcmp (name, ".") == 0) return FAILED (". directory found"); if (strcmp (name, "..") == 0) return FAILED (".. directory found"); } g_dir_close (dir); return OK; } static Test dir_tests [] = { {"g_dir_*", test_dir}, {NULL, NULL} }; DEFINE_TEST_GROUP_INIT(dir_tests_init, dir_tests)
#include <config.h> #include <glib.h> #include <string.h> #include <stdio.h> #ifdef HAVE_UNISTD_H #include <unistd.h> #endif #ifdef G_OS_UNIX #include <pthread.h> #endif #include "test.h" /* This test is just to be used with valgrind */ static RESULT test_dir (void) { GDir *dir; GError *gerror; const gchar *name; /* dir = g_dir_open (NULL, 0, NULL); */ dir = g_dir_open ("", 0, NULL); if (dir != NULL) return FAILED ("1 Should be an error"); dir = g_dir_open ("", 9, NULL); if (dir != NULL) return FAILED ("2 Should be an error"); gerror = NULL; dir = g_dir_open (".ljasdslakjd", 9, &gerror); if (dir != NULL) return FAILED ("3 opendir should fail"); if (gerror == NULL) return FAILED ("4 got no error"); g_error_free (gerror); gerror = NULL; dir = g_dir_open (g_get_tmp_dir (), 9, &gerror); if (dir == NULL) return FAILED ("5 opendir should succeed"); if (gerror != NULL) return FAILED ("6 got an error"); name = NULL; name = g_dir_read_name (dir); if (name == NULL) return FAILED ("7 didn't read a file name"); while ((name = g_dir_read_name (dir)) != NULL) { if (strcmp (name, ".") == 0) return FAILED (". directory found"); if (strcmp (name, "..") == 0) return FAILED (".. directory found"); } g_dir_close (dir); return OK; } static Test dir_tests [] = { {"g_dir_*", test_dir}, {NULL, NULL} }; DEFINE_TEST_GROUP_INIT(dir_tests_init, dir_tests)
-1
dotnet/runtime
66,234
Remove compiler warning suppression
Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
AaronRobinsonMSFT
2022-03-05T03:45:49Z
2022-03-09T00:57:12Z
220e67755ae6323a01011b83070dff6f84b02519
853e494abd1c1e12d28a76829b4680af7f694afa
Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
./src/coreclr/pal/tests/palsuite/threading/CriticalSectionFunctions/test4/test4.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================ ** ** Source: criticalsectionfunctions/test4/test4.c ** ** Purpose: Test to see if threads blocked on a CRITICAL_SECTION object will ** be released in an orderly manner. This case looks at the following ** scenario. If one thread owns a CRITICAL_SECTION object and two threads ** block in EnterCriticalSection, trying to hold the already owned ** CRITICAL_SECTION object, when the first thread releases the CRITICAL_SECTION ** object, will one and only one of the waiters get unblocked? ** ** Dependencies: CreateThread ** InitializeCriticalSection ** EnterCriticalSection ** LeaveCriticalSection ** DeleteCriticalSection ** Sleep ** WaitForSingleObject ** ** **=========================================================*/ #include <palsuite.h> #define NUM_BLOCKING_THREADS 2 BOOL bTestResult_CriticalSectionFunctions_test4; CRITICAL_SECTION CriticalSection_CriticalSectionFunctions_test4; HANDLE hThread_CriticalSectionFunctions_test4[NUM_BLOCKING_THREADS]; HANDLE hEvent_CriticalSectionFunctions_test4; DWORD dwThreadId_CriticalSectionFunctions_test4[NUM_BLOCKING_THREADS]; volatile int flags_CriticalSectionFunctions_test4[NUM_BLOCKING_THREADS] = {0,0}; DWORD PALAPI ThreadTest1_CriticalSectionFunctions_test4(LPVOID lpParam) { EnterCriticalSection ( &CriticalSection_CriticalSectionFunctions_test4 ); flags_CriticalSectionFunctions_test4[0] = 1; return 0; } DWORD PALAPI ThreadTest2_CriticalSectionFunctions_test4(LPVOID lpParam) { EnterCriticalSection ( &CriticalSection_CriticalSectionFunctions_test4 ); flags_CriticalSectionFunctions_test4[1] = 1; return 0; } PALTEST(threading_CriticalSectionFunctions_test4_paltest_criticalsectionfunctions_test4, "threading/CriticalSectionFunctions/test4/paltest_criticalsectionfunctions_test4") { DWORD dwRet; DWORD dwRet1; 
bTestResult_CriticalSectionFunctions_test4 = FAIL; if ((PAL_Initialize(argc,argv)) != 0) { return(bTestResult_CriticalSectionFunctions_test4); } /* * Create Critical Section Object */ InitializeCriticalSection ( &CriticalSection_CriticalSectionFunctions_test4 ); EnterCriticalSection ( &CriticalSection_CriticalSectionFunctions_test4 ); hThread_CriticalSectionFunctions_test4[0] = CreateThread(NULL, 0, &ThreadTest1_CriticalSectionFunctions_test4, (LPVOID) 0, CREATE_SUSPENDED, &dwThreadId_CriticalSectionFunctions_test4[0]); if (hThread_CriticalSectionFunctions_test4[0] == NULL) { Trace("PALSUITE ERROR: CreateThread(%p, %d, %p, %p, %d, %p) call " "failed.\nGetLastError returned %d.\n", NULL, 0, &ThreadTest1_CriticalSectionFunctions_test4, (LPVOID) 0, CREATE_SUSPENDED, &dwThreadId_CriticalSectionFunctions_test4[0], GetLastError()); LeaveCriticalSection(&CriticalSection_CriticalSectionFunctions_test4); DeleteCriticalSection ( &CriticalSection_CriticalSectionFunctions_test4 ); Fail(""); } hThread_CriticalSectionFunctions_test4[1] = CreateThread(NULL, 0, &ThreadTest2_CriticalSectionFunctions_test4, (LPVOID) 0, CREATE_SUSPENDED, &dwThreadId_CriticalSectionFunctions_test4[1]); if (hThread_CriticalSectionFunctions_test4[1] == NULL) { Trace("PALSUITE ERROR: CreateThread(%p, %d, %p, %p, %d, %p) call " "failed.\nGetLastError returned %d.\n", NULL, 0, &ThreadTest2_CriticalSectionFunctions_test4, (LPVOID) 0, CREATE_SUSPENDED, &dwThreadId_CriticalSectionFunctions_test4[1], GetLastError()); LeaveCriticalSection(&CriticalSection_CriticalSectionFunctions_test4); dwRet = ResumeThread(hThread_CriticalSectionFunctions_test4[0]); if (-1 == dwRet) { Trace("PALSUITE ERROR: ResumeThread(%p) call failed.\n" "GetLastError returned '%d'.\n", hThread_CriticalSectionFunctions_test4[0], GetLastError()); } dwRet = WaitForSingleObject(hThread_CriticalSectionFunctions_test4[0], 10000); if (WAIT_OBJECT_0 == dwRet) { Trace("PALSUITE ERROR: WaitForSingleObject(%p, %d) call " "failed. 
'%d' was returned instead of the expected '%d'.\n" "GetLastError returned '%d'.\n", hThread_CriticalSectionFunctions_test4[0], 10000, dwRet, WAIT_OBJECT_0, GetLastError()); } if (0 == CloseHandle(hThread_CriticalSectionFunctions_test4[0])) { Trace("PALSUITE NOTIFICATION: CloseHandle(%p) call failed.\n" "GetLastError returned %d. Not failing tests.\n", hThread_CriticalSectionFunctions_test4[0], GetLastError()); } DeleteCriticalSection(&CriticalSection_CriticalSectionFunctions_test4); Fail(""); } /* * Set other thread priorities to be higher than ours & Sleep to ensure * we give up the processor. */ dwRet = (DWORD) SetThreadPriority(hThread_CriticalSectionFunctions_test4[0], THREAD_PRIORITY_ABOVE_NORMAL); if (0 == dwRet) { Trace("PALSUITE ERROR: SetThreadPriority(%p, %d) call failed.\n" "GetLastError returned %d", hThread_CriticalSectionFunctions_test4[0], THREAD_PRIORITY_ABOVE_NORMAL, GetLastError()); } dwRet = (DWORD) SetThreadPriority(hThread_CriticalSectionFunctions_test4[1], THREAD_PRIORITY_ABOVE_NORMAL); if (0 == dwRet) { Trace("PALSUITE ERROR: SetThreadPriority(%p, %d) call failed.\n" "GetLastError returned %d", hThread_CriticalSectionFunctions_test4[1], THREAD_PRIORITY_ABOVE_NORMAL, GetLastError()); } dwRet = ResumeThread(hThread_CriticalSectionFunctions_test4[0]); if (-1 == dwRet) { Trace("PALSUITE ERROR: ResumeThread(%p, %d) call failed.\n" "GetLastError returned %d", hThread_CriticalSectionFunctions_test4[0], GetLastError() ); } dwRet = ResumeThread(hThread_CriticalSectionFunctions_test4[1]); if (-1 == dwRet) { Trace("PALSUITE ERROR: ResumeThread(%p, %d) call failed.\n" "GetLastError returned %d", hThread_CriticalSectionFunctions_test4[0], GetLastError()); } Sleep (0); LeaveCriticalSection (&CriticalSection_CriticalSectionFunctions_test4); dwRet = WaitForSingleObject(hThread_CriticalSectionFunctions_test4[0], 10000); dwRet1 = WaitForSingleObject(hThread_CriticalSectionFunctions_test4[1], 10000); if ((WAIT_OBJECT_0 == dwRet) || (WAIT_OBJECT_0 == dwRet1)) { 
if ((1 == flags_CriticalSectionFunctions_test4[0] && 0 == flags_CriticalSectionFunctions_test4[1]) || (0 == flags_CriticalSectionFunctions_test4[0] && 1 == flags_CriticalSectionFunctions_test4[1])) { bTestResult_CriticalSectionFunctions_test4 = PASS; } else { bTestResult_CriticalSectionFunctions_test4 = FAIL; Trace ("PALSUITE ERROR: flags[%d] = {%d,%d}. These values are" "inconsistent.\nCriticalSection test failed.\n", NUM_BLOCKING_THREADS, flags_CriticalSectionFunctions_test4[0], flags_CriticalSectionFunctions_test4[1]); } /* Fail the test if both threads returned WAIT_OBJECT_0 */ if ((WAIT_OBJECT_0 == dwRet) && (WAIT_OBJECT_0 == dwRet1)) { bTestResult_CriticalSectionFunctions_test4 = FAIL; Trace ("PALSUITE ERROR: WaitForSingleObject(%p, %d) and " "WaitForSingleObject(%p, %d)\nboth returned dwRet = '%d'\n" "One should have returned WAIT_TIMEOUT ('%d').\n", hThread_CriticalSectionFunctions_test4[0], 10000, hThread_CriticalSectionFunctions_test4[1], 10000, dwRet, WAIT_TIMEOUT); } } else { bTestResult_CriticalSectionFunctions_test4 = FAIL; Trace ("PALSUITE ERROR: WaitForSingleObject(%p, %d) and " "WaitForSingleObject(%p, %d)\nReturned dwRet = '%d' and\n" "dwRet1 = '%d' respectively.\n", hThread_CriticalSectionFunctions_test4[0], 10000, hThread_CriticalSectionFunctions_test4[1], 10000, dwRet, dwRet1); } if (WAIT_OBJECT_0 == dwRet) { if (0 == CloseHandle(hThread_CriticalSectionFunctions_test4[0])) { Trace("PALSUITE NOTIFICATION: CloseHandle(%p) call failed.\n" "GetLastError returned %d. Not failing tests.\n", hThread_CriticalSectionFunctions_test4[0], GetLastError()); } } if (WAIT_OBJECT_0 == dwRet1) { if (0 == CloseHandle(hThread_CriticalSectionFunctions_test4[1])) { Trace("PALSUITE NOTIFICATION: CloseHandle(%p) call failed.\n" "GetLastError returned %d. 
Not failing tests.\n", hThread_CriticalSectionFunctions_test4[1], GetLastError()); } } /* Leaking the CS on purpose, since there is still a thread waiting on it */ PAL_TerminateEx(bTestResult_CriticalSectionFunctions_test4); return (bTestResult_CriticalSectionFunctions_test4); }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================ ** ** Source: criticalsectionfunctions/test4/test4.c ** ** Purpose: Test to see if threads blocked on a CRITICAL_SECTION object will ** be released in an orderly manner. This case looks at the following ** scenario. If one thread owns a CRITICAL_SECTION object and two threads ** block in EnterCriticalSection, trying to hold the already owned ** CRITICAL_SECTION object, when the first thread releases the CRITICAL_SECTION ** object, will one and only one of the waiters get unblocked? ** ** Dependencies: CreateThread ** InitializeCriticalSection ** EnterCriticalSection ** LeaveCriticalSection ** DeleteCriticalSection ** Sleep ** WaitForSingleObject ** ** **=========================================================*/ #include <palsuite.h> #define NUM_BLOCKING_THREADS 2 BOOL bTestResult_CriticalSectionFunctions_test4; CRITICAL_SECTION CriticalSection_CriticalSectionFunctions_test4; HANDLE hThread_CriticalSectionFunctions_test4[NUM_BLOCKING_THREADS]; HANDLE hEvent_CriticalSectionFunctions_test4; DWORD dwThreadId_CriticalSectionFunctions_test4[NUM_BLOCKING_THREADS]; volatile int flags_CriticalSectionFunctions_test4[NUM_BLOCKING_THREADS] = {0,0}; DWORD PALAPI ThreadTest1_CriticalSectionFunctions_test4(LPVOID lpParam) { EnterCriticalSection ( &CriticalSection_CriticalSectionFunctions_test4 ); flags_CriticalSectionFunctions_test4[0] = 1; return 0; } DWORD PALAPI ThreadTest2_CriticalSectionFunctions_test4(LPVOID lpParam) { EnterCriticalSection ( &CriticalSection_CriticalSectionFunctions_test4 ); flags_CriticalSectionFunctions_test4[1] = 1; return 0; } PALTEST(threading_CriticalSectionFunctions_test4_paltest_criticalsectionfunctions_test4, "threading/CriticalSectionFunctions/test4/paltest_criticalsectionfunctions_test4") { DWORD dwRet; DWORD dwRet1; 
bTestResult_CriticalSectionFunctions_test4 = FAIL; if ((PAL_Initialize(argc,argv)) != 0) { return(bTestResult_CriticalSectionFunctions_test4); } /* * Create Critical Section Object */ InitializeCriticalSection ( &CriticalSection_CriticalSectionFunctions_test4 ); EnterCriticalSection ( &CriticalSection_CriticalSectionFunctions_test4 ); hThread_CriticalSectionFunctions_test4[0] = CreateThread(NULL, 0, &ThreadTest1_CriticalSectionFunctions_test4, (LPVOID) 0, CREATE_SUSPENDED, &dwThreadId_CriticalSectionFunctions_test4[0]); if (hThread_CriticalSectionFunctions_test4[0] == NULL) { Trace("PALSUITE ERROR: CreateThread(%p, %d, %p, %p, %d, %p) call " "failed.\nGetLastError returned %d.\n", NULL, 0, &ThreadTest1_CriticalSectionFunctions_test4, (LPVOID) 0, CREATE_SUSPENDED, &dwThreadId_CriticalSectionFunctions_test4[0], GetLastError()); LeaveCriticalSection(&CriticalSection_CriticalSectionFunctions_test4); DeleteCriticalSection ( &CriticalSection_CriticalSectionFunctions_test4 ); Fail(""); } hThread_CriticalSectionFunctions_test4[1] = CreateThread(NULL, 0, &ThreadTest2_CriticalSectionFunctions_test4, (LPVOID) 0, CREATE_SUSPENDED, &dwThreadId_CriticalSectionFunctions_test4[1]); if (hThread_CriticalSectionFunctions_test4[1] == NULL) { Trace("PALSUITE ERROR: CreateThread(%p, %d, %p, %p, %d, %p) call " "failed.\nGetLastError returned %d.\n", NULL, 0, &ThreadTest2_CriticalSectionFunctions_test4, (LPVOID) 0, CREATE_SUSPENDED, &dwThreadId_CriticalSectionFunctions_test4[1], GetLastError()); LeaveCriticalSection(&CriticalSection_CriticalSectionFunctions_test4); dwRet = ResumeThread(hThread_CriticalSectionFunctions_test4[0]); if (-1 == dwRet) { Trace("PALSUITE ERROR: ResumeThread(%p) call failed.\n" "GetLastError returned '%d'.\n", hThread_CriticalSectionFunctions_test4[0], GetLastError()); } dwRet = WaitForSingleObject(hThread_CriticalSectionFunctions_test4[0], 10000); if (WAIT_OBJECT_0 == dwRet) { Trace("PALSUITE ERROR: WaitForSingleObject(%p, %d) call " "failed. 
'%d' was returned instead of the expected '%d'.\n" "GetLastError returned '%d'.\n", hThread_CriticalSectionFunctions_test4[0], 10000, dwRet, WAIT_OBJECT_0, GetLastError()); } if (0 == CloseHandle(hThread_CriticalSectionFunctions_test4[0])) { Trace("PALSUITE NOTIFICATION: CloseHandle(%p) call failed.\n" "GetLastError returned %d. Not failing tests.\n", hThread_CriticalSectionFunctions_test4[0], GetLastError()); } DeleteCriticalSection(&CriticalSection_CriticalSectionFunctions_test4); Fail(""); } /* * Set other thread priorities to be higher than ours & Sleep to ensure * we give up the processor. */ dwRet = (DWORD) SetThreadPriority(hThread_CriticalSectionFunctions_test4[0], THREAD_PRIORITY_ABOVE_NORMAL); if (0 == dwRet) { Trace("PALSUITE ERROR: SetThreadPriority(%p, %d) call failed.\n" "GetLastError returned %d", hThread_CriticalSectionFunctions_test4[0], THREAD_PRIORITY_ABOVE_NORMAL, GetLastError()); } dwRet = (DWORD) SetThreadPriority(hThread_CriticalSectionFunctions_test4[1], THREAD_PRIORITY_ABOVE_NORMAL); if (0 == dwRet) { Trace("PALSUITE ERROR: SetThreadPriority(%p, %d) call failed.\n" "GetLastError returned %d", hThread_CriticalSectionFunctions_test4[1], THREAD_PRIORITY_ABOVE_NORMAL, GetLastError()); } dwRet = ResumeThread(hThread_CriticalSectionFunctions_test4[0]); if (-1 == dwRet) { Trace("PALSUITE ERROR: ResumeThread(%p, %d) call failed.\n" "GetLastError returned %d", hThread_CriticalSectionFunctions_test4[0], GetLastError() ); } dwRet = ResumeThread(hThread_CriticalSectionFunctions_test4[1]); if (-1 == dwRet) { Trace("PALSUITE ERROR: ResumeThread(%p, %d) call failed.\n" "GetLastError returned %d", hThread_CriticalSectionFunctions_test4[0], GetLastError()); } Sleep (0); LeaveCriticalSection (&CriticalSection_CriticalSectionFunctions_test4); dwRet = WaitForSingleObject(hThread_CriticalSectionFunctions_test4[0], 10000); dwRet1 = WaitForSingleObject(hThread_CriticalSectionFunctions_test4[1], 10000); if ((WAIT_OBJECT_0 == dwRet) || (WAIT_OBJECT_0 == dwRet1)) { 
if ((1 == flags_CriticalSectionFunctions_test4[0] && 0 == flags_CriticalSectionFunctions_test4[1]) || (0 == flags_CriticalSectionFunctions_test4[0] && 1 == flags_CriticalSectionFunctions_test4[1])) { bTestResult_CriticalSectionFunctions_test4 = PASS; } else { bTestResult_CriticalSectionFunctions_test4 = FAIL; Trace ("PALSUITE ERROR: flags[%d] = {%d,%d}. These values are" "inconsistent.\nCriticalSection test failed.\n", NUM_BLOCKING_THREADS, flags_CriticalSectionFunctions_test4[0], flags_CriticalSectionFunctions_test4[1]); } /* Fail the test if both threads returned WAIT_OBJECT_0 */ if ((WAIT_OBJECT_0 == dwRet) && (WAIT_OBJECT_0 == dwRet1)) { bTestResult_CriticalSectionFunctions_test4 = FAIL; Trace ("PALSUITE ERROR: WaitForSingleObject(%p, %d) and " "WaitForSingleObject(%p, %d)\nboth returned dwRet = '%d'\n" "One should have returned WAIT_TIMEOUT ('%d').\n", hThread_CriticalSectionFunctions_test4[0], 10000, hThread_CriticalSectionFunctions_test4[1], 10000, dwRet, WAIT_TIMEOUT); } } else { bTestResult_CriticalSectionFunctions_test4 = FAIL; Trace ("PALSUITE ERROR: WaitForSingleObject(%p, %d) and " "WaitForSingleObject(%p, %d)\nReturned dwRet = '%d' and\n" "dwRet1 = '%d' respectively.\n", hThread_CriticalSectionFunctions_test4[0], 10000, hThread_CriticalSectionFunctions_test4[1], 10000, dwRet, dwRet1); } if (WAIT_OBJECT_0 == dwRet) { if (0 == CloseHandle(hThread_CriticalSectionFunctions_test4[0])) { Trace("PALSUITE NOTIFICATION: CloseHandle(%p) call failed.\n" "GetLastError returned %d. Not failing tests.\n", hThread_CriticalSectionFunctions_test4[0], GetLastError()); } } if (WAIT_OBJECT_0 == dwRet1) { if (0 == CloseHandle(hThread_CriticalSectionFunctions_test4[1])) { Trace("PALSUITE NOTIFICATION: CloseHandle(%p) call failed.\n" "GetLastError returned %d. 
Not failing tests.\n", hThread_CriticalSectionFunctions_test4[1], GetLastError()); } } /* Leaking the CS on purpose, since there is still a thread waiting on it */ PAL_TerminateEx(bTestResult_CriticalSectionFunctions_test4); return (bTestResult_CriticalSectionFunctions_test4); }
-1
dotnet/runtime
66,234
Remove compiler warning suppression
Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
AaronRobinsonMSFT
2022-03-05T03:45:49Z
2022-03-09T00:57:12Z
220e67755ae6323a01011b83070dff6f84b02519
853e494abd1c1e12d28a76829b4680af7f694afa
Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
./src/native/libs/System.IO.Compression.Native/pal_zlib.c
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include <assert.h> #include <stdlib.h> #include "pal_zlib.h" #ifdef INTERNAL_ZLIB #ifdef _WIN32 #define c_static_assert(e) static_assert((e),"") #endif #include <external/zlib/zlib.h> #else #include "pal_utilities.h" #include <zlib.h> #endif c_static_assert(PAL_Z_NOFLUSH == Z_NO_FLUSH); c_static_assert(PAL_Z_FINISH == Z_FINISH); c_static_assert(PAL_Z_OK == Z_OK); c_static_assert(PAL_Z_STREAMEND == Z_STREAM_END); c_static_assert(PAL_Z_STREAMERROR == Z_STREAM_ERROR); c_static_assert(PAL_Z_DATAERROR == Z_DATA_ERROR); c_static_assert(PAL_Z_MEMERROR == Z_MEM_ERROR); c_static_assert(PAL_Z_BUFERROR == Z_BUF_ERROR); c_static_assert(PAL_Z_VERSIONERROR == Z_VERSION_ERROR); c_static_assert(PAL_Z_NOCOMPRESSION == Z_NO_COMPRESSION); c_static_assert(PAL_Z_BESTSPEED == Z_BEST_SPEED); c_static_assert(PAL_Z_DEFAULTCOMPRESSION == Z_DEFAULT_COMPRESSION); c_static_assert(PAL_Z_DEFAULTSTRATEGY == Z_DEFAULT_STRATEGY); c_static_assert(PAL_Z_DEFLATED == Z_DEFLATED); /* Initializes the PAL_ZStream by creating and setting its underlying z_stream. */ static int32_t Init(PAL_ZStream* stream) { z_stream* zStream = (z_stream*)malloc(sizeof(z_stream)); stream->internalState = zStream; if (zStream != NULL) { zStream->zalloc = Z_NULL; zStream->zfree = Z_NULL; zStream->opaque = Z_NULL; return PAL_Z_OK; } else { return PAL_Z_MEMERROR; } } /* Frees any memory on the PAL_ZStream that was created by Init. */ static void End(PAL_ZStream* stream) { z_stream* zStream = (z_stream*)(stream->internalState); assert(zStream != NULL); if (zStream != NULL) { free(zStream); stream->internalState = NULL; } } /* Transfers the output values from the underlying z_stream to the PAL_ZStream. 
*/ static void TransferStateToPalZStream(z_stream* from, PAL_ZStream* to) { to->nextIn = from->next_in; to->availIn = from->avail_in; to->nextOut = from->next_out; to->availOut = from->avail_out; to->msg = from->msg; } /* Transfers the input values from the PAL_ZStream to the underlying z_stream object. */ static void TransferStateFromPalZStream(PAL_ZStream* from, z_stream* to) { to->next_in = from->nextIn; to->avail_in = from->availIn; to->next_out = from->nextOut; to->avail_out = from->availOut; } /* Gets the current z_stream object for the specified PAL_ZStream. This ensures any inputs are transferred from the PAL_ZStream to the underlying z_stream, since the current values are always needed. */ static z_stream* GetCurrentZStream(PAL_ZStream* stream) { z_stream* zStream = (z_stream*)(stream->internalState); assert(zStream != NULL); TransferStateFromPalZStream(stream, zStream); return zStream; } int32_t CompressionNative_DeflateInit2_( PAL_ZStream* stream, int32_t level, int32_t method, int32_t windowBits, int32_t memLevel, int32_t strategy) { assert(stream != NULL); int32_t result = Init(stream); if (result == PAL_Z_OK) { z_stream* zStream = GetCurrentZStream(stream); result = deflateInit2(zStream, level, method, windowBits, memLevel, strategy); TransferStateToPalZStream(zStream, stream); } return result; } int32_t CompressionNative_Deflate(PAL_ZStream* stream, int32_t flush) { assert(stream != NULL); z_stream* zStream = GetCurrentZStream(stream); int32_t result = deflate(zStream, flush); TransferStateToPalZStream(zStream, stream); return result; } int32_t CompressionNative_DeflateReset(PAL_ZStream* stream) { assert(stream != NULL); z_stream* zStream = GetCurrentZStream(stream); int32_t result = deflateReset(zStream); TransferStateToPalZStream(zStream, stream); return result; } int32_t CompressionNative_DeflateEnd(PAL_ZStream* stream) { assert(stream != NULL); z_stream* zStream = GetCurrentZStream(stream); int32_t result = deflateEnd(zStream); End(stream); 
return result; } int32_t CompressionNative_InflateInit2_(PAL_ZStream* stream, int32_t windowBits) { assert(stream != NULL); int32_t result = Init(stream); if (result == PAL_Z_OK) { z_stream* zStream = GetCurrentZStream(stream); result = inflateInit2(zStream, windowBits); TransferStateToPalZStream(zStream, stream); } return result; } int32_t CompressionNative_Inflate(PAL_ZStream* stream, int32_t flush) { assert(stream != NULL); z_stream* zStream = GetCurrentZStream(stream); int32_t result = inflate(zStream, flush); TransferStateToPalZStream(zStream, stream); return result; } int32_t CompressionNative_InflateReset(PAL_ZStream* stream) { assert(stream != NULL); z_stream* zStream = GetCurrentZStream(stream); int32_t result = inflateReset(zStream); TransferStateToPalZStream(zStream, stream); return result; } int32_t CompressionNative_InflateEnd(PAL_ZStream* stream) { assert(stream != NULL); z_stream* zStream = GetCurrentZStream(stream); int32_t result = inflateEnd(zStream); End(stream); return result; } uint32_t CompressionNative_Crc32(uint32_t crc, uint8_t* buffer, int32_t len) { assert(buffer != NULL); unsigned long result = crc32(crc, buffer, len); assert(result <= UINT32_MAX); return (uint32_t)result; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include <assert.h> #include <stdlib.h> #include "pal_zlib.h" #ifdef INTERNAL_ZLIB #ifdef _WIN32 #define c_static_assert(e) static_assert((e),"") #endif #include <external/zlib/zlib.h> #else #include "pal_utilities.h" #include <zlib.h> #endif c_static_assert(PAL_Z_NOFLUSH == Z_NO_FLUSH); c_static_assert(PAL_Z_FINISH == Z_FINISH); c_static_assert(PAL_Z_OK == Z_OK); c_static_assert(PAL_Z_STREAMEND == Z_STREAM_END); c_static_assert(PAL_Z_STREAMERROR == Z_STREAM_ERROR); c_static_assert(PAL_Z_DATAERROR == Z_DATA_ERROR); c_static_assert(PAL_Z_MEMERROR == Z_MEM_ERROR); c_static_assert(PAL_Z_BUFERROR == Z_BUF_ERROR); c_static_assert(PAL_Z_VERSIONERROR == Z_VERSION_ERROR); c_static_assert(PAL_Z_NOCOMPRESSION == Z_NO_COMPRESSION); c_static_assert(PAL_Z_BESTSPEED == Z_BEST_SPEED); c_static_assert(PAL_Z_DEFAULTCOMPRESSION == Z_DEFAULT_COMPRESSION); c_static_assert(PAL_Z_DEFAULTSTRATEGY == Z_DEFAULT_STRATEGY); c_static_assert(PAL_Z_DEFLATED == Z_DEFLATED); /* Initializes the PAL_ZStream by creating and setting its underlying z_stream. */ static int32_t Init(PAL_ZStream* stream) { z_stream* zStream = (z_stream*)malloc(sizeof(z_stream)); stream->internalState = zStream; if (zStream != NULL) { zStream->zalloc = Z_NULL; zStream->zfree = Z_NULL; zStream->opaque = Z_NULL; return PAL_Z_OK; } else { return PAL_Z_MEMERROR; } } /* Frees any memory on the PAL_ZStream that was created by Init. */ static void End(PAL_ZStream* stream) { z_stream* zStream = (z_stream*)(stream->internalState); assert(zStream != NULL); if (zStream != NULL) { free(zStream); stream->internalState = NULL; } } /* Transfers the output values from the underlying z_stream to the PAL_ZStream. 
*/ static void TransferStateToPalZStream(z_stream* from, PAL_ZStream* to) { to->nextIn = from->next_in; to->availIn = from->avail_in; to->nextOut = from->next_out; to->availOut = from->avail_out; to->msg = from->msg; } /* Transfers the input values from the PAL_ZStream to the underlying z_stream object. */ static void TransferStateFromPalZStream(PAL_ZStream* from, z_stream* to) { to->next_in = from->nextIn; to->avail_in = from->availIn; to->next_out = from->nextOut; to->avail_out = from->availOut; } /* Gets the current z_stream object for the specified PAL_ZStream. This ensures any inputs are transferred from the PAL_ZStream to the underlying z_stream, since the current values are always needed. */ static z_stream* GetCurrentZStream(PAL_ZStream* stream) { z_stream* zStream = (z_stream*)(stream->internalState); assert(zStream != NULL); TransferStateFromPalZStream(stream, zStream); return zStream; } int32_t CompressionNative_DeflateInit2_( PAL_ZStream* stream, int32_t level, int32_t method, int32_t windowBits, int32_t memLevel, int32_t strategy) { assert(stream != NULL); int32_t result = Init(stream); if (result == PAL_Z_OK) { z_stream* zStream = GetCurrentZStream(stream); result = deflateInit2(zStream, level, method, windowBits, memLevel, strategy); TransferStateToPalZStream(zStream, stream); } return result; } int32_t CompressionNative_Deflate(PAL_ZStream* stream, int32_t flush) { assert(stream != NULL); z_stream* zStream = GetCurrentZStream(stream); int32_t result = deflate(zStream, flush); TransferStateToPalZStream(zStream, stream); return result; } int32_t CompressionNative_DeflateReset(PAL_ZStream* stream) { assert(stream != NULL); z_stream* zStream = GetCurrentZStream(stream); int32_t result = deflateReset(zStream); TransferStateToPalZStream(zStream, stream); return result; } int32_t CompressionNative_DeflateEnd(PAL_ZStream* stream) { assert(stream != NULL); z_stream* zStream = GetCurrentZStream(stream); int32_t result = deflateEnd(zStream); End(stream); 
return result; } int32_t CompressionNative_InflateInit2_(PAL_ZStream* stream, int32_t windowBits) { assert(stream != NULL); int32_t result = Init(stream); if (result == PAL_Z_OK) { z_stream* zStream = GetCurrentZStream(stream); result = inflateInit2(zStream, windowBits); TransferStateToPalZStream(zStream, stream); } return result; } int32_t CompressionNative_Inflate(PAL_ZStream* stream, int32_t flush) { assert(stream != NULL); z_stream* zStream = GetCurrentZStream(stream); int32_t result = inflate(zStream, flush); TransferStateToPalZStream(zStream, stream); return result; } int32_t CompressionNative_InflateReset(PAL_ZStream* stream) { assert(stream != NULL); z_stream* zStream = GetCurrentZStream(stream); int32_t result = inflateReset(zStream); TransferStateToPalZStream(zStream, stream); return result; } int32_t CompressionNative_InflateEnd(PAL_ZStream* stream) { assert(stream != NULL); z_stream* zStream = GetCurrentZStream(stream); int32_t result = inflateEnd(zStream); End(stream); return result; } uint32_t CompressionNative_Crc32(uint32_t crc, uint8_t* buffer, int32_t len) { assert(buffer != NULL); unsigned long result = crc32(crc, buffer, len); assert(result <= UINT32_MAX); return (uint32_t)result; }
-1
dotnet/runtime
66,234
Remove compiler warning suppression
Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
AaronRobinsonMSFT
2022-03-05T03:45:49Z
2022-03-09T00:57:12Z
220e67755ae6323a01011b83070dff6f84b02519
853e494abd1c1e12d28a76829b4680af7f694afa
Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
./src/native/external/brotli/enc/write_bits.h
/* Copyright 2010 Google Inc. All Rights Reserved. Distributed under MIT license. See file LICENSE for detail or copy at https://opensource.org/licenses/MIT */ /* Write bits into a byte array. */ #ifndef BROTLI_ENC_WRITE_BITS_H_ #define BROTLI_ENC_WRITE_BITS_H_ #include "../common/platform.h" #include <brotli/types.h> #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif /* This function writes bits into bytes in increasing addresses, and within a byte least-significant-bit first. The function can write up to 56 bits in one go with WriteBits Example: let's assume that 3 bits (Rs below) have been written already: BYTE-0 BYTE+1 BYTE+2 0000 0RRR 0000 0000 0000 0000 Now, we could write 5 or less bits in MSB by just shifting by 3 and OR'ing to BYTE-0. For n bits, we take the last 5 bits, OR that with high bits in BYTE-0, and locate the rest in BYTE+1, BYTE+2, etc. */ static BROTLI_INLINE void BrotliWriteBits(size_t n_bits, uint64_t bits, size_t* BROTLI_RESTRICT pos, uint8_t* BROTLI_RESTRICT array) { BROTLI_LOG(("WriteBits %2d 0x%08x%08x %10d\n", (int)n_bits, (uint32_t)(bits >> 32), (uint32_t)(bits & 0xFFFFFFFF), (int)*pos)); BROTLI_DCHECK((bits >> n_bits) == 0); BROTLI_DCHECK(n_bits <= 56); #if defined(BROTLI_LITTLE_ENDIAN) /* This branch of the code can write up to 56 bits at a time, 7 bits are lost by being perhaps already in *p and at least 1 bit is needed to initialize the bit-stream ahead (i.e. if 7 bits are in *p and we write 57 bits, then the next write will access a byte that was never initialized). */ { uint8_t* p = &array[*pos >> 3]; uint64_t v = (uint64_t)(*p); /* Zero-extend 8 to 64 bits. */ v |= bits << (*pos & 7); BROTLI_UNALIGNED_STORE64LE(p, v); /* Set some bits. 
*/ *pos += n_bits; } #else /* implicit & 0xFF is assumed for uint8_t arithmetics */ { uint8_t* array_pos = &array[*pos >> 3]; const size_t bits_reserved_in_first_byte = (*pos & 7); size_t bits_left_to_write; bits <<= bits_reserved_in_first_byte; *array_pos++ |= (uint8_t)bits; for (bits_left_to_write = n_bits + bits_reserved_in_first_byte; bits_left_to_write >= 9; bits_left_to_write -= 8) { bits >>= 8; *array_pos++ = (uint8_t)bits; } *array_pos = 0; *pos += n_bits; } #endif } static BROTLI_INLINE void BrotliWriteBitsPrepareStorage( size_t pos, uint8_t* array) { BROTLI_LOG(("WriteBitsPrepareStorage %10d\n", (int)pos)); BROTLI_DCHECK((pos & 7) == 0); array[pos >> 3] = 0; } #if defined(__cplusplus) || defined(c_plusplus) } /* extern "C" */ #endif #endif /* BROTLI_ENC_WRITE_BITS_H_ */
/* Copyright 2010 Google Inc. All Rights Reserved. Distributed under MIT license. See file LICENSE for detail or copy at https://opensource.org/licenses/MIT */ /* Write bits into a byte array. */ #ifndef BROTLI_ENC_WRITE_BITS_H_ #define BROTLI_ENC_WRITE_BITS_H_ #include "../common/platform.h" #include <brotli/types.h> #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif /* This function writes bits into bytes in increasing addresses, and within a byte least-significant-bit first. The function can write up to 56 bits in one go with WriteBits Example: let's assume that 3 bits (Rs below) have been written already: BYTE-0 BYTE+1 BYTE+2 0000 0RRR 0000 0000 0000 0000 Now, we could write 5 or less bits in MSB by just shifting by 3 and OR'ing to BYTE-0. For n bits, we take the last 5 bits, OR that with high bits in BYTE-0, and locate the rest in BYTE+1, BYTE+2, etc. */ static BROTLI_INLINE void BrotliWriteBits(size_t n_bits, uint64_t bits, size_t* BROTLI_RESTRICT pos, uint8_t* BROTLI_RESTRICT array) { BROTLI_LOG(("WriteBits %2d 0x%08x%08x %10d\n", (int)n_bits, (uint32_t)(bits >> 32), (uint32_t)(bits & 0xFFFFFFFF), (int)*pos)); BROTLI_DCHECK((bits >> n_bits) == 0); BROTLI_DCHECK(n_bits <= 56); #if defined(BROTLI_LITTLE_ENDIAN) /* This branch of the code can write up to 56 bits at a time, 7 bits are lost by being perhaps already in *p and at least 1 bit is needed to initialize the bit-stream ahead (i.e. if 7 bits are in *p and we write 57 bits, then the next write will access a byte that was never initialized). */ { uint8_t* p = &array[*pos >> 3]; uint64_t v = (uint64_t)(*p); /* Zero-extend 8 to 64 bits. */ v |= bits << (*pos & 7); BROTLI_UNALIGNED_STORE64LE(p, v); /* Set some bits. 
*/ *pos += n_bits; } #else /* implicit & 0xFF is assumed for uint8_t arithmetics */ { uint8_t* array_pos = &array[*pos >> 3]; const size_t bits_reserved_in_first_byte = (*pos & 7); size_t bits_left_to_write; bits <<= bits_reserved_in_first_byte; *array_pos++ |= (uint8_t)bits; for (bits_left_to_write = n_bits + bits_reserved_in_first_byte; bits_left_to_write >= 9; bits_left_to_write -= 8) { bits >>= 8; *array_pos++ = (uint8_t)bits; } *array_pos = 0; *pos += n_bits; } #endif } static BROTLI_INLINE void BrotliWriteBitsPrepareStorage( size_t pos, uint8_t* array) { BROTLI_LOG(("WriteBitsPrepareStorage %10d\n", (int)pos)); BROTLI_DCHECK((pos & 7) == 0); array[pos >> 3] = 0; } #if defined(__cplusplus) || defined(c_plusplus) } /* extern "C" */ #endif #endif /* BROTLI_ENC_WRITE_BITS_H_ */
-1
dotnet/runtime
66,234
Remove compiler warning suppression
Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
AaronRobinsonMSFT
2022-03-05T03:45:49Z
2022-03-09T00:57:12Z
220e67755ae6323a01011b83070dff6f84b02519
853e494abd1c1e12d28a76829b4680af7f694afa
Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
./src/native/corehost/hostmisc/pal.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #ifndef PAL_H #define PAL_H #include <string> #include <vector> #include <fstream> #include <sstream> #include <iostream> #include <cstring> #include <cstdarg> #include <cstdint> #include <tuple> #include <unordered_map> #include <unordered_set> #include <memory> #include <algorithm> #include <cassert> #if defined(_WIN32) #define NOMINMAX #include <windows.h> #define xerr std::wcerr #define xout std::wcout #define DIR_SEPARATOR L'\\' #define PATH_SEPARATOR L';' #define PATH_MAX MAX_PATH #define _X(s) L ## s #else #include <cstdlib> #include <unistd.h> #include <libgen.h> #include <mutex> #include <sys/stat.h> #include <sys/types.h> #include <sys/mman.h> #define xerr std::cerr #define xout std::cout #define DIR_SEPARATOR '/' #define PATH_SEPARATOR ':' #undef _X #define _X(s) s #define S_OK 0x00000000 #define E_NOTIMPL 0x80004001 #define E_FAIL 0x80004005 #define SUCCEEDED(Status) ((Status) >= 0) #endif // When running on a platform that is not supported in RID fallback graph (because it was unknown // at the time the SharedFX in question was built), we need to use a reasonable fallback RID to allow // consuming the native assets. // // For Windows and OSX, we will maintain the last highest RID-Platform we are known to support for them as the // degree of compat across their respective releases is usually high. // // We cannot maintain the same (compat) invariant for linux and thus, we will fallback to using lowest RID-Plaform. 
#if defined(TARGET_WINDOWS) #define LIB_PREFIX #define MAKE_LIBNAME(NAME) (_X(NAME) _X(".dll")) #define FALLBACK_HOST_RID _X("win10") #elif defined(TARGET_OSX) #define LIB_PREFIX _X("lib") #define MAKE_LIBNAME(NAME) (LIB_PREFIX _X(NAME) _X(".dylib")) #define FALLBACK_HOST_RID _X("osx.10.12") #else #define LIB_PREFIX _X("lib") #define MAKE_LIBNAME(NAME) (LIB_PREFIX _X(NAME) _X(".so")) #if defined(TARGET_FREEBSD) #define FALLBACK_HOST_RID _X("freebsd") #elif defined(TARGET_ILLUMOS) #define FALLBACK_HOST_RID _X("illumos") #elif defined(TARGET_SUNOS) #define FALLBACK_HOST_RID _X("solaris") #elif defined(TARGET_LINUX_MUSL) #define FALLBACK_HOST_RID _X("linux-musl") #else #define FALLBACK_HOST_RID _X("linux") #endif #endif #define LIBCORECLR_FILENAME (LIB_PREFIX _X("coreclr")) #define LIBCORECLR_NAME MAKE_LIBNAME("coreclr") #define CORELIB_NAME _X("System.Private.CoreLib.dll") #define LIBHOSTPOLICY_FILENAME (LIB_PREFIX _X("hostpolicy")) #define LIBHOSTPOLICY_NAME MAKE_LIBNAME("hostpolicy") #define LIBFXR_NAME MAKE_LIBNAME("hostfxr") #if !defined(PATH_MAX) && !defined(_WIN32) #define PATH_MAX 4096 #endif namespace pal { #if defined(_WIN32) #ifdef EXPORT_SHARED_API #define SHARED_API extern "C" __declspec(dllexport) #else #define SHARED_API extern "C" #endif #define STDMETHODCALLTYPE __stdcall typedef wchar_t char_t; typedef std::wstring string_t; typedef std::wstringstream stringstream_t; // TODO: Agree on the correct encoding of the files: The PoR for now is to // temporarily wchar for Windows and char for Unix. Current implementation // implicitly expects the contents on both Windows and Unix as char and // converts them to wchar in code for Windows. This line should become: // typedef std::basic_ifstream<char_t> ifstream_t. 
typedef std::basic_ifstream<char> ifstream_t; typedef std::istreambuf_iterator<ifstream_t::char_type> istreambuf_iterator_t; typedef std::basic_istream<char> istream_t; typedef HRESULT hresult_t; typedef HMODULE dll_t; typedef FARPROC proc_t; // Lockable object backed by CRITICAL_SECTION such that it does not pull in ConcRT. class mutex_t { public: mutex_t(); ~mutex_t(); mutex_t(const mutex_t&) = delete; mutex_t& operator=(const mutex_t&) = delete; void lock(); void unlock(); private: CRITICAL_SECTION _impl; }; inline string_t exe_suffix() { return _X(".exe"); } inline int cstrcasecmp(const char* str1, const char* str2) { return ::_stricmp(str1, str2); } inline int strcmp(const char_t* str1, const char_t* str2) { return ::wcscmp(str1, str2); } inline int strcasecmp(const char_t* str1, const char_t* str2) { return ::_wcsicmp(str1, str2); } inline int strncmp(const char_t* str1, const char_t* str2, size_t len) { return ::wcsncmp(str1, str2, len); } inline int strncasecmp(const char_t* str1, const char_t* str2, size_t len) { return ::_wcsnicmp(str1, str2, len); } inline int pathcmp(const pal::string_t& path1, const pal::string_t& path2) { return strcasecmp(path1.c_str(), path2.c_str()); } inline string_t to_string(int value) { return std::to_wstring(value); } inline size_t strlen(const char_t* str) { return ::wcslen(str); } #pragma warning(suppress : 4996) // error C4996: '_wfopen': This function or variable may be unsafe. inline FILE* file_open(const string_t& path, const char_t* mode) { return ::_wfopen(path.c_str(), mode); } inline void file_vprintf(FILE* f, const char_t* format, va_list vl) { ::vfwprintf(f, format, vl); ::fputwc(_X('\n'), f); } inline void err_fputs(const char_t* message) { ::fputws(message, stderr); ::fputwc(_X('\n'), stderr); } inline void out_vprintf(const char_t* format, va_list vl) { ::vfwprintf(stdout, format, vl); ::fputwc(_X('\n'), stdout); } // This API is being used correctly and querying for needed size first. 
#pragma warning(suppress : 4996) // error C4996: '_vsnwprintf': This function or variable may be unsafe. inline int str_vprintf(char_t* buffer, size_t count, const char_t* format, va_list vl) { return ::_vsnwprintf(buffer, count, format, vl); } // Suppressing warning since the 'safe' version requires an input buffer that is unnecessary for // uses of this function. #pragma warning(suppress : 4996) // error C4996: '_wcserror': This function or variable may be unsafe. inline const char_t* strerror(int errnum) { return ::_wcserror(errnum); } bool pal_utf8string(const string_t& str, std::vector<char>* out); bool pal_clrstring(const string_t& str, std::vector<char>* out); bool clr_palstring(const char* cstr, string_t* out); inline bool mkdir(const char_t* dir, int mode) { return CreateDirectoryW(dir, NULL) != 0; } inline bool rmdir(const char_t* path) { return RemoveDirectoryW(path) != 0; } inline int rename(const char_t* old_name, const char_t* new_name) { return ::_wrename(old_name, new_name); } inline int remove(const char_t* path) { return ::_wremove(path); } inline bool munmap(void* addr, size_t length) { return UnmapViewOfFile(addr) != 0; } inline int get_pid() { return GetCurrentProcessId(); } inline void sleep(uint32_t milliseconds) { Sleep(milliseconds); } #else #ifdef EXPORT_SHARED_API #define SHARED_API extern "C" __attribute__((__visibility__("default"))) #else #define SHARED_API extern "C" #endif #define __cdecl /* nothing */ #define __stdcall /* nothing */ #if !defined(TARGET_FREEBSD) #define __fastcall /* nothing */ #endif #define STDMETHODCALLTYPE __stdcall typedef char char_t; typedef std::string string_t; typedef std::stringstream stringstream_t; typedef std::basic_ifstream<char> ifstream_t; typedef std::istreambuf_iterator<ifstream_t::char_type> istreambuf_iterator_t; typedef std::basic_istream<char> istream_t; typedef int hresult_t; typedef void* dll_t; typedef void* proc_t; typedef std::mutex mutex_t; inline string_t exe_suffix() { return _X(""); } 
inline int cstrcasecmp(const char* str1, const char* str2) { return ::strcasecmp(str1, str2); } inline int strcmp(const char_t* str1, const char_t* str2) { return ::strcmp(str1, str2); } inline int strcasecmp(const char_t* str1, const char_t* str2) { return ::strcasecmp(str1, str2); } inline int strncmp(const char_t* str1, const char_t* str2, int len) { return ::strncmp(str1, str2, len); } inline int strncasecmp(const char_t* str1, const char_t* str2, int len) { return ::strncasecmp(str1, str2, len); } inline int pathcmp(const pal::string_t& path1, const pal::string_t& path2) { return strcmp(path1.c_str(), path2.c_str()); } inline string_t to_string(int value) { return std::to_string(value); } inline size_t strlen(const char_t* str) { return ::strlen(str); } inline FILE* file_open(const string_t& path, const char_t* mode) { return fopen(path.c_str(), mode); } inline void file_vprintf(FILE* f, const char_t* format, va_list vl) { ::vfprintf(f, format, vl); ::fputc('\n', f); } inline void err_fputs(const char_t* message) { ::fputs(message, stderr); ::fputc(_X('\n'), stderr); } inline void out_vprintf(const char_t* format, va_list vl) { ::vfprintf(stdout, format, vl); ::fputc('\n', stdout); } inline int str_vprintf(char_t* str, size_t size, const char_t* format, va_list vl) { return ::vsnprintf(str, size, format, vl); } inline const char_t* strerror(int errnum) { return ::strerror(errnum); } inline bool pal_utf8string(const string_t& str, std::vector<char>* out) { out->assign(str.begin(), str.end()); out->push_back('\0'); return true; } inline bool pal_clrstring(const string_t& str, std::vector<char>* out) { return pal_utf8string(str, out); } inline bool clr_palstring(const char* cstr, string_t* out) { out->assign(cstr); return true; } inline bool mkdir(const char_t* dir, int mode) { return ::mkdir(dir, mode) == 0; } inline bool rmdir(const char_t* path) { return ::rmdir(path) == 0; } inline int rename(const char_t* old_name, const char_t* new_name) { return 
::rename(old_name, new_name); } inline int remove(const char_t* path) { return ::remove(path); } inline bool munmap(void* addr, size_t length) { return ::munmap(addr, length) == 0; } inline int get_pid() { return getpid(); } inline void sleep(uint32_t milliseconds) { usleep(milliseconds * 1000); } #endif inline int snwprintf(char_t* buffer, size_t count, const char_t* format, ...) { va_list args; va_start(args, format); int ret = str_vprintf(buffer, count, format, args); va_end(args); return ret; } string_t get_timestamp(); bool getcwd(string_t* recv); inline void file_flush(FILE* f) { std::fflush(f); } inline void err_flush() { std::fflush(stderr); } inline void out_flush() { std::fflush(stdout); } string_t get_current_os_rid_platform(); inline string_t get_current_os_fallback_rid() { string_t fallbackRid(FALLBACK_HOST_RID); return fallbackRid; } const void* mmap_read(const string_t& path, size_t* length = nullptr); void* mmap_copy_on_write(const string_t& path, size_t* length = nullptr); bool touch_file(const string_t& path); bool realpath(string_t* path, bool skip_error_logging = false); bool file_exists(const string_t& path); inline bool directory_exists(const string_t& path) { return file_exists(path); } void readdir(const string_t& path, const string_t& pattern, std::vector<string_t>* list); void readdir(const string_t& path, std::vector<string_t>* list); void readdir_onlydirectories(const string_t& path, const string_t& pattern, std::vector<string_t>* list); void readdir_onlydirectories(const string_t& path, std::vector<string_t>* list); bool get_own_executable_path(string_t* recv); bool get_own_module_path(string_t* recv); bool get_method_module_path(string_t* recv, void* method); bool get_module_path(dll_t mod, string_t* recv); bool get_current_module(dll_t* mod); bool getenv(const char_t* name, string_t* recv); bool get_default_servicing_directory(string_t* recv); // Returns the globally registered install location (if any) bool 
get_dotnet_self_registered_dir(string_t* recv); // Returns name of the global registry location (for error messages) string_t get_dotnet_self_registered_config_location(); // Returns the default install location for a given platform bool get_default_installation_dir(string_t* recv); // Returns the global locations to search for SDK/Frameworks - used when multi-level lookup is enabled bool get_global_dotnet_dirs(std::vector<string_t>* recv); bool get_default_breadcrumb_store(string_t* recv); bool is_path_rooted(const string_t& path); // Returns a platform-specific, user-private directory // that can be used for extracting out components of a single-file app. bool get_default_bundle_extraction_base_dir(string_t& extraction_dir); int xtoi(const char_t* input); bool get_loaded_library(const char_t* library_name, const char* symbol_name, /*out*/ dll_t* dll, /*out*/ string_t* path); bool load_library(const string_t* path, dll_t* dll); proc_t get_symbol(dll_t library, const char* name); void unload_library(dll_t library); bool is_running_in_wow64(); bool is_emulating_x64(); bool are_paths_equal_with_normalized_casing(const string_t& path1, const string_t& path2); } #endif // PAL_H
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #ifndef PAL_H #define PAL_H #include <string> #include <vector> #include <fstream> #include <sstream> #include <iostream> #include <cstring> #include <cstdarg> #include <cstdint> #include <tuple> #include <unordered_map> #include <unordered_set> #include <memory> #include <algorithm> #include <cassert> #if defined(_WIN32) #define NOMINMAX #include <windows.h> #define xerr std::wcerr #define xout std::wcout #define DIR_SEPARATOR L'\\' #define PATH_SEPARATOR L';' #define PATH_MAX MAX_PATH #define _X(s) L ## s #else #include <cstdlib> #include <unistd.h> #include <libgen.h> #include <mutex> #include <sys/stat.h> #include <sys/types.h> #include <sys/mman.h> #define xerr std::cerr #define xout std::cout #define DIR_SEPARATOR '/' #define PATH_SEPARATOR ':' #undef _X #define _X(s) s #define S_OK 0x00000000 #define E_NOTIMPL 0x80004001 #define E_FAIL 0x80004005 #define SUCCEEDED(Status) ((Status) >= 0) #endif // When running on a platform that is not supported in RID fallback graph (because it was unknown // at the time the SharedFX in question was built), we need to use a reasonable fallback RID to allow // consuming the native assets. // // For Windows and OSX, we will maintain the last highest RID-Platform we are known to support for them as the // degree of compat across their respective releases is usually high. // // We cannot maintain the same (compat) invariant for linux and thus, we will fallback to using lowest RID-Plaform. 
#if defined(TARGET_WINDOWS) #define LIB_PREFIX #define MAKE_LIBNAME(NAME) (_X(NAME) _X(".dll")) #define FALLBACK_HOST_RID _X("win10") #elif defined(TARGET_OSX) #define LIB_PREFIX _X("lib") #define MAKE_LIBNAME(NAME) (LIB_PREFIX _X(NAME) _X(".dylib")) #define FALLBACK_HOST_RID _X("osx.10.12") #else #define LIB_PREFIX _X("lib") #define MAKE_LIBNAME(NAME) (LIB_PREFIX _X(NAME) _X(".so")) #if defined(TARGET_FREEBSD) #define FALLBACK_HOST_RID _X("freebsd") #elif defined(TARGET_ILLUMOS) #define FALLBACK_HOST_RID _X("illumos") #elif defined(TARGET_SUNOS) #define FALLBACK_HOST_RID _X("solaris") #elif defined(TARGET_LINUX_MUSL) #define FALLBACK_HOST_RID _X("linux-musl") #else #define FALLBACK_HOST_RID _X("linux") #endif #endif #define LIBCORECLR_FILENAME (LIB_PREFIX _X("coreclr")) #define LIBCORECLR_NAME MAKE_LIBNAME("coreclr") #define CORELIB_NAME _X("System.Private.CoreLib.dll") #define LIBHOSTPOLICY_FILENAME (LIB_PREFIX _X("hostpolicy")) #define LIBHOSTPOLICY_NAME MAKE_LIBNAME("hostpolicy") #define LIBFXR_NAME MAKE_LIBNAME("hostfxr") #if !defined(PATH_MAX) && !defined(_WIN32) #define PATH_MAX 4096 #endif namespace pal { #if defined(_WIN32) #ifdef EXPORT_SHARED_API #define SHARED_API extern "C" __declspec(dllexport) #else #define SHARED_API extern "C" #endif #define STDMETHODCALLTYPE __stdcall typedef wchar_t char_t; typedef std::wstring string_t; typedef std::wstringstream stringstream_t; // TODO: Agree on the correct encoding of the files: The PoR for now is to // temporarily wchar for Windows and char for Unix. Current implementation // implicitly expects the contents on both Windows and Unix as char and // converts them to wchar in code for Windows. This line should become: // typedef std::basic_ifstream<char_t> ifstream_t. 
typedef std::basic_ifstream<char> ifstream_t; typedef std::istreambuf_iterator<ifstream_t::char_type> istreambuf_iterator_t; typedef std::basic_istream<char> istream_t; typedef HRESULT hresult_t; typedef HMODULE dll_t; typedef FARPROC proc_t; // Lockable object backed by CRITICAL_SECTION such that it does not pull in ConcRT. class mutex_t { public: mutex_t(); ~mutex_t(); mutex_t(const mutex_t&) = delete; mutex_t& operator=(const mutex_t&) = delete; void lock(); void unlock(); private: CRITICAL_SECTION _impl; }; inline string_t exe_suffix() { return _X(".exe"); } inline int cstrcasecmp(const char* str1, const char* str2) { return ::_stricmp(str1, str2); } inline int strcmp(const char_t* str1, const char_t* str2) { return ::wcscmp(str1, str2); } inline int strcasecmp(const char_t* str1, const char_t* str2) { return ::_wcsicmp(str1, str2); } inline int strncmp(const char_t* str1, const char_t* str2, size_t len) { return ::wcsncmp(str1, str2, len); } inline int strncasecmp(const char_t* str1, const char_t* str2, size_t len) { return ::_wcsnicmp(str1, str2, len); } inline int pathcmp(const pal::string_t& path1, const pal::string_t& path2) { return strcasecmp(path1.c_str(), path2.c_str()); } inline string_t to_string(int value) { return std::to_wstring(value); } inline size_t strlen(const char_t* str) { return ::wcslen(str); } #pragma warning(suppress : 4996) // error C4996: '_wfopen': This function or variable may be unsafe. inline FILE* file_open(const string_t& path, const char_t* mode) { return ::_wfopen(path.c_str(), mode); } inline void file_vprintf(FILE* f, const char_t* format, va_list vl) { ::vfwprintf(f, format, vl); ::fputwc(_X('\n'), f); } inline void err_fputs(const char_t* message) { ::fputws(message, stderr); ::fputwc(_X('\n'), stderr); } inline void out_vprintf(const char_t* format, va_list vl) { ::vfwprintf(stdout, format, vl); ::fputwc(_X('\n'), stdout); } // This API is being used correctly and querying for needed size first. 
#pragma warning(suppress : 4996) // error C4996: '_vsnwprintf': This function or variable may be unsafe. inline int str_vprintf(char_t* buffer, size_t count, const char_t* format, va_list vl) { return ::_vsnwprintf(buffer, count, format, vl); } // Suppressing warning since the 'safe' version requires an input buffer that is unnecessary for // uses of this function. #pragma warning(suppress : 4996) // error C4996: '_wcserror': This function or variable may be unsafe. inline const char_t* strerror(int errnum) { return ::_wcserror(errnum); } bool pal_utf8string(const string_t& str, std::vector<char>* out); bool pal_clrstring(const string_t& str, std::vector<char>* out); bool clr_palstring(const char* cstr, string_t* out); inline bool mkdir(const char_t* dir, int mode) { return CreateDirectoryW(dir, NULL) != 0; } inline bool rmdir(const char_t* path) { return RemoveDirectoryW(path) != 0; } inline int rename(const char_t* old_name, const char_t* new_name) { return ::_wrename(old_name, new_name); } inline int remove(const char_t* path) { return ::_wremove(path); } inline bool munmap(void* addr, size_t length) { return UnmapViewOfFile(addr) != 0; } inline int get_pid() { return GetCurrentProcessId(); } inline void sleep(uint32_t milliseconds) { Sleep(milliseconds); } #else #ifdef EXPORT_SHARED_API #define SHARED_API extern "C" __attribute__((__visibility__("default"))) #else #define SHARED_API extern "C" #endif #define __cdecl /* nothing */ #define __stdcall /* nothing */ #if !defined(TARGET_FREEBSD) #define __fastcall /* nothing */ #endif #define STDMETHODCALLTYPE __stdcall typedef char char_t; typedef std::string string_t; typedef std::stringstream stringstream_t; typedef std::basic_ifstream<char> ifstream_t; typedef std::istreambuf_iterator<ifstream_t::char_type> istreambuf_iterator_t; typedef std::basic_istream<char> istream_t; typedef int hresult_t; typedef void* dll_t; typedef void* proc_t; typedef std::mutex mutex_t; inline string_t exe_suffix() { return _X(""); } 
inline int cstrcasecmp(const char* str1, const char* str2) { return ::strcasecmp(str1, str2); } inline int strcmp(const char_t* str1, const char_t* str2) { return ::strcmp(str1, str2); } inline int strcasecmp(const char_t* str1, const char_t* str2) { return ::strcasecmp(str1, str2); } inline int strncmp(const char_t* str1, const char_t* str2, int len) { return ::strncmp(str1, str2, len); } inline int strncasecmp(const char_t* str1, const char_t* str2, int len) { return ::strncasecmp(str1, str2, len); } inline int pathcmp(const pal::string_t& path1, const pal::string_t& path2) { return strcmp(path1.c_str(), path2.c_str()); } inline string_t to_string(int value) { return std::to_string(value); } inline size_t strlen(const char_t* str) { return ::strlen(str); } inline FILE* file_open(const string_t& path, const char_t* mode) { return fopen(path.c_str(), mode); } inline void file_vprintf(FILE* f, const char_t* format, va_list vl) { ::vfprintf(f, format, vl); ::fputc('\n', f); } inline void err_fputs(const char_t* message) { ::fputs(message, stderr); ::fputc(_X('\n'), stderr); } inline void out_vprintf(const char_t* format, va_list vl) { ::vfprintf(stdout, format, vl); ::fputc('\n', stdout); } inline int str_vprintf(char_t* str, size_t size, const char_t* format, va_list vl) { return ::vsnprintf(str, size, format, vl); } inline const char_t* strerror(int errnum) { return ::strerror(errnum); } inline bool pal_utf8string(const string_t& str, std::vector<char>* out) { out->assign(str.begin(), str.end()); out->push_back('\0'); return true; } inline bool pal_clrstring(const string_t& str, std::vector<char>* out) { return pal_utf8string(str, out); } inline bool clr_palstring(const char* cstr, string_t* out) { out->assign(cstr); return true; } inline bool mkdir(const char_t* dir, int mode) { return ::mkdir(dir, mode) == 0; } inline bool rmdir(const char_t* path) { return ::rmdir(path) == 0; } inline int rename(const char_t* old_name, const char_t* new_name) { return 
::rename(old_name, new_name); } inline int remove(const char_t* path) { return ::remove(path); } inline bool munmap(void* addr, size_t length) { return ::munmap(addr, length) == 0; } inline int get_pid() { return getpid(); } inline void sleep(uint32_t milliseconds) { usleep(milliseconds * 1000); } #endif inline int snwprintf(char_t* buffer, size_t count, const char_t* format, ...) { va_list args; va_start(args, format); int ret = str_vprintf(buffer, count, format, args); va_end(args); return ret; } string_t get_timestamp(); bool getcwd(string_t* recv); inline void file_flush(FILE* f) { std::fflush(f); } inline void err_flush() { std::fflush(stderr); } inline void out_flush() { std::fflush(stdout); } string_t get_current_os_rid_platform(); inline string_t get_current_os_fallback_rid() { string_t fallbackRid(FALLBACK_HOST_RID); return fallbackRid; } const void* mmap_read(const string_t& path, size_t* length = nullptr); void* mmap_copy_on_write(const string_t& path, size_t* length = nullptr); bool touch_file(const string_t& path); bool realpath(string_t* path, bool skip_error_logging = false); bool file_exists(const string_t& path); inline bool directory_exists(const string_t& path) { return file_exists(path); } void readdir(const string_t& path, const string_t& pattern, std::vector<string_t>* list); void readdir(const string_t& path, std::vector<string_t>* list); void readdir_onlydirectories(const string_t& path, const string_t& pattern, std::vector<string_t>* list); void readdir_onlydirectories(const string_t& path, std::vector<string_t>* list); bool get_own_executable_path(string_t* recv); bool get_own_module_path(string_t* recv); bool get_method_module_path(string_t* recv, void* method); bool get_module_path(dll_t mod, string_t* recv); bool get_current_module(dll_t* mod); bool getenv(const char_t* name, string_t* recv); bool get_default_servicing_directory(string_t* recv); // Returns the globally registered install location (if any) bool 
get_dotnet_self_registered_dir(string_t* recv); // Returns name of the global registry location (for error messages) string_t get_dotnet_self_registered_config_location(); // Returns the default install location for a given platform bool get_default_installation_dir(string_t* recv); // Returns the global locations to search for SDK/Frameworks - used when multi-level lookup is enabled bool get_global_dotnet_dirs(std::vector<string_t>* recv); bool get_default_breadcrumb_store(string_t* recv); bool is_path_rooted(const string_t& path); // Returns a platform-specific, user-private directory // that can be used for extracting out components of a single-file app. bool get_default_bundle_extraction_base_dir(string_t& extraction_dir); int xtoi(const char_t* input); bool get_loaded_library(const char_t* library_name, const char* symbol_name, /*out*/ dll_t* dll, /*out*/ string_t* path); bool load_library(const string_t* path, dll_t* dll); proc_t get_symbol(dll_t library, const char* name); void unload_library(dll_t library); bool is_running_in_wow64(); bool is_emulating_x64(); bool are_paths_equal_with_normalized_casing(const string_t& path1, const string_t& path2); } #endif // PAL_H
-1
dotnet/runtime
66,234
Remove compiler warning suppression
Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
AaronRobinsonMSFT
2022-03-05T03:45:49Z
2022-03-09T00:57:12Z
220e67755ae6323a01011b83070dff6f84b02519
853e494abd1c1e12d28a76829b4680af7f694afa
Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
./src/coreclr/pal/src/libunwind/src/mi/Gput_dynamic_unwind_info.c
/* libunwind - a platform-independent unwind library Copyright (C) 2001-2002, 2005 Hewlett-Packard Co Contributed by David Mosberger-Tang <[email protected]> This file is part of libunwind. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include "libunwind_i.h" HIDDEN void unwi_put_dynamic_unwind_info (unw_addr_space_t as, unw_proc_info_t *pi, void *arg) { switch (pi->format) { case UNW_INFO_FORMAT_DYNAMIC: #ifndef UNW_LOCAL_ONLY # ifdef UNW_REMOTE_ONLY unwi_dyn_remote_put_unwind_info (as, pi, arg); # else if (as != unw_local_addr_space) unwi_dyn_remote_put_unwind_info (as, pi, arg); # endif #endif break; case UNW_INFO_FORMAT_TABLE: case UNW_INFO_FORMAT_REMOTE_TABLE: #ifdef tdep_put_unwind_info tdep_put_unwind_info (as, pi, arg); break; #endif /* fall through */ default: break; } }
/* libunwind - a platform-independent unwind library Copyright (C) 2001-2002, 2005 Hewlett-Packard Co Contributed by David Mosberger-Tang <[email protected]> This file is part of libunwind. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include "libunwind_i.h" HIDDEN void unwi_put_dynamic_unwind_info (unw_addr_space_t as, unw_proc_info_t *pi, void *arg) { switch (pi->format) { case UNW_INFO_FORMAT_DYNAMIC: #ifndef UNW_LOCAL_ONLY # ifdef UNW_REMOTE_ONLY unwi_dyn_remote_put_unwind_info (as, pi, arg); # else if (as != unw_local_addr_space) unwi_dyn_remote_put_unwind_info (as, pi, arg); # endif #endif break; case UNW_INFO_FORMAT_TABLE: case UNW_INFO_FORMAT_REMOTE_TABLE: #ifdef tdep_put_unwind_info tdep_put_unwind_info (as, pi, arg); break; #endif /* fall through */ default: break; } }
-1
dotnet/runtime
66,234
Remove compiler warning suppression
Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
AaronRobinsonMSFT
2022-03-05T03:45:49Z
2022-03-09T00:57:12Z
220e67755ae6323a01011b83070dff6f84b02519
853e494abd1c1e12d28a76829b4680af7f694afa
Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
./src/coreclr/pal/tests/palsuite/file_io/ReadFile/test1/ReadFile.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*===================================================================== ** ** Source: ReadFile.c (test 1) ** ** Purpose: Tests the PAL implementation of the ReadFile function. ** This test will attempt to read from a NULL handle and from ** a file without read permissions set. ** ** **===================================================================*/ #include <palsuite.h> PALTEST(file_io_ReadFile_test1_paltest_readfile_test1, "file_io/ReadFile/test1/paltest_readfile_test1") { HANDLE hFile = NULL; DWORD dwByteCount = 0; DWORD dwBytesRead = 0; BOOL bRc = FALSE; char szBuffer[256]; char* szNonReadableFile = {"nonreadablefile.txt"}; if (0 != PAL_Initialize(argc,argv)) { return FAIL; } memset(szBuffer, 0, 256); /* Read from a NULL handle */ bRc = ReadFile(hFile, szBuffer, 20, &dwBytesRead, NULL); if (bRc == TRUE) { Fail("ReadFile: ERROR -> Able to read from a NULL handle\n"); } /* Read from a file without read permissions */ #if WIN32 #else /* attempt to read from the unreadable file * open a file without read permissions */ hFile = CreateFile(szNonReadableFile, GENERIC_WRITE, FILE_SHARE_WRITE, NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL); if(hFile == INVALID_HANDLE_VALUE) { dwByteCount = GetLastError(); Fail("ReadFile: ERROR -> Unable to create file \"%s\".\n", szNonReadableFile); } bRc = ReadFile(hFile, szBuffer, 20, &dwBytesRead, NULL); if (bRc == TRUE) { Fail("ReadFile: ERROR -> Able to read from a file without read " "permissions\n"); } #endif PAL_Terminate(); return PASS; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*===================================================================== ** ** Source: ReadFile.c (test 1) ** ** Purpose: Tests the PAL implementation of the ReadFile function. ** This test will attempt to read from a NULL handle and from ** a file without read permissions set. ** ** **===================================================================*/ #include <palsuite.h> PALTEST(file_io_ReadFile_test1_paltest_readfile_test1, "file_io/ReadFile/test1/paltest_readfile_test1") { HANDLE hFile = NULL; DWORD dwByteCount = 0; DWORD dwBytesRead = 0; BOOL bRc = FALSE; char szBuffer[256]; char* szNonReadableFile = {"nonreadablefile.txt"}; if (0 != PAL_Initialize(argc,argv)) { return FAIL; } memset(szBuffer, 0, 256); /* Read from a NULL handle */ bRc = ReadFile(hFile, szBuffer, 20, &dwBytesRead, NULL); if (bRc == TRUE) { Fail("ReadFile: ERROR -> Able to read from a NULL handle\n"); } /* Read from a file without read permissions */ #if WIN32 #else /* attempt to read from the unreadable file * open a file without read permissions */ hFile = CreateFile(szNonReadableFile, GENERIC_WRITE, FILE_SHARE_WRITE, NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL); if(hFile == INVALID_HANDLE_VALUE) { dwByteCount = GetLastError(); Fail("ReadFile: ERROR -> Unable to create file \"%s\".\n", szNonReadableFile); } bRc = ReadFile(hFile, szBuffer, 20, &dwBytesRead, NULL); if (bRc == TRUE) { Fail("ReadFile: ERROR -> Able to read from a file without read " "permissions\n"); } #endif PAL_Terminate(); return PASS; }
-1
dotnet/runtime
66,234
Remove compiler warning suppression
Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
AaronRobinsonMSFT
2022-03-05T03:45:49Z
2022-03-09T00:57:12Z
220e67755ae6323a01011b83070dff6f84b02519
853e494abd1c1e12d28a76829b4680af7f694afa
Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
./src/coreclr/jit/emitxarch.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX emitX86.cpp XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif #if defined(TARGET_XARCH) /*****************************************************************************/ /*****************************************************************************/ #include "instr.h" #include "emit.h" #include "codegen.h" bool emitter::IsSSEInstruction(instruction ins) { return (ins >= INS_FIRST_SSE_INSTRUCTION) && (ins <= INS_LAST_SSE_INSTRUCTION); } bool emitter::IsSSEOrAVXInstruction(instruction ins) { return (ins >= INS_FIRST_SSE_INSTRUCTION) && (ins <= INS_LAST_AVX_INSTRUCTION); } bool emitter::IsAVXOnlyInstruction(instruction ins) { return (ins >= INS_FIRST_AVX_INSTRUCTION) && (ins <= INS_LAST_AVX_INSTRUCTION); } bool emitter::IsFMAInstruction(instruction ins) { return (ins >= INS_FIRST_FMA_INSTRUCTION) && (ins <= INS_LAST_FMA_INSTRUCTION); } bool emitter::IsAVXVNNIInstruction(instruction ins) { return (ins >= INS_FIRST_AVXVNNI_INSTRUCTION) && (ins <= INS_LAST_AVXVNNI_INSTRUCTION); } bool emitter::IsBMIInstruction(instruction ins) { return (ins >= INS_FIRST_BMI_INSTRUCTION) && (ins <= INS_LAST_BMI_INSTRUCTION); } regNumber emitter::getBmiRegNumber(instruction ins) { switch (ins) { case INS_blsi: { return (regNumber)3; } case INS_blsmsk: { return (regNumber)2; } case INS_blsr: { return (regNumber)1; } default: { assert(IsBMIInstruction(ins)); return REG_NA; } } } regNumber emitter::getSseShiftRegNumber(instruction ins) { switch (ins) { case INS_psrldq: { return (regNumber)3; } case INS_pslldq: { 
return (regNumber)7; } case INS_psrld: case INS_psrlw: case INS_psrlq: { return (regNumber)2; } case INS_pslld: case INS_psllw: case INS_psllq: { return (regNumber)6; } case INS_psrad: case INS_psraw: { return (regNumber)4; } default: { assert(!"Invalid instruction for SSE2 instruction of the form: opcode reg, immed8"); return REG_NA; } } } bool emitter::IsAVXInstruction(instruction ins) const { return UseVEXEncoding() && IsSSEOrAVXInstruction(ins); } // Returns true if the AVX instruction is a binary operator that requires 3 operands. // When we emit an instruction with only two operands, we will duplicate the destination // as a source. // TODO-XArch-Cleanup: This is a temporary solution for now. Eventually this needs to // be formalized by adding an additional field to instruction table to // to indicate whether a 3-operand instruction. bool emitter::IsDstDstSrcAVXInstruction(instruction ins) { return ((CodeGenInterface::instInfo[ins] & INS_Flags_IsDstDstSrcAVXInstruction) != 0) && IsAVXInstruction(ins); } // Returns true if the AVX instruction requires 3 operands that duplicate the source // register in the vvvv field. // TODO-XArch-Cleanup: This is a temporary solution for now. Eventually this needs to // be formalized by adding an additional field to instruction table to // to indicate whether a 3-operand instruction. bool emitter::IsDstSrcSrcAVXInstruction(instruction ins) { return ((CodeGenInterface::instInfo[ins] & INS_Flags_IsDstSrcSrcAVXInstruction) != 0) && IsAVXInstruction(ins); } //------------------------------------------------------------------------ // HasRegularWideForm: Many x86/x64 instructions follow a regular encoding scheme where the // byte-sized version of an instruction has the lowest bit of the opcode cleared // while the 32-bit version of the instruction (taking potential prefixes to // override operand size) has the lowest bit set. This function returns true if // the instruction follows this format. 
// // Note that this bit is called `w` in the encoding table in Section B.2 of // Volume 2 of the Intel Architecture Software Developer Manual. // // Arguments: // ins - instruction to test // // Return Value: // true if instruction has a regular form where the 'w' bit needs to be set. bool emitter::HasRegularWideForm(instruction ins) { return ((CodeGenInterface::instInfo[ins] & INS_FLAGS_Has_Wbit) != 0); } //------------------------------------------------------------------------ // HasRegularWideImmediateForm: As above in HasRegularWideForm, many instructions taking // immediates have a regular form used to encode whether the instruction takes a sign-extended // 1-byte immediate or a (in 64-bit sign-extended) 4-byte immediate, by respectively setting and // clearing the second lowest bit. // // Note that this bit is called `s` in the encoding table in Section B.2 of // Volume 2 of the Intel Architecture Software Developer Manual. // // Arguments: // ins - instruction to test // // Return Value: // true if instruction has a regular wide immediate form where the 's' bit needs to set. bool emitter::HasRegularWideImmediateForm(instruction ins) { return ((CodeGenInterface::instInfo[ins] & INS_FLAGS_Has_Sbit) != 0); } //------------------------------------------------------------------------ // DoesWriteZeroFlag: check if the instruction write the // ZF flag. // // Arguments: // ins - instruction to test // // Return Value: // true if instruction writes the ZF flag, false otherwise. // bool emitter::DoesWriteZeroFlag(instruction ins) { return (CodeGenInterface::instInfo[ins] & Writes_ZF) != 0; } //------------------------------------------------------------------------ // DoesWriteSignFlag: check if the instruction writes the // SF flag. // // Arguments: // ins - instruction to test // // Return Value: // true if instruction writes the SF flag, false otherwise. 
// bool emitter::DoesWriteSignFlag(instruction ins) { return (CodeGenInterface::instInfo[ins] & Writes_SF) != 0; } //------------------------------------------------------------------------ // DoesResetOverflowAndCarryFlags: check if the instruction resets the // OF and CF flag to 0. // // Arguments: // ins - instruction to test // // Return Value: // true if instruction resets the OF and CF flag, false otherwise. // bool emitter::DoesResetOverflowAndCarryFlags(instruction ins) { return (CodeGenInterface::instInfo[ins] & (Resets_OF | Resets_CF)) == (Resets_OF | Resets_CF); } //------------------------------------------------------------------------ // IsFlagsAlwaysModified: check if the instruction guarantee to modify any flags. // // Arguments: // id - instruction to test // // Return Value: // false, if instruction is guaranteed to not modify any flag. // true, if instruction will modify some flag. // bool emitter::IsFlagsAlwaysModified(instrDesc* id) { instruction ins = id->idIns(); insFormat fmt = id->idInsFmt(); if (fmt == IF_RRW_SHF) { if (id->idIsLargeCns()) { return true; } else if (id->idSmallCns() == 0) { switch (ins) { // If shift-amount for below instructions is 0, then flags are unaffected. case INS_rcl_N: case INS_rcr_N: case INS_rol_N: case INS_ror_N: case INS_shl_N: case INS_shr_N: case INS_sar_N: return false; default: return true; } } } else if (fmt == IF_RRW) { switch (ins) { // If shift-amount for below instructions is 0, then flags are unaffected. // So, to be conservative, do not optimize if the instruction has register // as the shift-amount operand. case INS_rcl: case INS_rcr: case INS_rol: case INS_ror: case INS_shl: case INS_shr: case INS_sar: return false; default: return true; } } return true; } //------------------------------------------------------------------------ // AreUpper32BitsZero: check if some previously emitted // instruction set the upper 32 bits of reg to zero. 
// // Arguments: // reg - register of interest // // Return Value: // true if previous instruction zeroed reg's upper 32 bits. // false if it did not, or if we can't safely determine. // // Notes: // Currently only looks back one instruction. // // movsx eax, ... might seem viable but we always encode this // instruction with a 64 bit destination. See TakesRexWPrefix. bool emitter::AreUpper32BitsZero(regNumber reg) { // If there are no instructions in this IG, we can look back at // the previous IG's instructions if this IG is an extension. // if ((emitCurIGinsCnt == 0) && ((emitCurIG->igFlags & IGF_EXTEND) == 0)) { return false; } instrDesc* id = emitLastIns; insFormat fmt = id->idInsFmt(); // This isn't meant to be a comprehensive check. Just look for what // seems to be common. switch (fmt) { case IF_RWR_CNS: case IF_RRW_CNS: case IF_RRW_SHF: case IF_RWR_RRD: case IF_RRW_RRD: case IF_RWR_MRD: case IF_RWR_SRD: case IF_RWR_ARD: // Bail if not writing to the right register if (id->idReg1() != reg) { return false; } // Bail if movsx, we always have movsx sign extend to 8 bytes if (id->idIns() == INS_movsx) { return false; } #ifdef TARGET_AMD64 if (id->idIns() == INS_movsxd) { return false; } #endif // movzx always zeroes the upper 32 bits. if (id->idIns() == INS_movzx) { return true; } // Else rely on operation size. return (id->idOpSize() == EA_4BYTE); default: break; } return false; } //------------------------------------------------------------------------ // AreFlagsSetToZeroCmp: Checks if the previous instruction set the SZ, and optionally OC, flags to // the same values as if there were a compare to 0 // // Arguments: // reg - register of interest // opSize - size of register // treeOps - type of tree node operation // // Return Value: // true if the previous instruction set the flags for reg // false if not, or if we can't safely determine // // Notes: // Currently only looks back one instruction. 
bool emitter::AreFlagsSetToZeroCmp(regNumber reg, emitAttr opSize, genTreeOps treeOps) { assert(reg != REG_NA); if (!emitComp->opts.OptimizationEnabled()) { return false; } // Don't look back across IG boundaries (possible control flow) if (emitCurIGinsCnt == 0 && ((emitCurIG->igFlags & IGF_EXTEND) == 0)) { return false; } instrDesc* id = emitLastIns; instruction lastIns = id->idIns(); insFormat fmt = id->idInsFmt(); // make sure op1 is a reg switch (fmt) { case IF_RWR_CNS: case IF_RRW_CNS: case IF_RRW_SHF: case IF_RWR_RRD: case IF_RRW_RRD: case IF_RWR_MRD: case IF_RWR_SRD: case IF_RRW_SRD: case IF_RWR_ARD: case IF_RRW_ARD: case IF_RWR: case IF_RRD: case IF_RRW: case IF_RWR_RRD_RRD: case IF_RWR_RRD_MRD: case IF_RWR_RRD_ARD: case IF_RWR_RRD_SRD: break; default: return false; } if (id->idReg1() != reg) { return false; } // Certain instruction like and, or and xor modifies exactly same flags // as "test" instruction. // They reset OF and CF to 0 and modifies SF, ZF and PF. if (DoesResetOverflowAndCarryFlags(lastIns)) { return id->idOpSize() == opSize; } if ((treeOps == GT_EQ) || (treeOps == GT_NE)) { if (DoesWriteZeroFlag(lastIns) && IsFlagsAlwaysModified(id)) { return id->idOpSize() == opSize; } } return false; } //------------------------------------------------------------------------ // AreFlagsSetToForSignJumpOpt: checks if the previous instruction set the SF if the tree // node qualifies for a jg/jle to jns/js optimization // // Arguments: // reg - register of interest // opSize - size of register // relop - relational tree node // // Return Value: // true if the tree node qualifies for the jg/jle to jns/js optimization // false if not, or if we can't safely determine // // Notes: // Currently only looks back one instruction. 
bool emitter::AreFlagsSetForSignJumpOpt(regNumber reg, emitAttr opSize, GenTree* relop) { assert(reg != REG_NA); if (!emitComp->opts.OptimizationEnabled()) { return false; } // Don't look back across IG boundaries (possible control flow) if (emitCurIGinsCnt == 0 && ((emitCurIG->igFlags & IGF_EXTEND) == 0)) { return false; } instrDesc* id = emitLastIns; instruction lastIns = id->idIns(); insFormat fmt = id->idInsFmt(); // make sure op1 is a reg switch (fmt) { case IF_RWR_CNS: case IF_RRW_CNS: case IF_RRW_SHF: case IF_RWR_RRD: case IF_RRW_RRD: case IF_RWR_MRD: case IF_RWR_SRD: case IF_RRW_SRD: case IF_RWR_ARD: case IF_RRW_ARD: case IF_RWR: case IF_RRD: case IF_RRW: break; default: return false; } if (id->idReg1() != reg) { return false; } // If we have a GT_GE/GT_LT which generates an jge/jl, and the previous instruction // sets the SF, we can omit a test instruction and check for jns/js. if ((relop->OperGet() == GT_GE || relop->OperGet() == GT_LT) && !GenCondition::FromRelop(relop).IsUnsigned()) { if (DoesWriteSignFlag(lastIns) && IsFlagsAlwaysModified(id)) { return id->idOpSize() == opSize; } } return false; } //------------------------------------------------------------------------ // IsDstSrcImmAvxInstruction: Checks if the instruction has a "reg, reg/mem, imm" or // "reg/mem, reg, imm" form for the legacy, VEX, and EVEX // encodings. // // Arguments: // instruction -- processor instruction to check // // Return Value: // true if instruction has a "reg, reg/mem, imm" or "reg/mem, reg, imm" encoding // form for the legacy, VEX, and EVEX encodings. // // That is, the instruction takes two operands, one of which is immediate, and it // does not need to encode any data in the VEX.vvvv field. 
// static bool IsDstSrcImmAvxInstruction(instruction ins) { switch (ins) { case INS_aeskeygenassist: case INS_extractps: case INS_pextrb: case INS_pextrw: case INS_pextrd: case INS_pextrq: case INS_pshufd: case INS_pshufhw: case INS_pshuflw: case INS_roundpd: case INS_roundps: return true; default: return false; } } // ------------------------------------------------------------------- // Is4ByteSSEInstruction: Returns true if the SSE instruction is a 4-byte opcode. // // Arguments: // ins - instruction // // Note that this should be true for any of the instructions in instrsXArch.h // that use the SSE38 or SSE3A macro but returns false if the VEX encoding is // in use, since that encoding does not require an additional byte. bool emitter::Is4ByteSSEInstruction(instruction ins) { return !UseVEXEncoding() && EncodedBySSE38orSSE3A(ins); } // Returns true if this instruction requires a VEX prefix // All AVX instructions require a VEX prefix bool emitter::TakesVexPrefix(instruction ins) const { // special case vzeroupper as it requires 2-byte VEX prefix // special case the fencing, movnti and the prefetch instructions as they never take a VEX prefix switch (ins) { case INS_lfence: case INS_mfence: case INS_movnti: case INS_prefetchnta: case INS_prefetcht0: case INS_prefetcht1: case INS_prefetcht2: case INS_sfence: case INS_vzeroupper: return false; default: break; } return IsAVXInstruction(ins); } // Add base VEX prefix without setting W, R, X, or B bits // L bit will be set based on emitter attr. 
// // 2-byte VEX prefix = C5 <R,vvvv,L,pp> // 3-byte VEX prefix = C4 <R,X,B,m-mmmm> <W,vvvv,L,pp> // - R, X, B, W - bits to express corresponding REX prefixes // - m-mmmmm (5-bit) // 0-00001 - implied leading 0F opcode byte // 0-00010 - implied leading 0F 38 opcode bytes // 0-00011 - implied leading 0F 3A opcode bytes // Rest - reserved for future use and usage of them will uresult in Undefined instruction exception // // - vvvv (4-bits) - register specifier in 1's complement form; must be 1111 if unused // - L - scalar or AVX-128 bit operations (L=0), 256-bit operations (L=1) // - pp (2-bits) - opcode extension providing equivalent functionality of a SIMD size prefix // these prefixes are treated mandatory when used with escape opcode 0Fh for // some SIMD instructions // 00 - None (0F - packed float) // 01 - 66 (66 0F - packed double) // 10 - F3 (F3 0F - scalar float // 11 - F2 (F2 0F - scalar double) #define DEFAULT_3BYTE_VEX_PREFIX 0xC4E07800000000ULL #define DEFAULT_3BYTE_VEX_PREFIX_MASK 0xFFFFFF00000000ULL #define LBIT_IN_3BYTE_VEX_PREFIX 0x00000400000000ULL emitter::code_t emitter::AddVexPrefix(instruction ins, code_t code, emitAttr attr) { // The 2-byte VEX encoding is preferred when possible, but actually emitting // it depends on a number of factors that we may not know until much later. // // In order to handle this "easily", we just carry the 3-byte encoding all // the way through and "fix-up" the encoding when the VEX prefix is actually // emitted, by simply checking that all the requirements were met. // Only AVX instructions require VEX prefix assert(IsAVXInstruction(ins)); // Shouldn't have already added VEX prefix assert(!hasVexPrefix(code)); assert((code & DEFAULT_3BYTE_VEX_PREFIX_MASK) == 0); code |= DEFAULT_3BYTE_VEX_PREFIX; if (attr == EA_32BYTE) { // Set L bit to 1 in case of instructions that operate on 256-bits. 
code |= LBIT_IN_3BYTE_VEX_PREFIX; } return code; } // Returns true if this instruction, for the given EA_SIZE(attr), will require a REX.W prefix bool emitter::TakesRexWPrefix(instruction ins, emitAttr attr) { // Because the current implementation of AVX does not have a way to distinguish between the register // size specification (128 vs. 256 bits) and the operand size specification (32 vs. 64 bits), where both are // required, the instruction must be created with the register size attribute (EA_16BYTE or EA_32BYTE), // and here we must special case these by the opcode. switch (ins) { case INS_vpermpd: case INS_vpermq: case INS_vpsrlvq: case INS_vpsllvq: case INS_pinsrq: case INS_pextrq: case INS_vfmadd132pd: case INS_vfmadd213pd: case INS_vfmadd231pd: case INS_vfmadd132sd: case INS_vfmadd213sd: case INS_vfmadd231sd: case INS_vfmaddsub132pd: case INS_vfmaddsub213pd: case INS_vfmaddsub231pd: case INS_vfmsubadd132pd: case INS_vfmsubadd213pd: case INS_vfmsubadd231pd: case INS_vfmsub132pd: case INS_vfmsub213pd: case INS_vfmsub231pd: case INS_vfmsub132sd: case INS_vfmsub213sd: case INS_vfmsub231sd: case INS_vfnmadd132pd: case INS_vfnmadd213pd: case INS_vfnmadd231pd: case INS_vfnmadd132sd: case INS_vfnmadd213sd: case INS_vfnmadd231sd: case INS_vfnmsub132pd: case INS_vfnmsub213pd: case INS_vfnmsub231pd: case INS_vfnmsub132sd: case INS_vfnmsub213sd: case INS_vfnmsub231sd: case INS_vpmaskmovq: case INS_vpgatherdq: case INS_vpgatherqq: case INS_vgatherdpd: case INS_vgatherqpd: return true; default: break; } #ifdef TARGET_AMD64 // movsx should always sign extend out to 8 bytes just because we don't track // whether the dest should be 4 bytes or 8 bytes (attr indicates the size // of the source, not the dest). // A 4-byte movzx is equivalent to an 8 byte movzx, so it is not special // cased here. 
if (ins == INS_movsx) { return true; } if (EA_SIZE(attr) != EA_8BYTE) { return false; } if (IsSSEOrAVXInstruction(ins)) { switch (ins) { case INS_movd: // TODO-Cleanup: replace with movq, https://github.com/dotnet/runtime/issues/47943. case INS_andn: case INS_bextr: case INS_blsi: case INS_blsmsk: case INS_blsr: case INS_bzhi: case INS_cvttsd2si: case INS_cvttss2si: case INS_cvtsd2si: case INS_cvtss2si: case INS_cvtsi2sd: case INS_cvtsi2ss: case INS_movnti: case INS_mulx: case INS_pdep: case INS_pext: case INS_rorx: return true; default: return false; } } // TODO-XArch-Cleanup: Better way to not emit REX.W when we don't need it, than just testing all these // opcodes... // These are all the instructions that default to 8-byte operand without the REX.W bit // With 1 special case: movzx because the 4 byte version still zeros-out the hi 4 bytes // so we never need it if ((ins != INS_push) && (ins != INS_pop) && (ins != INS_movq) && (ins != INS_movzx) && (ins != INS_push_hide) && (ins != INS_pop_hide) && (ins != INS_ret) && (ins != INS_call) && (ins != INS_tail_i_jmp) && !((ins >= INS_i_jmp) && (ins <= INS_l_jg))) { return true; } else { return false; } #else //! TARGET_AMD64 = TARGET_X86 return false; #endif //! TARGET_AMD64 } // Returns true if using this register will require a REX.* prefix. // Since XMM registers overlap with YMM registers, this routine // can also be used to know whether a YMM register if the // instruction in question is AVX. bool IsExtendedReg(regNumber reg) { #ifdef TARGET_AMD64 return ((reg >= REG_R8) && (reg <= REG_R15)) || ((reg >= REG_XMM8) && (reg <= REG_XMM15)); #else // X86 JIT operates in 32-bit mode and hence extended reg are not available. 
return false; #endif } // Returns true if using this register, for the given EA_SIZE(attr), will require a REX.* prefix bool IsExtendedReg(regNumber reg, emitAttr attr) { #ifdef TARGET_AMD64 // Not a register, so doesn't need a prefix if (reg > REG_XMM15) { return false; } // Opcode field only has 3 bits for the register, these high registers // need a 4th bit, that comes from the REX prefix (eiter REX.X, REX.R, or REX.B) if (IsExtendedReg(reg)) { return true; } if (EA_SIZE(attr) != EA_1BYTE) { return false; } // There are 12 one byte registers addressible 'below' r8b: // al, cl, dl, bl, ah, ch, dh, bh, spl, bpl, sil, dil. // The first 4 are always addressible, the last 8 are divided into 2 sets: // ah, ch, dh, bh // -- or -- // spl, bpl, sil, dil // Both sets are encoded exactly the same, the difference is the presence // of a REX prefix, even a REX prefix with no other bits set (0x40). // So in order to get to the second set we need a REX prefix (but no bits). // // TODO-AMD64-CQ: if we ever want to start using the first set, we'll need a different way of // encoding/tracking/encoding registers. return (reg >= REG_RSP); #else // X86 JIT operates in 32-bit mode and hence extended reg are not available. return false; #endif } // Since XMM registers overlap with YMM registers, this routine // can also used to know whether a YMM register in case of AVX instructions. bool IsXMMReg(regNumber reg) { #ifdef TARGET_AMD64 return (reg >= REG_XMM0) && (reg <= REG_XMM15); #else // !TARGET_AMD64 return (reg >= REG_XMM0) && (reg <= REG_XMM7); #endif // !TARGET_AMD64 } // Returns bits to be encoded in instruction for the given register. unsigned RegEncoding(regNumber reg) { static_assert((REG_XMM0 & 0x7) == 0, "bad XMMBASE"); return (unsigned)(reg & 0x7); } // Utility routines that abstract the logic of adding REX.W, REX.R, REX.X, REX.B and REX prefixes // SSE2: separate 1-byte prefix gets added before opcode. 
// AVX: specific bits within VEX prefix need to be set in bit-inverted form. emitter::code_t emitter::AddRexWPrefix(instruction ins, code_t code) { if (UseVEXEncoding() && IsAVXInstruction(ins)) { if (TakesVexPrefix(ins)) { // W-bit is available only in 3-byte VEX prefix that starts with byte C4. assert(hasVexPrefix(code)); // W-bit is the only bit that is added in non bit-inverted form. return emitter::code_t(code | 0x00008000000000ULL); } } #ifdef TARGET_AMD64 return emitter::code_t(code | 0x4800000000ULL); #else assert(!"UNREACHED"); return code; #endif } #ifdef TARGET_AMD64 emitter::code_t emitter::AddRexRPrefix(instruction ins, code_t code) { if (UseVEXEncoding() && IsAVXInstruction(ins)) { if (TakesVexPrefix(ins)) { // R-bit is supported by both 2-byte and 3-byte VEX prefix assert(hasVexPrefix(code)); // R-bit is added in bit-inverted form. return code & 0xFF7FFFFFFFFFFFULL; } } return code | 0x4400000000ULL; } emitter::code_t emitter::AddRexXPrefix(instruction ins, code_t code) { if (UseVEXEncoding() && IsAVXInstruction(ins)) { if (TakesVexPrefix(ins)) { // X-bit is available only in 3-byte VEX prefix that starts with byte C4. assert(hasVexPrefix(code)); // X-bit is added in bit-inverted form. return code & 0xFFBFFFFFFFFFFFULL; } } return code | 0x4200000000ULL; } emitter::code_t emitter::AddRexBPrefix(instruction ins, code_t code) { if (UseVEXEncoding() && IsAVXInstruction(ins)) { if (TakesVexPrefix(ins)) { // B-bit is available only in 3-byte VEX prefix that starts with byte C4. assert(hasVexPrefix(code)); // B-bit is added in bit-inverted form. 
return code & 0xFFDFFFFFFFFFFFULL; } } return code | 0x4100000000ULL; } // Adds REX prefix (0x40) without W, R, X or B bits set emitter::code_t emitter::AddRexPrefix(instruction ins, code_t code) { assert(!UseVEXEncoding() || !IsAVXInstruction(ins)); return code | 0x4000000000ULL; } #endif // TARGET_AMD64 bool isPrefix(BYTE b) { assert(b != 0); // Caller should check this assert(b != 0x67); // We don't use the address size prefix assert(b != 0x65); // The GS segment override prefix is emitted separately assert(b != 0x64); // The FS segment override prefix is emitted separately assert(b != 0xF0); // The lock prefix is emitted separately assert(b != 0x2E); // We don't use the CS segment override prefix assert(b != 0x3E); // Or the DS segment override prefix assert(b != 0x26); // Or the ES segment override prefix assert(b != 0x36); // Or the SS segment override prefix // That just leaves the size prefixes used in SSE opcodes: // Scalar Double Scalar Single Packed Double return ((b == 0xF2) || (b == 0xF3) || (b == 0x66)); } // Outputs VEX prefix (in case of AVX instructions) and REX.R/X/W/B otherwise. 
unsigned emitter::emitOutputRexOrVexPrefixIfNeeded(instruction ins, BYTE* dst, code_t& code) { if (hasVexPrefix(code)) { // Only AVX instructions should have a VEX prefix assert(UseVEXEncoding() && IsAVXInstruction(ins)); code_t vexPrefix = (code >> 32) & 0x00FFFFFF; code &= 0x00000000FFFFFFFFLL; WORD leadingBytes = 0; BYTE check = (code >> 24) & 0xFF; if (check != 0) { // 3-byte opcode: with the bytes ordered as 0x2211RM33 or // 4-byte opcode: with the bytes ordered as 0x22114433 // check for a prefix in the 11 position BYTE sizePrefix = (code >> 16) & 0xFF; if ((sizePrefix != 0) && isPrefix(sizePrefix)) { // 'pp' bits in byte2 of VEX prefix allows us to encode SIMD size prefixes as two bits // // 00 - None (0F - packed float) // 01 - 66 (66 0F - packed double) // 10 - F3 (F3 0F - scalar float // 11 - F2 (F2 0F - scalar double) switch (sizePrefix) { case 0x66: if (IsBMIInstruction(ins)) { switch (ins) { case INS_rorx: case INS_pdep: case INS_mulx: { vexPrefix |= 0x03; break; } case INS_pext: { vexPrefix |= 0x02; break; } default: { vexPrefix |= 0x00; break; } } } else { vexPrefix |= 0x01; } break; case 0xF3: vexPrefix |= 0x02; break; case 0xF2: vexPrefix |= 0x03; break; default: assert(!"unrecognized SIMD size prefix"); unreached(); } // Now the byte in the 22 position must be an escape byte 0F leadingBytes = check; assert(leadingBytes == 0x0F); // Get rid of both sizePrefix and escape byte code &= 0x0000FFFFLL; // Check the byte in the 33 position to see if it is 3A or 38. // In such a case escape bytes must be 0x0F3A or 0x0F38 check = code & 0xFF; if (check == 0x3A || check == 0x38) { leadingBytes = (leadingBytes << 8) | check; code &= 0x0000FF00LL; } } } else { // 2-byte opcode with the bytes ordered as 0x0011RM22 // the byte in position 11 must be an escape byte. 
leadingBytes = (code >> 16) & 0xFF; assert(leadingBytes == 0x0F || leadingBytes == 0x00); code &= 0xFFFF; } // If there is an escape byte it must be 0x0F or 0x0F3A or 0x0F38 // m-mmmmm bits in byte 1 of VEX prefix allows us to encode these // implied leading bytes. 0x0F is supported by both the 2-byte and // 3-byte encoding. While 0x0F3A and 0x0F38 are only supported by // the 3-byte version. switch (leadingBytes) { case 0x00: // there is no leading byte break; case 0x0F: vexPrefix |= 0x0100; break; case 0x0F38: vexPrefix |= 0x0200; break; case 0x0F3A: vexPrefix |= 0x0300; break; default: assert(!"encountered unknown leading bytes"); unreached(); } // At this point // VEX.2211RM33 got transformed as VEX.0000RM33 // VEX.0011RM22 got transformed as VEX.0000RM22 // // Now output VEX prefix leaving the 4-byte opcode // The 2-byte VEX encoding, requires that the X and B-bits are set (these // bits are inverted from the REX values so set means off), the W-bit is // not set (this bit is not inverted), and that the m-mmmm bits are 0-0001 // (the 2-byte VEX encoding only supports the 0x0F leading byte). When these // conditions are met, we can change byte-0 from 0xC4 to 0xC5 and then // byte-1 is the logical-or of bit 7 from byte-1 and bits 0-6 from byte 2 // from the 3-byte VEX encoding. // // Given the above, the check can be reduced to a simple mask and comparison. 
// * 0xFFFF7F80 is a mask that ignores any bits whose value we don't care about: // * R can be set or unset (0x7F ignores bit 7) // * vvvv can be any value (0x80 ignores bits 3-6) // * L can be set or unset (0x80 ignores bit 2) // * pp can be any value (0x80 ignores bits 0-1) // * 0x00C46100 is a value that signifies the requirements listed above were met: // * We must be a three-byte VEX opcode (0x00C4) // * X and B must be set (0x61 validates bits 5-6) // * m-mmmm must be 0-00001 (0x61 validates bits 0-4) // * W must be unset (0x00 validates bit 7) if ((vexPrefix & 0xFFFF7F80) == 0x00C46100) { // Encoding optimization calculation is not done while estimating the instruction // size and thus over-predict instruction size by 1 byte. // If there are IGs that will be aligned, do not optimize encoding so the // estimated alignment sizes are accurate. if (emitCurIG->igNum > emitLastAlignedIgNum) { emitOutputByte(dst, 0xC5); emitOutputByte(dst + 1, ((vexPrefix >> 8) & 0x80) | (vexPrefix & 0x7F)); return 2; } } emitOutputByte(dst, ((vexPrefix >> 16) & 0xFF)); emitOutputByte(dst + 1, ((vexPrefix >> 8) & 0xFF)); emitOutputByte(dst + 2, vexPrefix & 0xFF); return 3; } #ifdef TARGET_AMD64 if (code > 0x00FFFFFFFFLL) { BYTE prefix = (code >> 32) & 0xFF; noway_assert(prefix >= 0x40 && prefix <= 0x4F); code &= 0x00000000FFFFFFFFLL; // TODO-AMD64-Cleanup: when we remove the prefixes (just the SSE opcodes right now) // we can remove this code as well // The REX prefix is required to come after all other prefixes. // Some of our 'opcodes' actually include some prefixes, if that // is the case, shift them over and place the REX prefix after // the other prefixes, and emit any prefix that got moved out. 
BYTE check = (code >> 24) & 0xFF; if (check == 0) { // 3-byte opcode: with the bytes ordered as 0x00113322 // check for a prefix in the 11 position check = (code >> 16) & 0xFF; if (check != 0 && isPrefix(check)) { // Swap the rex prefix and whatever this prefix is code = (((DWORD)prefix << 16) | (code & 0x0000FFFFLL)); // and then emit the other prefix return emitOutputByte(dst, check); } } else { // 4-byte opcode with the bytes ordered as 0x22114433 // first check for a prefix in the 11 position BYTE check2 = (code >> 16) & 0xFF; if (isPrefix(check2)) { assert(!isPrefix(check)); // We currently don't use this, so it is untested if (isPrefix(check)) { // 3 prefixes were rex = rr, check = c1, check2 = c2 encoded as 0xrrc1c2XXXX // Change to c2rrc1XXXX, and emit check2 now code = (((code_t)prefix << 24) | ((code_t)check << 16) | (code & 0x0000FFFFLL)); } else { // 2 prefixes were rex = rr, check2 = c2 encoded as 0xrrXXc2XXXX, (check is part of the opcode) // Change to c2XXrrXXXX, and emit check2 now code = (((code_t)check << 24) | ((code_t)prefix << 16) | (code & 0x0000FFFFLL)); } return emitOutputByte(dst, check2); } } return emitOutputByte(dst, prefix); } #endif // TARGET_AMD64 return 0; } #ifdef TARGET_AMD64 /***************************************************************************** * Is the last instruction emitted a call instruction? */ bool emitter::emitIsLastInsCall() { if ((emitLastIns != nullptr) && (emitLastIns->idIns() == INS_call)) { return true; } return false; } /***************************************************************************** * We're about to create an epilog. If the last instruction we output was a 'call', * then we need to insert a NOP, to allow for proper exception-handling behavior. 
*/

//------------------------------------------------------------------------
// emitOutputPreEpilogNOP: Emit a NOP before an epilog if the previous
// instruction was a call, so the return address does not land in the epilog
// (required for proper exception-handling behavior; see comment above).
//
void emitter::emitOutputPreEpilogNOP()
{
    if (emitIsLastInsCall())
    {
        emitIns(INS_nop);
    }
}

#endif // TARGET_AMD64

// Size of rex prefix in bytes
//
// Arguments:
//    ins -- The instruction being emitted
//
// Returns:
//    0 for AVX instructions (REX bits live inside the VEX prefix), 1 otherwise.
unsigned emitter::emitGetRexPrefixSize(instruction ins)
{
    // In case of AVX instructions, REX prefixes are part of VEX prefix.
    // And hence requires no additional byte to encode REX prefixes.
    if (IsAVXInstruction(ins))
    {
        return 0;
    }

    // If not AVX, then we would need 1-byte to encode REX prefix.
    return 1;
}

// Size of vex prefix in bytes
//
// Arguments:
//    ins  -- The instruction being emitted
//    attr -- The emit attribute (currently unused by this routine)
//
// Returns:
//    3 for AVX instructions (3-byte VEX prefix assumed), 0 otherwise.
unsigned emitter::emitGetVexPrefixSize(instruction ins, emitAttr attr)
{
    if (IsAVXInstruction(ins))
    {
        return 3;
    }

    // If not AVX, then we don't need to encode vex prefix.
    return 0;
}

//------------------------------------------------------------------------
// emitGetAdjustedSize: Determines any size adjustment needed for a given instruction based on the current
// configuration.
//
// Arguments:
//    ins   -- The instruction being emitted
//    attr  -- The emit attribute
//    code  -- The current opcode and any known prefixes
unsigned emitter::emitGetAdjustedSize(instruction ins, emitAttr attr, code_t code)
{
    unsigned adjustedSize = 0;

    if (IsAVXInstruction(ins))
    {
        // VEX prefix encodes some bytes of the opcode and as a result, overall size of the instruction reduces.
        // Therefore, to estimate the size adding VEX prefix size and size of instruction opcode bytes will always
        // overestimate.
        // Instead this routine will adjust the size of VEX prefix based on the number of bytes of opcode it encodes so
        // that
        // instruction size estimate will be accurate.
        // Basically this will decrease the vexPrefixSize, so that opcodeSize + vexPrefixAdjustedSize will be the right
        // size.
        //
        // rightOpcodeSize + vexPrefixSize
        //  = (opcodeSize - ExtrabytesSize) + vexPrefixSize
        //  = opcodeSize + (vexPrefixSize - ExtrabytesSize)
        //  = opcodeSize + vexPrefixAdjustedSize

        unsigned vexPrefixAdjustedSize = emitGetVexPrefixSize(ins, attr);
        assert(vexPrefixAdjustedSize == 3);

        // In this case, opcode will contain an escape prefix of at least one byte,
        // so vexPrefixAdjustedSize should be minus one.
        vexPrefixAdjustedSize -= 1;

        // Get the fourth byte in Opcode.
        // If this byte is non-zero, then we should check whether the opcode contains SIMD prefix or not.
        BYTE check = (code >> 24) & 0xFF;
        if (check != 0)
        {
            // 3-byte opcode: with the bytes ordered as 0x2211RM33 or
            // 4-byte opcode: with the bytes ordered as 0x22114433
            // Simd prefix is at the first byte.
            BYTE sizePrefix = (code >> 16) & 0xFF;
            if (sizePrefix != 0 && isPrefix(sizePrefix))
            {
                // The SIMD prefix (0x66/0xF2/0xF3) is folded into the VEX.pp field,
                // so it costs no extra byte in the VEX encoding.
                vexPrefixAdjustedSize -= 1;
            }

            // If the opcode size is 4 bytes, then the second escape prefix is at fourth byte in opcode.
            // But in this case the opcode has not counted R\M part.
            // opcodeSize + VexPrefixAdjustedSize - ExtraEscapePrefixSize + ModR\MSize
            //=opcodeSize + VexPrefixAdjustedSize -1 + 1
            //=opcodeSize + VexPrefixAdjustedSize
            // So although we may have second byte escape prefix, we won't decrease vexPrefixAdjustedSize.
        }

        adjustedSize = vexPrefixAdjustedSize;
    }
    else if (Is4ByteSSEInstruction(ins))
    {
        // The 4-Byte SSE instructions require one additional byte to hold the ModRM byte
        adjustedSize++;
    }
    else
    {
        if (ins == INS_crc32)
        {
            // Adjust code size for CRC32 that has 4-byte opcode but does not use SSE38 or EES3A encoding.
            adjustedSize++;
        }

        if ((attr == EA_2BYTE) && (ins != INS_movzx) && (ins != INS_movsx))
        {
            // Most 16-bit operand instructions will need a 0x66 prefix.
            adjustedSize++;
        }
    }

    return adjustedSize;
}

//
//------------------------------------------------------------------------
// emitGetPrefixSize: Get size of rex or vex prefix emitted in code
//
// Arguments:
//    code                  -- The current opcode and any known prefixes
//    includeRexPrefixSize  -- If Rex Prefix size should be included or not
//
unsigned emitter::emitGetPrefixSize(code_t code, bool includeRexPrefixSize)
{
    if (hasVexPrefix(code))
    {
        return 3;
    }

    if (includeRexPrefixSize && hasRexPrefix(code))
    {
        return 1;
    }

    return 0;
}

#ifdef TARGET_X86
/*****************************************************************************
 *
 *  Record a non-empty stack
 */

void emitter::emitMarkStackLvl(unsigned stackLevel)
{
    // Must only be called once per IG, at the start, with a nonzero,
    // int-aligned stack level.
    assert(int(stackLevel) >= 0);
    assert(emitCurStackLvl == 0);
    assert(emitCurIG->igStkLvl == 0);
    assert(emitCurIGfreeNext == emitCurIGfreeBase);

    assert(stackLevel && stackLevel % sizeof(int) == 0);

    emitCurStackLvl = emitCurIG->igStkLvl = stackLevel;

    if (emitMaxStackDepth < emitCurStackLvl)
    {
        JITDUMP("Upping emitMaxStackDepth from %d to %d\n", emitMaxStackDepth, emitCurStackLvl);
        emitMaxStackDepth = emitCurStackLvl;
    }
}
#endif

/*****************************************************************************
 *
 *  Get hold of the address mode displacement value for an indirect call.
 */

inline ssize_t emitter::emitGetInsCIdisp(instrDesc* id)
{
    // Large-call descriptors store the displacement out-of-line in the
    // instrDescCGCA variant; small descriptors keep it in the address mode.
    if (id->idIsLargeCall())
    {
        return ((instrDescCGCA*)id)->idcDisp;
    }
    else
    {
        assert(!id->idIsLargeDsp());
        assert(!id->idIsLargeCns());

        return id->idAddr()->iiaAddrMode.amDisp;
    }
}

/** ***************************************************************************
 *
 *  The following table is used by the instIsFP()/instUse/DefFlags() helpers.
 */

// clang-format off
const insFlags CodeGenInterface::instInfo[] =
{
    #define INST0(id, nm, um, mr,                 flags) static_cast<insFlags>(flags),
    #define INST1(id, nm, um, mr,                 flags) static_cast<insFlags>(flags),
    #define INST2(id, nm, um, mr, mi,             flags) static_cast<insFlags>(flags),
    #define INST3(id, nm, um, mr, mi, rm,         flags) static_cast<insFlags>(flags),
    #define INST4(id, nm, um, mr, mi, rm, a4,     flags) static_cast<insFlags>(flags),
    #define INST5(id, nm, um, mr, mi, rm, a4, rr, flags) static_cast<insFlags>(flags),
    #include "instrs.h"
    #undef  INST0
    #undef  INST1
    #undef  INST2
    #undef  INST3
    #undef  INST4
    #undef  INST5
};
// clang-format on

/*****************************************************************************
 *
 *  Initialize the table used by emitInsModeFormat().
 */

// clang-format off
const BYTE emitter::emitInsModeFmtTab[] =
{
    #define INST0(id, nm, um, mr,                 flags) um,
    #define INST1(id, nm, um, mr,                 flags) um,
    #define INST2(id, nm, um, mr, mi,             flags) um,
    #define INST3(id, nm, um, mr, mi, rm,         flags) um,
    #define INST4(id, nm, um, mr, mi, rm, a4,     flags) um,
    #define INST5(id, nm, um, mr, mi, rm, a4, rr, flags) um,
    #include "instrs.h"
    #undef  INST0
    #undef  INST1
    #undef  INST2
    #undef  INST3
    #undef  INST4
    #undef  INST5
};
// clang-format on

#ifdef DEBUG
unsigned const emitter::emitInsModeFmtCnt = ArrLen(emitInsModeFmtTab);
#endif

/*****************************************************************************
 *
 *  Combine the given base format with the update mode of the instruction.
 */

inline emitter::insFormat emitter::emitInsModeFormat(instruction ins, insFormat base)
{
    // Relies on the read/write/read-write formats being laid out contiguously
    // so the update mode can be used as an additive offset.
    assert(IF_RRD + IUM_RD == IF_RRD);
    assert(IF_RRD + IUM_WR == IF_RWR);
    assert(IF_RRD + IUM_RW == IF_RRW);

    return (insFormat)(base + emitInsUpdateMode(ins));
}

// This is a helper we need due to Vs Whidbey #254016 in order to distinguish
// if we can not possibly be updating an integer register. This is not the best
// solution, but the other ones (see bug) are going to be much more complicated.
bool emitter::emitInsCanOnlyWriteSSE2OrAVXReg(instrDesc* id)
{
    instruction ins = id->idIns();

    if (!IsSSEOrAVXInstruction(ins))
    {
        return false;
    }

    switch (ins)
    {
        case INS_andn:
        case INS_bextr:
        case INS_blsi:
        case INS_blsmsk:
        case INS_blsr:
        case INS_bzhi:
        case INS_cvttsd2si:
        case INS_cvttss2si:
        case INS_cvtsd2si:
        case INS_cvtss2si:
        case INS_extractps:
        case INS_movd:
        case INS_movmskpd:
        case INS_movmskps:
        case INS_mulx:
        case INS_pdep:
        case INS_pext:
        case INS_pmovmskb:
        case INS_pextrb:
        case INS_pextrd:
        case INS_pextrq:
        case INS_pextrw:
        case INS_pextrw_sse41:
        case INS_rorx:
        {
            // These SSE instructions write to a general purpose integer register.
            return false;
        }

        default:
        {
            return true;
        }
    }
}

/*****************************************************************************
 *
 *  Returns the base encoding of the given CPU instruction.
 */

inline size_t insCode(instruction ins)
{
    // clang-format off
    const static
    size_t          insCodes[] =
    {
        #define INST0(id, nm, um, mr,                 flags) mr,
        #define INST1(id, nm, um, mr,                 flags) mr,
        #define INST2(id, nm, um, mr, mi,             flags) mr,
        #define INST3(id, nm, um, mr, mi, rm,         flags) mr,
        #define INST4(id, nm, um, mr, mi, rm, a4,     flags) mr,
        #define INST5(id, nm, um, mr, mi, rm, a4, rr, flags) mr,
        #include "instrs.h"
        #undef  INST0
        #undef  INST1
        #undef  INST2
        #undef  INST3
        #undef  INST4
        #undef  INST5
    };
    // clang-format on

    assert((unsigned)ins < ArrLen(insCodes));
    assert((insCodes[ins] != BAD_CODE));

    return insCodes[ins];
}

/*****************************************************************************
 *
 *  Returns the "AL/AX/EAX, imm" accumulator encoding of the given instruction.
 */

inline size_t insCodeACC(instruction ins)
{
    // clang-format off
    const static
    size_t          insCodesACC[] =
    {
        #define INST0(id, nm, um, mr,                 flags)
        #define INST1(id, nm, um, mr,                 flags)
        #define INST2(id, nm, um, mr, mi,             flags)
        #define INST3(id, nm, um, mr, mi, rm,         flags)
        #define INST4(id, nm, um, mr, mi, rm, a4,     flags) a4,
        #define INST5(id, nm, um, mr, mi, rm, a4, rr, flags) a4,
        #include "instrs.h"
        #undef  INST0
        #undef  INST1
        #undef  INST2
        #undef  INST3
        #undef  INST4
        #undef  INST5
    };
    // clang-format on

    assert((unsigned)ins < ArrLen(insCodesACC));
    assert((insCodesACC[ins] != BAD_CODE));

    return insCodesACC[ins];
}

/*****************************************************************************
 *
 *  Returns the "register" encoding of the given CPU instruction.
 */

inline size_t insCodeRR(instruction ins)
{
    // clang-format off
    const static
    size_t          insCodesRR[] =
    {
        #define INST0(id, nm, um, mr,                 flags)
        #define INST1(id, nm, um, mr,                 flags)
        #define INST2(id, nm, um, mr, mi,             flags)
        #define INST3(id, nm, um, mr, mi, rm,         flags)
        #define INST4(id, nm, um, mr, mi, rm, a4,     flags)
        #define INST5(id, nm, um, mr, mi, rm, a4, rr, flags) rr,
        #include "instrs.h"
        #undef  INST0
        #undef  INST1
        #undef  INST2
        #undef  INST3
        #undef  INST4
        #undef  INST5
    };
    // clang-format on

    assert((unsigned)ins < ArrLen(insCodesRR));
    assert((insCodesRR[ins] != BAD_CODE));

    return insCodesRR[ins];
}

// clang-format off
const static
size_t          insCodesRM[] =
{
    #define INST0(id, nm, um, mr,                 flags)
    #define INST1(id, nm, um, mr,                 flags)
    #define INST2(id, nm, um, mr, mi,             flags)
    #define INST3(id, nm, um, mr, mi, rm,         flags) rm,
    #define INST4(id, nm, um, mr, mi, rm, a4,     flags) rm,
    #define INST5(id, nm, um, mr, mi, rm, a4, rr, flags) rm,
    #include "instrs.h"
    #undef  INST0
    #undef  INST1
    #undef  INST2
    #undef  INST3
    #undef  INST4
    #undef  INST5
};
// clang-format on

// Returns true iff the given CPU instruction has an RM encoding.
inline bool hasCodeRM(instruction ins)
{
    assert((unsigned)ins < ArrLen(insCodesRM));
    return ((insCodesRM[ins] != BAD_CODE));
}

/*****************************************************************************
 *
 *  Returns the "reg, [r/m]" encoding of the given CPU instruction.
 */

inline size_t insCodeRM(instruction ins)
{
    assert((unsigned)ins < ArrLen(insCodesRM));
    assert((insCodesRM[ins] != BAD_CODE));

    return insCodesRM[ins];
}

// clang-format off
const static
size_t          insCodesMI[] =
{
    #define INST0(id, nm, um, mr,                 flags)
    #define INST1(id, nm, um, mr,                 flags)
    #define INST2(id, nm, um, mr, mi,             flags) mi,
    #define INST3(id, nm, um, mr, mi, rm,         flags) mi,
    #define INST4(id, nm, um, mr, mi, rm, a4,     flags) mi,
    #define INST5(id, nm, um, mr, mi, rm, a4, rr, flags) mi,
    #include "instrs.h"
    #undef  INST0
    #undef  INST1
    #undef  INST2
    #undef  INST3
    #undef  INST4
    #undef  INST5
};
// clang-format on

// Returns true iff the given CPU instruction has an MI encoding.
inline bool hasCodeMI(instruction ins)
{
    assert((unsigned)ins < ArrLen(insCodesMI));
    return ((insCodesMI[ins] != BAD_CODE));
}

/*****************************************************************************
 *
 *  Returns the "[r/m], 32-bit icon" encoding of the given CPU instruction.
 */

inline size_t insCodeMI(instruction ins)
{
    assert((unsigned)ins < ArrLen(insCodesMI));
    assert((insCodesMI[ins] != BAD_CODE));

    return insCodesMI[ins];
}

// clang-format off
const static
size_t          insCodesMR[] =
{
    #define INST0(id, nm, um, mr,                 flags)
    #define INST1(id, nm, um, mr,                 flags) mr,
    #define INST2(id, nm, um, mr, mi,             flags) mr,
    #define INST3(id, nm, um, mr, mi, rm,         flags) mr,
    #define INST4(id, nm, um, mr, mi, rm, a4,     flags) mr,
    #define INST5(id, nm, um, mr, mi, rm, a4, rr, flags) mr,
    #include "instrs.h"
    #undef  INST0
    #undef  INST1
    #undef  INST2
    #undef  INST3
    #undef  INST4
    #undef  INST5
};
// clang-format on

// Returns true iff the given CPU instruction has an MR encoding.
inline bool hasCodeMR(instruction ins)
{
    assert((unsigned)ins < ArrLen(insCodesMR));
    return ((insCodesMR[ins] != BAD_CODE));
}

/*****************************************************************************
 *
 *  Returns the "[r/m], reg" or "[r/m]" encoding of the given CPU instruction.
 */

inline size_t insCodeMR(instruction ins)
{
    assert((unsigned)ins < ArrLen(insCodesMR));
    assert((insCodesMR[ins] != BAD_CODE));

    return insCodesMR[ins];
}

// Return true if the instruction uses the SSE38 or SSE3A macro in instrsXArch.h.
bool emitter::EncodedBySSE38orSSE3A(instruction ins)
{
    // 0x0F66xx38 / 0x0F66xx3A opcode patterns (the xx byte is masked off below).
    const size_t SSE38 = 0x0F660038;
    const size_t SSE3A = 0x0F66003A;
    const size_t MASK  = 0xFFFF00FF;

    size_t insCode = 0;

    if (!IsSSEOrAVXInstruction(ins))
    {
        return false;
    }

    // Pick whichever encoding the instruction has; RM, MI, and MR all share
    // the same escape-prefix bytes for a given instruction.
    if (hasCodeRM(ins))
    {
        insCode = insCodeRM(ins);
    }
    else if (hasCodeMI(ins))
    {
        insCode = insCodeMI(ins);
    }
    else if (hasCodeMR(ins))
    {
        insCode = insCodeMR(ins);
    }

    insCode &= MASK;
    return insCode == SSE38 || insCode == SSE3A;
}

/*****************************************************************************
 *
 *  Returns an encoding for the specified register to be used in the bit0-2
 *  part of an opcode.
 */

inline unsigned emitter::insEncodeReg012(instruction ins, regNumber reg, emitAttr size, code_t* code)
{
    assert(reg < REG_STK);

#ifdef TARGET_AMD64
    // Either code is not NULL or reg is not an extended reg.
    // If reg is an extended reg, instruction needs to be prefixed with 'REX'
    // which would require code != NULL.
    assert(code != nullptr || !IsExtendedReg(reg));

    if (IsExtendedReg(reg))
    {
        *code = AddRexBPrefix(ins, *code); // REX.B
    }
    else if ((EA_SIZE(size) == EA_1BYTE) && (reg > REG_RBX) && (code != nullptr))
    {
        // We are assuming that we only use/encode SPL, BPL, SIL and DIL
        // not the corresponding AH, CH, DH, or BH
        *code = AddRexPrefix(ins, *code); // REX
    }
#endif // TARGET_AMD64

    unsigned regBits = RegEncoding(reg);

    assert(regBits < 8);
    return regBits;
}

/*****************************************************************************
 *
 *  Returns an encoding for the specified register to be used in the bit3-5
 *  part of an opcode.
 */

inline unsigned emitter::insEncodeReg345(instruction ins, regNumber reg, emitAttr size, code_t* code)
{
    assert(reg < REG_STK);

#ifdef TARGET_AMD64
    // Either code is not NULL or reg is not an extended reg.
    // If reg is an extended reg, instruction needs to be prefixed with 'REX'
    // which would require code != NULL.
    assert(code != nullptr || !IsExtendedReg(reg));

    if (IsExtendedReg(reg))
    {
        *code = AddRexRPrefix(ins, *code); // REX.R
    }
    else if ((EA_SIZE(size) == EA_1BYTE) && (reg > REG_RBX) && (code != nullptr))
    {
        // We are assuming that we only use/encode SPL, BPL, SIL and DIL
        // not the corresponding AH, CH, DH, or BH
        *code = AddRexPrefix(ins, *code); // REX
    }
#endif // TARGET_AMD64

    unsigned regBits = RegEncoding(reg);

    assert(regBits < 8);
    return (regBits << 3);
}

/***********************************************************************************
 *
 *  Returns modified AVX opcode with the specified register encoded in bits 3-6 of
 *  byte 2 of VEX prefix.
 */

inline emitter::code_t emitter::insEncodeReg3456(instruction ins, regNumber reg, emitAttr size, code_t code)
{
    assert(reg < REG_STK);
    assert(IsAVXInstruction(ins));
    assert(hasVexPrefix(code));

    // Get 4-bit register encoding
    // RegEncoding() gives lower 3 bits
    // IsExtendedReg() gives MSB.
    code_t regBits = RegEncoding(reg);
    if (IsExtendedReg(reg))
    {
        regBits |= 0x08;
    }

    // VEX prefix encodes register operand in 1's complement form
    // Shift count = 4-bytes of opcode + 0-2 bits
    assert(regBits <= 0xF);
    regBits <<= 35;
    return code ^ regBits;
}

/*****************************************************************************
 *
 *  Returns an encoding for the specified register to be used in the bit3-5
 *  part of an SIB byte (unshifted).
 *  Used exclusively to generate the REX.X bit and truncate the register.
 */

inline unsigned emitter::insEncodeRegSIB(instruction ins, regNumber reg, code_t* code)
{
    assert(reg < REG_STK);

#ifdef TARGET_AMD64
    // Either code is not NULL or reg is not an extended reg.
    // If reg is an extended reg, instruction needs to be prefixed with 'REX'
    // which would require code != NULL.
    assert(code != nullptr || reg < REG_R8 || (reg >= REG_XMM0 && reg < REG_XMM8));

    if (IsExtendedReg(reg))
    {
        *code = AddRexXPrefix(ins, *code); // REX.X
    }
    unsigned regBits = RegEncoding(reg);
#else  // !TARGET_AMD64
    unsigned regBits = reg;
#endif // !TARGET_AMD64

    assert(regBits < 8);
    return regBits;
}

/*****************************************************************************
 *
 *  Returns the "[r/m]" opcode with the mod/RM field set to register.
 */

inline emitter::code_t emitter::insEncodeMRreg(instruction ins, code_t code)
{
    // If Byte 4 (which is 0xFF00) is 0, that's where the RM encoding goes.
    // Otherwise, it will be placed after the 4 byte encoding.
    if ((code & 0xFF00) == 0)
    {
        assert((code & 0xC000) == 0);
        code |= 0xC000; // mod = 11 => register-direct operand
    }

    return code;
}

/*****************************************************************************
 *
 *  Returns the given "[r/m]" opcode with the mod/RM field set to register.
 */

inline emitter::code_t emitter::insEncodeRMreg(instruction ins, code_t code)
{
    // If Byte 4 (which is 0xFF00) is 0, that's where the RM encoding goes.
    // Otherwise, it will be placed after the 4 byte encoding.
    if ((code & 0xFF00) == 0)
    {
        assert((code & 0xC000) == 0);
        code |= 0xC000; // mod = 11 => register-direct operand
    }
    return code;
}

/*****************************************************************************
 *
 *  Returns the "byte ptr [r/m]" opcode with the mod/RM field set to
 *  the given register.
 */

inline emitter::code_t emitter::insEncodeMRreg(instruction ins, regNumber reg, emitAttr size, code_t code)
{
    assert((code & 0xC000) == 0);
    code |= 0xC000;
    unsigned regcode = insEncodeReg012(ins, reg, size, &code) << 8;
    code |= regcode;
    return code;
}

/*****************************************************************************
 *
 *  Returns the "byte ptr [r/m], icon" opcode with the mod/RM field set to
 *  the given register.
 */

inline emitter::code_t emitter::insEncodeMIreg(instruction ins, regNumber reg, emitAttr size, code_t code)
{
    assert((code & 0xC000) == 0);
    code |= 0xC000;
    unsigned regcode = insEncodeReg012(ins, reg, size, &code) << 8;
    code |= regcode;
    return code;
}

/*****************************************************************************
 *
 *  Returns true iff the given instruction does not have a "[r/m], icon" form, but *does* have a
 *  "reg,reg,imm8" form.
 */

inline bool insNeedsRRIb(instruction ins)
{
    // If this list gets longer, use a switch or a table.
    return ins == INS_imul;
}

/*****************************************************************************
 *
 *  Returns the "reg,reg,imm8" opcode with both the reg's set to the
 *  given register.
 */

inline emitter::code_t emitter::insEncodeRRIb(instruction ins, regNumber reg, emitAttr size)
{
    assert(size == EA_4BYTE); // All we handle for now.
    assert(insNeedsRRIb(ins));
    // If this list gets longer, use a switch, or a table lookup.
    code_t   code    = 0x69c0; // imul reg, reg, imm8 with mod=11 baked in
    unsigned regcode = insEncodeReg012(ins, reg, size, &code);
    // We use the same register as source and destination.  (Could have another version that does both regs...)
    code |= regcode;
    code |= (regcode << 3);
    return code;
}

/*****************************************************************************
 *
 *  Returns the "+reg" opcode with the given register set into the low
 *  nibble of the opcode
 */

inline emitter::code_t emitter::insEncodeOpreg(instruction ins, regNumber reg, emitAttr size)
{
    code_t   code    = insCodeRR(ins);
    unsigned regcode = insEncodeReg012(ins, reg, size, &code);
    code |= regcode;
    return code;
}

/*****************************************************************************
 *
 *  Return the 'SS' field value for the given index scale factor.
 */

inline unsigned emitter::insSSval(unsigned scale)
{
    assert(scale == 1 || scale == 2 || scale == 4 || scale == 8);

    // 0xFF marks invalid scales; only 1/2/4/8 are encodable in the SIB SS bits.
    const static BYTE scales[] = {
        0x00, // 1
        0x40, // 2
        0xFF, // 3
        0x80, // 4
        0xFF, // 5
        0xFF, // 6
        0xFF, // 7
        0xC0, // 8
    };

    return scales[scale - 1];
}

const instruction emitJumpKindInstructions[] = {INS_nop,

#define JMP_SMALL(en, rev, ins) INS_##ins,
#include "emitjmps.h"

                                                INS_call};

const emitJumpKind emitReverseJumpKinds[] = {
    EJ_NONE,

#define JMP_SMALL(en, rev, ins) EJ_##rev,
#include "emitjmps.h"
};

/*****************************************************************************
 * Look up the instruction for a jump kind
 */

/*static*/ instruction emitter::emitJumpKindToIns(emitJumpKind jumpKind)
{
    assert((unsigned)jumpKind < ArrLen(emitJumpKindInstructions));
    return emitJumpKindInstructions[jumpKind];
}

/*****************************************************************************
 * Reverse the conditional jump
 */

/* static */ emitJumpKind emitter::emitReverseJumpKind(emitJumpKind jumpKind)
{
    assert(jumpKind < EJ_COUNT);
    return emitReverseJumpKinds[jumpKind];
}

/*****************************************************************************
 * The size for these instructions is less than EA_4BYTE,
 * but the target register need not be byte-addressable
 */

inline bool emitInstHasNoCode(instruction ins)
{
    // INS_align is a pseudo-instruction; no machine code is generated for it.
    if (ins == INS_align)
    {
        return true;
    }

    return false;
}
/*****************************************************************************
 *  When encoding instructions that operate on byte registers
 *  we have to ensure that we use a low register (EAX, EBX, ECX or EDX)
 *  otherwise we will incorrectly encode the instruction
 */

bool emitter::emitVerifyEncodable(instruction ins, emitAttr size, regNumber reg1, regNumber reg2 /* = REG_NA */)
{
#if CPU_HAS_BYTE_REGS
    if (size != EA_1BYTE) // Not operating on a byte register is fine
    {
        return true;
    }

    if ((ins != INS_movsx) && // These three instructions support high register
        (ins != INS_movzx)    // encodings for reg1
#ifdef FEATURE_HW_INTRINSICS
        && (ins != INS_crc32)
#endif
            )
    {
        // reg1 must be a byte-able register
        if ((genRegMask(reg1) & RBM_BYTE_REGS) == 0)
        {
            return false;
        }
    }
    // if reg2 is not REG_NA then reg2 must be a byte-able register
    if ((reg2 != REG_NA) && ((genRegMask(reg2) & RBM_BYTE_REGS) == 0))
    {
        return false;
    }
#endif
    // The instruction can be encoded
    return true;
}

//------------------------------------------------------------------------
// emitInsSize: Estimate the size (in bytes of generated code) of the given instruction.
//
// Arguments:
//    code                  -- The current opcode and any known prefixes
//    includeRexPrefixSize  -- If Rex Prefix size should be included or not
//
inline UNATIVE_OFFSET emitter::emitInsSize(code_t code, bool includeRexPrefixSize)
{
    // Count opcode bytes by how high the non-zero bytes reach in 'code'.
    UNATIVE_OFFSET size = (code & 0xFF000000) ? 4 : (code & 0x00FF0000) ? 3 : 2;
#ifdef TARGET_AMD64
    size += emitGetPrefixSize(code, includeRexPrefixSize);
#endif
    return size;
}

//------------------------------------------------------------------------
// emitInsSizeRR: Determines the code size for an instruction encoding that does not have any addressing modes
//
// Arguments:
//    ins   -- The instruction being emitted
//    code  -- The current opcode and any known prefixes
inline UNATIVE_OFFSET emitter::emitInsSizeRR(instrDesc* id, code_t code)
{
    assert(id->idIns() != INS_invalid);
    instruction ins  = id->idIns();
    emitAttr    attr = id->idOpSize();

    UNATIVE_OFFSET sz = emitGetAdjustedSize(ins, attr, code);

    bool includeRexPrefixSize = true;
    // REX prefix
    if (TakesRexWPrefix(ins, attr) || IsExtendedReg(id->idReg1(), attr) || IsExtendedReg(id->idReg2(), attr) ||
        (!id->idIsSmallDsc() && (IsExtendedReg(id->idReg3(), attr) || IsExtendedReg(id->idReg4(), attr))))
    {
        sz += emitGetRexPrefixSize(ins);
        includeRexPrefixSize = !IsAVXInstruction(ins);
    }

    sz += emitInsSize(code, includeRexPrefixSize);

    return sz;
}

//------------------------------------------------------------------------
// emitInsSizeRR: Determines the code size for an instruction encoding that does not have any addressing modes and
// includes an immediate value
//
// Arguments:
//    ins   -- The instruction being emitted
//    code  -- The current opcode and any known prefixes
//    val   -- The immediate value to encode
inline UNATIVE_OFFSET emitter::emitInsSizeRR(instrDesc* id, code_t code, int val)
{
    instruction    ins       = id->idIns();
    UNATIVE_OFFSET valSize   = EA_SIZE_IN_BYTES(id->idOpSize());
    bool           valInByte = ((signed char)val == val) && (ins != INS_mov) && (ins != INS_test);

#ifdef TARGET_AMD64
    // mov reg, imm64 is the only opcode which takes a full 8 byte immediate
    // all other opcodes take a sign-extended 4-byte immediate
    noway_assert(valSize <= sizeof(INT32) || !id->idIsCnsReloc());
#endif // TARGET_AMD64

    if (valSize > sizeof(INT32))
    {
        valSize = sizeof(INT32);
    }

    if (id->idIsCnsReloc())
    {
        valInByte = false; // relocs can't be placed in a byte
        assert(valSize == sizeof(INT32));
    }

    if (valInByte)
    {
        valSize = sizeof(char);
    }
    else
    {
        assert(!IsSSEOrAVXInstruction(ins));
    }

    return valSize + emitInsSizeRR(id, code);
}

inline UNATIVE_OFFSET emitter::emitInsSizeRR(instruction ins, regNumber reg1, regNumber reg2, emitAttr attr)
{
    emitAttr size = EA_SIZE(attr);

    // If Byte 4 (which is 0xFF00) is zero, that's where the RM encoding goes.
    // Otherwise, it will be placed after the 4 byte encoding, making the total 5 bytes.
    // This would probably be better expressed as a different format or something?
    code_t code = insCodeRM(ins);

    UNATIVE_OFFSET sz = emitGetAdjustedSize(ins, size, insCodeRM(ins));

    bool includeRexPrefixSize = true;
    // REX prefix
    if (!hasRexPrefix(code))
    {
        // 'xor reg, reg' (same register) zero-extends, so no REX.W is needed for it.
        if ((TakesRexWPrefix(ins, size) && ((ins != INS_xor) || (reg1 != reg2))) || IsExtendedReg(reg1, attr) ||
            IsExtendedReg(reg2, attr))
        {
            sz += emitGetRexPrefixSize(ins);
            includeRexPrefixSize = false;
        }
    }

    if ((code & 0xFF00) != 0)
    {
        sz += IsSSEOrAVXInstruction(ins) ? emitInsSize(code, includeRexPrefixSize) : 5;
    }
    else
    {
        sz += emitInsSize(insEncodeRMreg(ins, code), includeRexPrefixSize);
    }

    return sz;
}

/*****************************************************************************/

inline UNATIVE_OFFSET emitter::emitInsSizeSV(code_t code, int var, int dsp)
{
    UNATIVE_OFFSET size = emitInsSize(code, /* includeRexPrefixSize */ true);
    UNATIVE_OFFSET offs;
    bool           offsIsUpperBound = true;
    bool           EBPbased         = true;

    /*  Is this a temporary? */

    if (var < 0)
    {
        /* An address off of ESP takes an extra byte */

        if (!emitHasFramePtr)
        {
            size++;
        }

        // The offset is already assigned. Find the temp.
        TempDsc* tmp = codeGen->regSet.tmpFindNum(var, RegSet::TEMP_USAGE_USED);
        if (tmp == nullptr)
        {
            // It might be in the free lists, if we're working on zero initializing the temps.
            tmp = codeGen->regSet.tmpFindNum(var, RegSet::TEMP_USAGE_FREE);
        }
        assert(tmp != nullptr);
        offs = tmp->tdTempOffs();

        // We only care about the magnitude of the offset here, to determine instruction size.
        if (emitComp->isFramePointerUsed())
        {
            if ((int)offs < 0)
            {
                offs = -(int)offs;
            }
        }
        else
        {
            // SP-based offsets must already be positive.
            assert((int)offs >= 0);
        }
    }
    else
    {
        /* Get the frame offset of the (non-temp) variable */

        offs = dsp + emitComp->lvaFrameAddress(var, &EBPbased);

        /* An address off of ESP takes an extra byte */

        if (!EBPbased)
        {
            ++size;
        }

        /* Is this a stack parameter reference? */

        if ((emitComp->lvaIsParameter(var)
#if !defined(TARGET_AMD64) || defined(UNIX_AMD64_ABI)
             && !emitComp->lvaIsRegArgument(var)
#endif // !TARGET_AMD64 || UNIX_AMD64_ABI
             ) ||
            (static_cast<unsigned>(var) == emitComp->lvaRetAddrVar))
        {
            /* If no EBP frame, arguments and ret addr are off of ESP, above temps */

            if (!EBPbased)
            {
                assert((int)offs >= 0);

                offsIsUpperBound = false; // since #temps can increase
                offs += emitMaxTmpSize;
            }
        }
        else
        {
            /* Locals off of EBP are at negative offsets */

            if (EBPbased)
            {
#if defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI)
                // If localloc is not used, then ebp chaining is done and hence
                // offset of locals will be at negative offsets, Otherwise offsets
                // will be positive. In future, when RBP gets positioned in the
                // middle of the frame so as to optimize instruction encoding size,
                // the below asserts needs to be modified appropriately.
                // However, for Unix platforms, we always do frame pointer chaining,
                // so offsets from the frame pointer will always be negative.
                if (emitComp->compLocallocUsed || emitComp->opts.compDbgEnC)
                {
                    noway_assert((int)offs >= 0);
                }
                else
#endif
                {
                    // Dev10 804810 - failing this assert can lead to bad codegen and runtime crashes
                    CLANG_FORMAT_COMMENT_ANCHOR;

#ifdef UNIX_AMD64_ABI
                    const LclVarDsc* varDsc         = emitComp->lvaGetDesc(var);
                    bool             isRegPassedArg = varDsc->lvIsParam && varDsc->lvIsRegArg;
                    // Register passed args could have a stack offset of 0.
                    noway_assert((int)offs < 0 || isRegPassedArg || emitComp->opts.IsOSR());
#else  // !UNIX_AMD64_ABI

                    // OSR transitioning to RBP frame currently can have mid-frame FP
                    noway_assert(((int)offs < 0) || emitComp->opts.IsOSR());
#endif // !UNIX_AMD64_ABI
                }

                assert(emitComp->lvaTempsHaveLargerOffsetThanVars());

                // lvaInlinedPInvokeFrameVar and lvaStubArgumentVar are placed below the temps
                if (unsigned(var) == emitComp->lvaInlinedPInvokeFrameVar || unsigned(var) == emitComp->lvaStubArgumentVar)
                {
                    offs -= emitMaxTmpSize;
                }

                if ((int)offs < 0)
                {
                    // offset is negative
                    return size + ((int(offs) >= SCHAR_MIN) ? sizeof(char) : sizeof(int));
                }
#ifdef TARGET_AMD64
                // This case arises for localloc frames
                else
                {
                    return size + ((offs <= SCHAR_MAX) ? sizeof(char) : sizeof(int));
                }
#endif
            }

            if (emitComp->lvaTempsHaveLargerOffsetThanVars() == false)
            {
                offs += emitMaxTmpSize;
            }
        }
    }

    assert((int)offs >= 0);

#if !FEATURE_FIXED_OUT_ARGS

    /* Are we addressing off of ESP? */

    if (!emitHasFramePtr)
    {
        /* Adjust the effective offset if necessary */

        if (emitCntStackDepth)
            offs += emitCurStackLvl;

        // we could (and used to) check for the special case [sp] here but the stack offset
        // estimator was off, and there is very little harm in overestimating for such a
        // rare case.
    }

#endif // !FEATURE_FIXED_OUT_ARGS

    // printf("lcl = %04X, tmp = %04X, stk = %04X, offs = %04X\n",
    //        emitLclSize, emitMaxTmpSize, emitCurStackLvl, offs);

#ifdef TARGET_AMD64
    bool useSmallEncoding = (SCHAR_MIN <= (int)offs) && ((int)offs <= SCHAR_MAX);
#else
    bool useSmallEncoding = (offs <= size_t(SCHAR_MAX));
#endif

    // If it is ESP based, and the offset is zero, we will not encode the disp part.
    if (!EBPbased && offs == 0)
    {
        return size;
    }
    else
    {
        return size + (useSmallEncoding ? sizeof(char) : sizeof(int));
    }
}

// Determines the code size for a frame-local access, including any needed
// operand-size adjustment and REX prefix.
inline UNATIVE_OFFSET emitter::emitInsSizeSV(instrDesc* id, code_t code, int var, int dsp)
{
    assert(id->idIns() != INS_invalid);
    instruction    ins      = id->idIns();
    emitAttr       attrSize = id->idOpSize();
    UNATIVE_OFFSET prefix   = emitGetAdjustedSize(ins, attrSize, code);

    // REX prefix
    if (TakesRexWPrefix(ins, attrSize) || IsExtendedReg(id->idReg1(), attrSize) ||
        IsExtendedReg(id->idReg2(), attrSize))
    {
        prefix += emitGetRexPrefixSize(ins);
    }

    return prefix + emitInsSizeSV(code, var, dsp);
}

// Same as above, but for an encoding that also carries an immediate operand.
inline UNATIVE_OFFSET emitter::emitInsSizeSV(instrDesc* id, code_t code, int var, int dsp, int val)
{
    assert(id->idIns() != INS_invalid);
    instruction    ins       = id->idIns();
    emitAttr       attrSize  = id->idOpSize();
    UNATIVE_OFFSET valSize   = EA_SIZE_IN_BYTES(attrSize);
    UNATIVE_OFFSET prefix    = emitGetAdjustedSize(ins, attrSize, code);
    bool           valInByte = ((signed char)val == val) && (ins != INS_mov) && (ins != INS_test);

#ifdef TARGET_AMD64
    // mov reg, imm64 is the only opcode which takes a full 8 byte immediate
    // all other opcodes take a sign-extended 4-byte immediate
    noway_assert(valSize <= sizeof(int) || !id->idIsCnsReloc());
#endif // TARGET_AMD64

    if (valSize > sizeof(int))
    {
        valSize = sizeof(int);
    }

    if (id->idIsCnsReloc())
    {
        valInByte = false; // relocs can't be placed in a byte
        assert(valSize == sizeof(int));
    }

    if (valInByte)
    {
        valSize = sizeof(char);
    }
    else
    {
        assert(!IsSSEOrAVXInstruction(ins));
    }

    // 64-bit operand instructions will need a REX.W prefix
    if (TakesRexWPrefix(ins, attrSize) || IsExtendedReg(id->idReg1(), attrSize) ||
        IsExtendedReg(id->idReg2(), attrSize))
    {
        prefix += emitGetRexPrefixSize(ins);
    }

    return prefix + valSize + emitInsSizeSV(code, var, dsp);
}

/*****************************************************************************/

// ESP (and R12 on x64) as a base register force a SIB byte in the encoding.
static bool baseRegisterRequiresSibByte(regNumber base)
{
#ifdef TARGET_AMD64
    return base == REG_ESP || base == REG_R12;
#else
    return base == REG_ESP;
#endif
}

// EBP (and R13 on x64) as a base register force a displacement byte even when
// the displacement is zero (mod=00 with these registers means RIP-rel/disp32).
static bool baseRegisterRequiresDisplacement(regNumber base)
{
#ifdef TARGET_AMD64
    return base == REG_EBP || base == REG_R13;
#else
    return base == REG_EBP;
#endif
}

UNATIVE_OFFSET emitter::emitInsSizeAM(instrDesc* id, code_t code)
{
    assert(id->idIns() != INS_invalid);
    instruction ins      = id->idIns();
    emitAttr    attrSize = id->idOpSize();
    /* The displacement field is in an unusual place for (tail-)calls */
    ssize_t dsp = (ins == INS_call) || (ins == INS_tail_i_jmp) ? emitGetInsCIdisp(id) : emitGetInsAmdAny(id);

    bool dspInByte = ((signed char)dsp == (ssize_t)dsp);
    bool dspIsZero = (dsp == 0);

    UNATIVE_OFFSET size;

    // Note that the values in reg and rgx are used in this method to decide
    // how many bytes will be needed by the address [reg+rgx+cns]
    // this includes the prefix bytes when reg or rgx are registers R8-R15
    regNumber reg;
    regNumber rgx;

    // The idAddr field is a union and only some of the instruction formats use the iiaAddrMode variant
    // these are IF_AWR_*, IF_ARD_*, IF_ARW_* and IF_*_ARD
    // ideally these should really be the only idInsFmts that we see here
    //  but we have some outliers to deal with:
    //     emitIns_R_L adds IF_RWR_LABEL and calls emitInsSizeAM
    //     emitInsRMW adds IF_MRW_CNS, IF_MRW_RRD, IF_MRW_SHF, and calls emitInsSizeAM

    switch (id->idInsFmt())
    {
        case IF_RWR_LABEL:
        case IF_MRW_CNS:
        case IF_MRW_RRD:
        case IF_MRW_SHF:
            reg = REG_NA;
            rgx = REG_NA;
            break;

        default:
            reg = id->idAddr()->iiaAddrMode.amBaseReg;
            rgx = id->idAddr()->iiaAddrMode.amIndxReg;
            break;
    }

    if (id->idIsDspReloc())
    {
        dspInByte = false; // relocs can't be placed in a byte
        dspIsZero
= false; // relocs won't always be zero } if (code & 0xFF000000) { size = 4; } else if (code & 0x00FF0000) { // BT supports 16 bit operands and this code doesn't handle the necessary 66 prefix. assert(ins != INS_bt); assert((attrSize == EA_4BYTE) || (attrSize == EA_PTRSIZE) // Only for x64 || (attrSize == EA_16BYTE) || (attrSize == EA_32BYTE) // only for x64 || (ins == INS_movzx) || (ins == INS_movsx) // The prefetch instructions are always 3 bytes and have part of their modr/m byte hardcoded || isPrefetch(ins)); size = 3; } else { size = 2; } size += emitGetAdjustedSize(ins, attrSize, code); if (hasRexPrefix(code)) { // REX prefix size += emitGetRexPrefixSize(ins); } else if (TakesRexWPrefix(ins, attrSize)) { // REX.W prefix size += emitGetRexPrefixSize(ins); } else if (IsExtendedReg(reg, EA_PTRSIZE) || IsExtendedReg(rgx, EA_PTRSIZE) || ((ins != INS_call) && (IsExtendedReg(id->idReg1(), attrSize) || IsExtendedReg(id->idReg2(), attrSize)))) { // Should have a REX byte size += emitGetRexPrefixSize(ins); } if (rgx == REG_NA) { /* The address is of the form "[reg+disp]" */ if (reg == REG_NA) { /* The address is of the form "[disp]" */ CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_X86 // Special case: "mov eax, [disp]" and "mov [disp], eax" can use a smaller 1-byte encoding. if ((ins == INS_mov) && (id->idReg1() == REG_EAX) && ((id->idInsFmt() == IF_RWR_ARD) || (id->idInsFmt() == IF_AWR_RRD))) { // Amd64: this is one case where addr can be 64-bit in size. This is currently unused. // If this ever changes, this code will need to be updated to add "sizeof(INT64)" to "size". assert((size == 2) || ((size == 3) && (id->idOpSize() == EA_2BYTE))); size--; } #endif size += sizeof(INT32); #ifdef TARGET_AMD64 // If id is not marked for reloc, add 1 additional byte for SIB that follows disp32 if (!id->idIsDspReloc()) { size++; } #endif return size; } // If this is just "call reg", we're done. 
if (id->idIsCallRegPtr()) { assert(ins == INS_call || ins == INS_tail_i_jmp); assert(dsp == 0); return size; } // If the base register is ESP (or R12 on 64-bit systems), a SIB byte must be used. if (baseRegisterRequiresSibByte(reg)) { size++; } // If the base register is EBP (or R13 on 64-bit systems), a displacement is required. // Otherwise, the displacement can be elided if it is zero. if (dspIsZero && !baseRegisterRequiresDisplacement(reg)) { return size; } /* Does the offset fit in a byte? */ if (dspInByte) { size += sizeof(char); } else { size += sizeof(INT32); } } else { /* An index register is present */ size++; /* Is the index value scaled? */ if (emitDecodeScale(id->idAddr()->iiaAddrMode.amScale) > 1) { /* Is there a base register? */ if (reg != REG_NA) { /* The address is "[reg + {2/4/8} * rgx + icon]" */ if (dspIsZero && !baseRegisterRequiresDisplacement(reg)) { /* The address is "[reg + {2/4/8} * rgx]" */ } else { /* The address is "[reg + {2/4/8} * rgx + disp]" */ if (dspInByte) { size += sizeof(char); } else { size += sizeof(int); } } } else { /* The address is "[{2/4/8} * rgx + icon]" */ size += sizeof(INT32); } } else { // When we are using the SIB or VSIB format with EBP or R13 as a base, we must emit at least // a 1 byte displacement (this is a special case in the encoding to allow for the case of no // base register at all). In order to avoid this when we have no scaling, we can reverse the // registers so that we don't have to add that extra byte. However, we can't do that if the // index register is a vector, such as for a gather instruction. // if (dspIsZero && baseRegisterRequiresDisplacement(reg) && !baseRegisterRequiresDisplacement(rgx) && !isFloatReg(rgx)) { // Swap reg and rgx, such that reg is not EBP/R13. 
regNumber tmp = reg; id->idAddr()->iiaAddrMode.amBaseReg = reg = rgx; id->idAddr()->iiaAddrMode.amIndxReg = rgx = tmp; } /* The address is "[reg+rgx+dsp]" */ if (dspIsZero && !baseRegisterRequiresDisplacement(reg)) { /* This is [reg+rgx]" */ } else { /* This is [reg+rgx+dsp]" */ if (dspInByte) { size += sizeof(char); } else { size += sizeof(int); } } } } return size; } inline UNATIVE_OFFSET emitter::emitInsSizeAM(instrDesc* id, code_t code, int val) { assert(id->idIns() != INS_invalid); instruction ins = id->idIns(); UNATIVE_OFFSET valSize = EA_SIZE_IN_BYTES(id->idOpSize()); bool valInByte = ((signed char)val == val) && (ins != INS_mov) && (ins != INS_test); // We should never generate BT mem,reg because it has poor performance. BT mem,imm might be useful // but it requires special handling of the immediate value (it is always encoded in a byte). // Let's not complicate things until this is needed. assert(ins != INS_bt); #ifdef TARGET_AMD64 // mov reg, imm64 is the only opcode which takes a full 8 byte immediate // all other opcodes take a sign-extended 4-byte immediate noway_assert(valSize <= sizeof(INT32) || !id->idIsCnsReloc()); #endif // TARGET_AMD64 if (valSize > sizeof(INT32)) { valSize = sizeof(INT32); } if (id->idIsCnsReloc()) { valInByte = false; // relocs can't be placed in a byte assert(valSize == sizeof(INT32)); } if (valInByte) { valSize = sizeof(char); } else { assert(!IsSSEOrAVXInstruction(ins)); } return valSize + emitInsSizeAM(id, code); } inline UNATIVE_OFFSET emitter::emitInsSizeCV(instrDesc* id, code_t code) { assert(id->idIns() != INS_invalid); instruction ins = id->idIns(); emitAttr attrSize = id->idOpSize(); // fgMorph changes any statics that won't fit into 32-bit addresses // into constants with an indir, rather than GT_CLS_VAR // so we should only hit this path for statics that are RIP-relative UNATIVE_OFFSET size = sizeof(INT32); size += emitGetAdjustedSize(ins, attrSize, code); bool includeRexPrefixSize = true; // 64-bit operand 
instructions will need a REX.W prefix if (TakesRexWPrefix(ins, attrSize) || IsExtendedReg(id->idReg1(), attrSize) || IsExtendedReg(id->idReg2(), attrSize)) { size += emitGetRexPrefixSize(ins); includeRexPrefixSize = false; } return size + emitInsSize(code, includeRexPrefixSize); } inline UNATIVE_OFFSET emitter::emitInsSizeCV(instrDesc* id, code_t code, int val) { instruction ins = id->idIns(); UNATIVE_OFFSET valSize = EA_SIZE_IN_BYTES(id->idOpSize()); bool valInByte = ((signed char)val == val) && (ins != INS_mov) && (ins != INS_test); #ifdef TARGET_AMD64 // mov reg, imm64 is the only opcode which takes a full 8 byte immediate // all other opcodes take a sign-extended 4-byte immediate noway_assert(valSize <= sizeof(INT32) || !id->idIsCnsReloc()); #endif // TARGET_AMD64 if (valSize > sizeof(INT32)) { valSize = sizeof(INT32); } if (id->idIsCnsReloc()) { valInByte = false; // relocs can't be placed in a byte assert(valSize == sizeof(INT32)); } if (valInByte) { valSize = sizeof(char); } else { assert(!IsSSEOrAVXInstruction(ins)); } return valSize + emitInsSizeCV(id, code); } /***************************************************************************** * * Allocate instruction descriptors for instructions with address modes. */ inline emitter::instrDesc* emitter::emitNewInstrAmd(emitAttr size, ssize_t dsp) { if (dsp < AM_DISP_MIN || dsp > AM_DISP_MAX) { instrDescAmd* id = emitAllocInstrAmd(size); id->idSetIsLargeDsp(); #ifdef DEBUG id->idAddr()->iiaAddrMode.amDisp = AM_DISP_BIG_VAL; #endif id->idaAmdVal = dsp; return id; } else { instrDesc* id = emitAllocInstr(size); id->idAddr()->iiaAddrMode.amDisp = dsp; assert(id->idAddr()->iiaAddrMode.amDisp == dsp); // make sure the value fit return id; } } /***************************************************************************** * * Set the displacement field in an instruction. Only handles instrDescAmd type. 
 */

inline void emitter::emitSetAmdDisp(instrDescAmd* id, ssize_t dsp)
{
    if (dsp < AM_DISP_MIN || dsp > AM_DISP_MAX)
    {
        // Displacement doesn't fit in the inline amDisp bitfield: mark large and
        // store it in the side field of the instrDescAmd.
        id->idSetIsLargeDsp();
#ifdef DEBUG
        id->idAddr()->iiaAddrMode.amDisp = AM_DISP_BIG_VAL;
#endif
        id->idaAmdVal = dsp;
    }
    else
    {
        id->idSetIsSmallDsp();
        id->idAddr()->iiaAddrMode.amDisp = dsp;
        assert(id->idAddr()->iiaAddrMode.amDisp == dsp); // make sure the value fit
    }
}

/*****************************************************************************
 *
 *  Allocate an instruction descriptor for an instruction that uses both
 *  an address mode displacement and a constant. Picks the smallest descriptor
 *  variant that can hold both values.
 */

emitter::instrDesc* emitter::emitNewInstrAmdCns(emitAttr size, ssize_t dsp, int cns)
{
    if (dsp >= AM_DISP_MIN && dsp <= AM_DISP_MAX)
    {
        // Small displacement: fits inline; let emitNewInstrCns size for the constant.
        instrDesc* id = emitNewInstrCns(size, cns);

        id->idAddr()->iiaAddrMode.amDisp = dsp;
        assert(id->idAddr()->iiaAddrMode.amDisp == dsp); // make sure the value fit

        return id;
    }
    else
    {
        if (instrDesc::fitsInSmallCns(cns))
        {
            // Large displacement, small constant.
            instrDescAmd* id = emitAllocInstrAmd(size);

            id->idSetIsLargeDsp();
#ifdef DEBUG
            id->idAddr()->iiaAddrMode.amDisp = AM_DISP_BIG_VAL;
#endif
            id->idaAmdVal = dsp;

            id->idSmallCns(cns);

            return id;
        }
        else
        {
            // Large displacement AND large constant: biggest descriptor format.
            instrDescCnsAmd* id = emitAllocInstrCnsAmd(size);

            id->idSetIsLargeCns();
            id->idacCnsVal = cns;

            id->idSetIsLargeDsp();
#ifdef DEBUG
            id->idAddr()->iiaAddrMode.amDisp = AM_DISP_BIG_VAL;
#endif
            id->idacAmdVal = dsp;

            return id;
        }
    }
}

/*****************************************************************************
 *
 *  Add a NOP instruction of the given size.
 */

void emitter::emitIns_Nop(unsigned size)
{
    assert(size <= MAX_ENCODED_SIZE);

    instrDesc* id = emitNewInstr();
    id->idIns(INS_nop);
    id->idInsFmt(IF_NONE);
    id->idCodeSize(size);

    dispIns(id);
    emitCurIGsize += size;
}

/*****************************************************************************
 *
 *  Add an instruction with no operands.
 */
void emitter::emitIns(instruction ins)
{
    UNATIVE_OFFSET sz;
    instrDesc*     id   = emitNewInstr();
    code_t         code = insCodeMR(ins);

#ifdef DEBUG
    {
        // We cannot have #ifdef inside macro expansion.
        bool assertCond = (ins == INS_cdq || ins == INS_int3 || ins == INS_lock || ins == INS_leave ||
                           ins == INS_movsb || ins == INS_movsd || ins == INS_movsp || ins == INS_nop ||
                           ins == INS_r_movsb || ins == INS_r_movsd || ins == INS_r_movsp || ins == INS_r_stosb ||
                           ins == INS_r_stosd || ins == INS_r_stosp || ins == INS_ret || ins == INS_sahf ||
                           ins == INS_stosb || ins == INS_stosd || ins == INS_stosp
                           // These instructions take zero operands
                           || ins == INS_vzeroupper || ins == INS_lfence || ins == INS_mfence || ins == INS_sfence ||
                           ins == INS_pause);

        assert(assertCond);
    }
#endif // DEBUG

    assert(!hasRexPrefix(code)); // Can't have a REX bit with no operands, right?

    // Derive the encoded size from how many opcode bytes are populated in `code`.
    if (code & 0xFF000000)
    {
        sz = 2; // TODO-XArch-Bug?: Shouldn't this be 4? Or maybe we should assert that we don't see this case.
    }
    else if (code & 0x00FF0000)
    {
        sz = 3;
    }
    else if (code & 0x0000FF00)
    {
        sz = 2;
    }
    else
    {
        sz = 1;
    }

    // vzeroupper includes its 2-byte VEX prefix in its MR code.
    assert((ins != INS_vzeroupper) || (sz == 3));

    insFormat fmt = IF_NONE;

    id->idIns(ins);
    id->idInsFmt(fmt);
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;
}

// Add an instruction with no operands, but whose encoding depends on the size
// (Only CDQ/CQO/CWDE/CDQE currently)
void emitter::emitIns(instruction ins, emitAttr attr)
{
    UNATIVE_OFFSET sz;
    instrDesc*     id   = emitNewInstr(attr);
    code_t         code = insCodeMR(ins);
    assert((ins == INS_cdq) || (ins == INS_cwde));
    assert((code & 0xFFFFFF00) == 0);
    sz = 1;

    insFormat fmt = IF_NONE;

    // attr selects the 64-bit forms (CQO/CDQE) via prefix adjustments below.
    sz += emitGetAdjustedSize(ins, attr, code);
    if (TakesRexWPrefix(ins, attr))
    {
        sz += emitGetRexPrefixSize(ins);
    }

    id->idIns(ins);
    id->idInsFmt(fmt);
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;
}

//------------------------------------------------------------------------
// emitMapFmtForIns: map the instruction format based on the instruction.
// Shift-by-a-constant instructions have a special format.
//
// Arguments:
//    fmt - the instruction format to map
//    ins - the instruction
//
// Returns:
//    The mapped instruction format.
//
emitter::insFormat emitter::emitMapFmtForIns(insFormat fmt, instruction ins)
{
    switch (ins)
    {
        case INS_rol_N:
        case INS_ror_N:
        case INS_rcl_N:
        case INS_rcr_N:
        case INS_shl_N:
        case INS_shr_N:
        case INS_sar_N:
        {
            // Shift/rotate by an immediate: the *_CNS formats become *_SHF.
            switch (fmt)
            {
                case IF_RRW_CNS:
                    return IF_RRW_SHF;
                case IF_MRW_CNS:
                    return IF_MRW_SHF;
                case IF_SRW_CNS:
                    return IF_SRW_SHF;
                case IF_ARW_CNS:
                    return IF_ARW_SHF;
                default:
                    unreached();
            }
        }

        default:
            return fmt;
    }
}

//------------------------------------------------------------------------
// emitMapFmtAtoM: map the address mode formats ARD, ARW, and AWR to their direct address equivalents.
//
// Arguments:
//    fmt - the instruction format to map
//
// Returns:
//    The mapped instruction format.
//
emitter::insFormat emitter::emitMapFmtAtoM(insFormat fmt)
{
    // Purely mechanical A* -> M* format translation; any format without a
    // direct-address equivalent is a caller bug (unreached).
    switch (fmt)
    {
        case IF_ARD:
            return IF_MRD;
        case IF_AWR:
            return IF_MWR;
        case IF_ARW:
            return IF_MRW;

        case IF_RRD_ARD:
            return IF_RRD_MRD;
        case IF_RWR_ARD:
            return IF_RWR_MRD;
        case IF_RWR_ARD_CNS:
            return IF_RWR_MRD_CNS;
        case IF_RRW_ARD:
            return IF_RRW_MRD;
        case IF_RRW_ARD_CNS:
            return IF_RRW_MRD_CNS;
        case IF_RWR_RRD_ARD:
            return IF_RWR_RRD_MRD;
        case IF_RWR_RRD_ARD_CNS:
            return IF_RWR_RRD_MRD_CNS;
        case IF_RWR_RRD_ARD_RRD:
            return IF_RWR_RRD_MRD_RRD;

        case IF_ARD_RRD:
            return IF_MRD_RRD;
        case IF_AWR_RRD:
            return IF_MWR_RRD;
        case IF_ARW_RRD:
            return IF_MRW_RRD;

        case IF_ARD_CNS:
            return IF_MRD_CNS;
        case IF_AWR_CNS:
            return IF_MWR_CNS;
        case IF_ARW_CNS:
            return IF_MRW_CNS;

        case IF_AWR_RRD_CNS:
            return IF_MWR_RRD_CNS;

        case IF_ARW_SHF:
            return IF_MRW_SHF;

        default:
            unreached();
    }
}

//------------------------------------------------------------------------
// emitHandleMemOp: For a memory operand, fill in the relevant fields of the instrDesc.
//
// Arguments:
//    indir - the memory operand.
//    id - the instrDesc to fill in.
//    fmt - the instruction format to use. This must be one of the ARD, AWR, or ARW formats. If necessary (such as for
//          GT_CLS_VAR_ADDR), this function will map it to the correct format.
//    ins - the instruction we are generating. This might affect the instruction format we choose.
//
// Assumptions:
//    The correctly sized instrDesc must already be created, e.g., via emitNewInstrAmd() or emitNewInstrAmdCns();
//
// Post-conditions:
//    For base address of int constant:
//        -- the caller must have added the int constant base to the instrDesc when creating it via
//           emitNewInstrAmdCns().
//    For simple address modes (base + scale * index + offset):
//        -- the base register, index register, and scale factor are set.
//        -- the caller must have added the addressing mode offset int constant to the instrDesc when creating it via
//           emitNewInstrAmdCns().
//
//    The instruction format is set.
//
//    idSetIsDspReloc() is called if necessary.
//
void emitter::emitHandleMemOp(GenTreeIndir* indir, instrDesc* id, insFormat fmt, instruction ins)
{
    assert(fmt != IF_NONE);

    GenTree* memBase = indir->Base();

    // Case 1: static field address (GT_CLS_VAR_ADDR) -> direct-address (M*) format.
    if ((memBase != nullptr) && memBase->isContained() && (memBase->OperGet() == GT_CLS_VAR_ADDR))
    {
        CORINFO_FIELD_HANDLE fldHnd = memBase->AsClsVar()->gtClsVarHnd;

        // Static always need relocs
        if (!jitStaticFldIsGlobAddr(fldHnd))
        {
            // Contract:
            // fgMorphField() changes any statics that won't fit into 32-bit addresses into
            // constants with an indir, rather than GT_CLS_VAR, based on reloc type hint given
            // by VM. Hence emitter should always mark GT_CLS_VAR_ADDR as relocatable.
            //
            // Data section constants: these get allocated close to code block of the method and
            // always addressable IP relative.  These too should be marked as relocatable.

            id->idSetIsDspReloc();
        }

        id->idAddr()->iiaFieldHnd = fldHnd;
        id->idInsFmt(emitMapFmtForIns(emitMapFmtAtoM(fmt), ins));
    }
    // Case 2: contained integer-constant base -> absolute address in the amd displacement.
    else if ((memBase != nullptr) && memBase->IsCnsIntOrI() && memBase->isContained())
    {
        // Absolute addresses marked as contained should fit within the base of addr mode.
        assert(memBase->AsIntConCommon()->FitsInAddrBase(emitComp));

        // If we reach here, either:
        // - we are not generating relocatable code, (typically the non-AOT JIT case)
        // - the base address is a handle represented by an integer constant,
        // - the base address is a constant zero, or
        // - the base address is a constant that fits into the memory instruction (this can happen on x86).
        // This last case is captured in the FitsInAddrBase method which is used by Lowering to determine that it can
        // be contained.
        //
        assert(!emitComp->opts.compReloc || memBase->IsIconHandle() || memBase->IsIntegralConst(0) ||
               memBase->AsIntConCommon()->FitsInAddrBase(emitComp));

        if (memBase->AsIntConCommon()->AddrNeedsReloc(emitComp))
        {
            id->idSetIsDspReloc();
        }

        id->idAddr()->iiaAddrMode.amBaseReg = REG_NA;
        id->idAddr()->iiaAddrMode.amIndxReg = REG_NA;
        id->idAddr()->iiaAddrMode.amScale   = emitter::OPSZ1; // for completeness

        id->idInsFmt(emitMapFmtForIns(fmt, ins));

        // Absolute address must have already been set in the instrDesc constructor.
        assert(emitGetInsAmdAny(id) == memBase->AsIntConCommon()->IconValue());
    }
    // Case 3: general [base + scale*index + offset] address mode.
    else
    {
        regNumber amBaseReg = REG_NA;
        if (memBase != nullptr)
        {
            assert(!memBase->isContained());
            amBaseReg = memBase->GetRegNum();
            assert(amBaseReg != REG_NA);
        }

        regNumber amIndxReg = REG_NA;
        if (indir->HasIndex())
        {
            GenTree* index = indir->Index();
            assert(!index->isContained());
            amIndxReg = index->GetRegNum();
            assert(amIndxReg != REG_NA);
        }

        assert((amBaseReg != REG_NA) || (amIndxReg != REG_NA) || (indir->Offset() != 0)); // At least one should be set.
        id->idAddr()->iiaAddrMode.amBaseReg = amBaseReg;
        id->idAddr()->iiaAddrMode.amIndxReg = amIndxReg;
        id->idAddr()->iiaAddrMode.amScale   = emitEncodeScale(indir->Scale());

        id->idInsFmt(emitMapFmtForIns(fmt, ins));

        // disp must have already been set in the instrDesc constructor.
        assert(emitGetInsAmdAny(id) == indir->Offset()); // make sure "disp" is stored properly
    }
}

// Takes care of storing all incoming register parameters
// into its corresponding shadow space (defined by the x64 ABI)
void emitter::spillIntArgRegsToShadowSlots()
{
    unsigned       argNum;
    instrDesc*     id;
    UNATIVE_OFFSET sz;

    assert(emitComp->compGeneratingProlog);

    for (argNum = 0; argNum < MAX_REG_ARG; ++argNum)
    {
        regNumber argReg = intArgRegs[argNum];

        // The offsets for the shadow space start at RSP + 8
        // (right before the caller return address)
        int offset = (argNum + 1) * EA_PTRSIZE;

        // Emit "mov [rsp + offset], argReg" directly via an instrDesc.
        id = emitNewInstrAmd(EA_PTRSIZE, offset);
        id->idIns(INS_mov);
        id->idInsFmt(IF_AWR_RRD);
        id->idAddr()->iiaAddrMode.amBaseReg = REG_SPBASE;
        id->idAddr()->iiaAddrMode.amIndxReg = REG_NA;
        id->idAddr()->iiaAddrMode.amScale   = emitEncodeScale(1);

        // The offset has already been set in the intrDsc ctor,
        // make sure we got it right.
        assert(emitGetInsAmdAny(id) == ssize_t(offset));

        id->idReg1(argReg);
        sz = emitInsSizeAM(id, insCodeMR(INS_mov));
        id->idCodeSize(sz);
        emitCurIGsize += sz;
    }
}

//------------------------------------------------------------------------
// emitInsLoadInd: Emits a "mov reg, [mem]" (or a variant such as "movzx" or "movss")
// instruction for a GT_IND node.
//
// Arguments:
//    ins - the instruction to emit
//    attr - the instruction operand size
//    dstReg - the destination register
//    mem - the GT_IND node
//
void emitter::emitInsLoadInd(instruction ins, emitAttr attr, regNumber dstReg, GenTreeIndir* mem)
{
    assert(mem->OperIs(GT_IND, GT_NULLCHECK));

    GenTree* addr = mem->Addr();

    // Static field address: use the class-variable form.
    if (addr->OperGet() == GT_CLS_VAR_ADDR)
    {
        emitIns_R_C(ins, attr, dstReg, addr->AsClsVar()->gtClsVarHnd, 0);
        return;
    }

    // Local address: use the stack-variable form.
    if (addr->OperIsLocalAddr())
    {
        GenTreeLclVarCommon* varNode = addr->AsLclVarCommon();
        unsigned             offset  = varNode->GetLclOffs();
        emitIns_R_S(ins, attr, dstReg, varNode->GetLclNum(), offset);

        // Updating variable liveness after instruction was emitted.
        // TODO-Review: it appears that this call to genUpdateLife does nothing because it
        // returns quickly when passed GT_LCL_VAR_ADDR or GT_LCL_FLD_ADDR. Below, emitInsStoreInd
        // had similar code that replaced `varNode` with `mem` (to fix a GC hole). It might be
        // appropriate to do that here as well, but doing so showed no asm diffs, so it's not
        // clear when this scenario gets hit, at least for GC refs.
        codeGen->genUpdateLife(varNode);
        return;
    }

    // General address mode (or contained absolute address).
    assert(addr->OperIsAddrMode() || (addr->IsCnsIntOrI() && addr->isContained()) || !addr->isContained());
    ssize_t    offset = mem->Offset();
    instrDesc* id     = emitNewInstrAmd(attr, offset);
    id->idIns(ins);
    id->idReg1(dstReg);
    emitHandleMemOp(mem, id, IF_RWR_ARD, ins);
    UNATIVE_OFFSET sz = emitInsSizeAM(id, insCodeRM(ins));
    id->idCodeSize(sz);
    dispIns(id);
    emitCurIGsize += sz;
}

//------------------------------------------------------------------------
// emitInsStoreInd: Emits a "mov [mem], reg/imm" (or a variant such as "movss")
// instruction for a GT_STOREIND node.
// // Arguments: // ins - the instruction to emit // attr - the instruction operand size // mem - the GT_STOREIND node // void emitter::emitInsStoreInd(instruction ins, emitAttr attr, GenTreeStoreInd* mem) { assert(mem->OperIs(GT_STOREIND)); GenTree* addr = mem->Addr(); GenTree* data = mem->Data(); if (addr->OperGet() == GT_CLS_VAR_ADDR) { if (data->isContainedIntOrIImmed()) { emitIns_C_I(ins, attr, addr->AsClsVar()->gtClsVarHnd, 0, (int)data->AsIntConCommon()->IconValue()); } else { assert(!data->isContained()); emitIns_C_R(ins, attr, addr->AsClsVar()->gtClsVarHnd, data->GetRegNum(), 0); } return; } if (addr->OperIsLocalAddr()) { GenTreeLclVarCommon* varNode = addr->AsLclVarCommon(); unsigned offset = varNode->GetLclOffs(); if (data->isContainedIntOrIImmed()) { emitIns_S_I(ins, attr, varNode->GetLclNum(), offset, (int)data->AsIntConCommon()->IconValue()); } else { assert(!data->isContained()); emitIns_S_R(ins, attr, data->GetRegNum(), varNode->GetLclNum(), offset); } // Updating variable liveness after instruction was emitted codeGen->genUpdateLife(mem); return; } ssize_t offset = mem->Offset(); UNATIVE_OFFSET sz; instrDesc* id; if (data->isContainedIntOrIImmed()) { int icon = (int)data->AsIntConCommon()->IconValue(); id = emitNewInstrAmdCns(attr, offset, icon); id->idIns(ins); emitHandleMemOp(mem, id, IF_AWR_CNS, ins); sz = emitInsSizeAM(id, insCodeMI(ins), icon); id->idCodeSize(sz); } else { assert(!data->isContained()); id = emitNewInstrAmd(attr, offset); id->idIns(ins); emitHandleMemOp(mem, id, IF_AWR_RRD, ins); id->idReg1(data->GetRegNum()); sz = emitInsSizeAM(id, insCodeMR(ins)); id->idCodeSize(sz); } dispIns(id); emitCurIGsize += sz; } //------------------------------------------------------------------------ // emitInsStoreLcl: Emits a "mov [mem], reg/imm" (or a variant such as "movss") // instruction for a GT_STORE_LCL_VAR node. 
//
// Arguments:
//    ins - the instruction to emit
//    attr - the instruction operand size
//    varNode - the GT_STORE_LCL_VAR node
//
void emitter::emitInsStoreLcl(instruction ins, emitAttr attr, GenTreeLclVarCommon* varNode)
{
    assert(varNode->OperIs(GT_STORE_LCL_VAR));
    assert(varNode->GetRegNum() == REG_NA); // stack store

    GenTree* data = varNode->gtGetOp1();
    codeGen->inst_set_SV_var(varNode);

    if (data->isContainedIntOrIImmed())
    {
        emitIns_S_I(ins, attr, varNode->GetLclNum(), 0, (int)data->AsIntConCommon()->IconValue());
    }
    else
    {
        assert(!data->isContained());
        emitIns_S_R(ins, attr, data->GetRegNum(), varNode->GetLclNum(), 0);
    }

    // Updating variable liveness after instruction was emitted
    codeGen->genUpdateLife(varNode);
}

//------------------------------------------------------------------------
// emitInsBinary: Emits an instruction for a node which takes two operands
//
// Arguments:
//    ins - the instruction to emit
//    attr - the instruction operand size
//    dst - the destination and first source operand
//    src - the second source operand
//
// Assumptions:
//  i) caller of this routine needs to call genConsumeReg()
// ii) caller of this routine needs to call genProduceReg()
regNumber emitter::emitInsBinary(instruction ins, emitAttr attr, GenTree* dst, GenTree* src)
{
    // We can only have one memory operand and only src can be a constant operand
    // However, the handling for a given operand type (mem, cns, or other) is fairly
    // consistent regardless of whether they are src or dst. As such, we will find
    // the type of each operand and only check them against src/dst where relevant.

    GenTree* memOp   = nullptr;
    GenTree* cnsOp   = nullptr;
    GenTree* otherOp = nullptr;

    // Classify dst/src into at most one memory operand, at most one constant
    // operand (src only), and the remaining "other" (register) operand.
    if (dst->isContained() || (dst->isLclField() && (dst->GetRegNum() == REG_NA)) || dst->isUsedFromSpillTemp())
    {
        // dst can only be a modrm
        // dst on 3opImul isn't really the dst
        assert(dst->isUsedFromMemory() || (dst->GetRegNum() == REG_NA) || instrIs3opImul(ins));
        assert(!src->isUsedFromMemory());

        memOp = dst;

        if (src->isContained())
        {
            assert(src->IsCnsIntOrI());
            cnsOp = src;
        }
        else
        {
            otherOp = src;
        }
    }
    else if (src->isContained() || src->isUsedFromSpillTemp())
    {
        assert(!dst->isUsedFromMemory());
        otherOp = dst;

        if ((src->IsCnsIntOrI() || src->IsCnsFltOrDbl()) && !src->isUsedFromSpillTemp())
        {
            assert(!src->isUsedFromMemory() || src->IsCnsFltOrDbl());
            cnsOp = src;
        }
        else
        {
            assert(src->isUsedFromMemory());
            memOp = src;
        }
    }

    // At this point, we either have a memory operand or we don't.
    //
    // If we don't then the logic is very simple and we will either be emitting a
    // `reg, immed` instruction (if src is a cns) or a `reg, reg` instruction otherwise.
    //
    // If we do have a memory operand, the logic is a bit more complicated as we need
    // to do different things depending on the type of memory operand. These types include:
    //  * Spill temp
    //  * Indirect access
    //    * Local variable
    //    * Class variable
    //    * Addressing mode [base + index * scale + offset]
    //  * Local field
    //  * Local variable
    //
    // Most of these types (except Indirect: Class variable and Indirect: Addressing mode)
    // give us a a local variable number and an offset and access memory on the stack
    //
    // Indirect: Class variable is used for access static class variables and gives us a handle
    // to the memory location we read from
    //
    // Indirect: Addressing mode is used for the remaining memory accesses and will give us
    // a base address, an index, a scale, and an offset. These are combined to let us easily
    // access the given memory location.
    //
    // In all of the memory access cases, we determine which form to emit (e.g. `reg, [mem]`
    // or `[mem], reg`) by comparing memOp to src to determine which `emitIns_*` method needs
    // to be called. The exception is for the `[mem], immed` case (for Indirect: Class variable)
    // where only src can be the immediate.

    if (memOp != nullptr)
    {
        TempDsc* tmpDsc = nullptr;
        unsigned varNum = BAD_VAR_NUM;
        unsigned offset = (unsigned)-1;

        if (memOp->isUsedFromSpillTemp())
        {
            assert(memOp->IsRegOptional());

            tmpDsc = codeGen->getSpillTempDsc(memOp);
            varNum = tmpDsc->tdTempNum();
            offset = 0;

            codeGen->regSet.tmpRlsTemp(tmpDsc);
        }
        else if (memOp->isIndir())
        {
            GenTreeIndir* memIndir = memOp->AsIndir();
            GenTree*      memBase  = memIndir->gtOp1;

            switch (memBase->OperGet())
            {
                case GT_LCL_VAR_ADDR:
                case GT_LCL_FLD_ADDR:
                {
                    assert(memBase->isContained());
                    varNum = memBase->AsLclVarCommon()->GetLclNum();
                    offset = memBase->AsLclVarCommon()->GetLclOffs();

                    // Ensure that all the GenTreeIndir values are set to their defaults.
                    assert(!memIndir->HasIndex());
                    assert(memIndir->Scale() == 1);
                    assert(memIndir->Offset() == 0);

                    break;
                }

                case GT_CLS_VAR_ADDR:
                {
                    if (memOp == src)
                    {
                        assert(otherOp == dst);
                        assert(cnsOp == nullptr);

                        if (instrHasImplicitRegPairDest(ins))
                        {
                            // src is a class static variable
                            // dst is implicit - RDX:RAX
                            emitIns_C(ins, attr, memBase->AsClsVar()->gtClsVarHnd, 0);
                        }
                        else
                        {
                            // src is a class static variable
                            // dst is a register
                            emitIns_R_C(ins, attr, dst->GetRegNum(), memBase->AsClsVar()->gtClsVarHnd, 0);
                        }
                    }
                    else
                    {
                        assert(memOp == dst);

                        if (cnsOp != nullptr)
                        {
                            assert(cnsOp == src);
                            assert(otherOp == nullptr);
                            assert(src->IsCnsIntOrI());

                            // src is an contained immediate
                            // dst is a class static variable
                            emitIns_C_I(ins, attr, memBase->AsClsVar()->gtClsVarHnd, 0,
                                        (int)src->AsIntConCommon()->IconValue());
                        }
                        else
                        {
                            assert(otherOp == src);

                            // src is a register
                            // dst is a class static variable
                            emitIns_C_R(ins, attr, memBase->AsClsVar()->gtClsVarHnd, src->GetRegNum(), 0);
                        }
                    }

                    return dst->GetRegNum();
                }

                default: // Addressing mode [base + index * scale + offset]
                {
                    instrDesc* id = nullptr;

                    if (cnsOp != nullptr)
                    {
                        assert(memOp == dst);
                        assert(cnsOp == src);
                        assert(otherOp == nullptr);
                        assert(src->IsCnsIntOrI());

                        id = emitNewInstrAmdCns(attr, memIndir->Offset(), (int)src->AsIntConCommon()->IconValue());
                    }
                    else
                    {
                        // NOTE(review): this local `offset` shadows the outer `offset`
                        // declared above; the outer one is unused on this path.
                        ssize_t offset = memIndir->Offset();
                        id             = emitNewInstrAmd(attr, offset);
                        id->idIns(ins);

                        GenTree* regTree = (memOp == src) ? dst : src;

                        // there must be one non-contained op
                        assert(!regTree->isContained());
                        id->idReg1(regTree->GetRegNum());
                    }
                    assert(id != nullptr);

                    id->idIns(ins); // Set the instruction.
                                    // (Redundant with the idIns call in the else-branch above, but harmless.)

                    // Determine the instruction format
                    insFormat fmt = IF_NONE;

                    if (memOp == src)
                    {
                        assert(cnsOp == nullptr);
                        assert(otherOp == dst);

                        if (instrHasImplicitRegPairDest(ins))
                        {
                            fmt = emitInsModeFormat(ins, IF_ARD);
                        }
                        else
                        {
                            fmt = emitInsModeFormat(ins, IF_RRD_ARD);
                        }
                    }
                    else
                    {
                        assert(memOp == dst);

                        if (cnsOp != nullptr)
                        {
                            assert(cnsOp == src);
                            assert(otherOp == nullptr);
                            assert(src->IsCnsIntOrI());

                            fmt = emitInsModeFormat(ins, IF_ARD_CNS);
                        }
                        else
                        {
                            assert(otherOp == src);
                            fmt = emitInsModeFormat(ins, IF_ARD_RRD);
                        }
                    }
                    assert(fmt != IF_NONE);
                    emitHandleMemOp(memIndir, id, fmt, ins);

                    // Determine the instruction size
                    UNATIVE_OFFSET sz = 0;

                    if (memOp == src)
                    {
                        assert(otherOp == dst);
                        assert(cnsOp == nullptr);

                        if (instrHasImplicitRegPairDest(ins))
                        {
                            sz = emitInsSizeAM(id, insCode(ins));
                        }
                        else
                        {
                            sz = emitInsSizeAM(id, insCodeRM(ins));
                        }
                    }
                    else
                    {
                        assert(memOp == dst);

                        if (cnsOp != nullptr)
                        {
                            assert(memOp == dst);
                            assert(cnsOp == src);
                            assert(otherOp == nullptr);

                            sz = emitInsSizeAM(id, insCodeMI(ins), (int)src->AsIntConCommon()->IconValue());
                        }
                        else
                        {
                            assert(otherOp == src);
                            sz = emitInsSizeAM(id, insCodeMR(ins));
                        }
                    }
                    assert(sz != 0);

                    id->idCodeSize(sz);

                    dispIns(id);
                    emitCurIGsize += sz;

                    return (memOp == src) ? dst->GetRegNum() : REG_NA;
                }
            }
        }
        else
        {
            switch (memOp->OperGet())
            {
                case GT_LCL_FLD:
                case GT_STORE_LCL_FLD:
                    varNum = memOp->AsLclFld()->GetLclNum();
                    offset = memOp->AsLclFld()->GetLclOffs();
                    break;

                case GT_LCL_VAR:
                {
                    assert(memOp->IsRegOptional() ||
                           !emitComp->lvaTable[memOp->AsLclVar()->GetLclNum()].lvIsRegCandidate());
                    varNum = memOp->AsLclVar()->GetLclNum();
                    offset = 0;
                    break;
                }

                default:
                    unreached();
                    break;
            }
        }

        // Ensure we got a good varNum and offset.
        // We also need to check for `tmpDsc != nullptr` since spill temp numbers
        // are negative and start with -1, which also happens to be BAD_VAR_NUM.
        assert((varNum != BAD_VAR_NUM) || (tmpDsc != nullptr));
        assert(offset != (unsigned)-1);

        if (memOp == src)
        {
            assert(otherOp == dst);
            assert(cnsOp == nullptr);

            if (instrHasImplicitRegPairDest(ins))
            {
                // src is a stack based local variable
                // dst is implicit - RDX:RAX
                emitIns_S(ins, attr, varNum, offset);
            }
            else
            {
                // src is a stack based local variable
                // dst is a register
                emitIns_R_S(ins, attr, dst->GetRegNum(), varNum, offset);
            }
        }
        else
        {
            assert(memOp == dst);
            assert((dst->GetRegNum() == REG_NA) || dst->IsRegOptional());

            if (cnsOp != nullptr)
            {
                assert(cnsOp == src);
                assert(otherOp == nullptr);
                assert(src->IsCnsIntOrI());

                // src is an contained immediate
                // dst is a stack based local variable
                emitIns_S_I(ins, attr, varNum, offset, (int)src->AsIntConCommon()->IconValue());
            }
            else
            {
                assert(otherOp == src);
                assert(!src->isContained());

                // src is a register
                // dst is a stack based local variable
                emitIns_S_R(ins, attr, src->GetRegNum(), varNum, offset);
            }
        }
    }
    else if (cnsOp != nullptr) // reg, immed
    {
        assert(cnsOp == src);
        assert(otherOp == dst);

        if (src->IsCnsIntOrI())
        {
            assert(!dst->isContained());
            GenTreeIntConCommon* intCns = src->AsIntConCommon();
            emitIns_R_I(ins, attr, dst->GetRegNum(), intCns->IconValue());
        }
        else
        {
            assert(src->IsCnsFltOrDbl());
            GenTreeDblCon* dblCns = src->AsDblCon();

            // Float/double constants are materialized in the data section and
            // accessed through a field handle.
            CORINFO_FIELD_HANDLE hnd = emitFltOrDblConst(dblCns->gtDconVal, emitTypeSize(dblCns));
            emitIns_R_C(ins, attr, dst->GetRegNum(), hnd, 0);
        }
    }
    else // reg, reg
    {
        assert(otherOp == nullptr);
        assert(!src->isContained() && !dst->isContained());

        if (instrHasImplicitRegPairDest(ins))
        {
            emitIns_R(ins, attr, src->GetRegNum());
        }
        else
        {
            emitIns_R_R(ins, attr, dst->GetRegNum(), src->GetRegNum());
        }
    }

    return dst->GetRegNum();
}

//------------------------------------------------------------------------
// emitInsRMW: Emit logic for Read-Modify-Write binary instructions.
//
// Responsible for emitting a single instruction that will perform an operation of the form:
//      *addr = *addr <BinOp> src
// For example:
//      ADD [RAX], RCX
//
// Arguments:
//    ins - instruction to generate
//    attr - emitter attribute for instruction
//    storeInd - indir for RMW addressing mode
//    src - source operand of instruction
//
// Assumptions:
//    Lowering has taken care of recognizing the StoreInd pattern of:
//          StoreInd( AddressTree, BinOp( Ind ( AddressTree ), Operand ) )
//    The address to store is already sitting in a register.
//
// Notes:
//    This is a no-produce operation, meaning that no register output will
//    be produced for future use in the code stream.
// void emitter::emitInsRMW(instruction ins, emitAttr attr, GenTreeStoreInd* storeInd, GenTree* src) { GenTree* addr = storeInd->Addr(); addr = addr->gtSkipReloadOrCopy(); assert(addr->OperIs(GT_LCL_VAR, GT_LCL_VAR_ADDR, GT_LEA, GT_CLS_VAR_ADDR, GT_CNS_INT)); instrDesc* id = nullptr; UNATIVE_OFFSET sz; ssize_t offset = 0; if (addr->OperGet() != GT_CLS_VAR_ADDR) { offset = storeInd->Offset(); } if (src->isContainedIntOrIImmed()) { GenTreeIntConCommon* intConst = src->AsIntConCommon(); int iconVal = (int)intConst->IconValue(); switch (ins) { case INS_rcl_N: case INS_rcr_N: case INS_rol_N: case INS_ror_N: case INS_shl_N: case INS_shr_N: case INS_sar_N: iconVal &= 0x7F; break; default: break; } if (addr->isContained() && addr->OperIsLocalAddr()) { GenTreeLclVarCommon* lclVar = addr->AsLclVarCommon(); emitIns_S_I(ins, attr, lclVar->GetLclNum(), lclVar->GetLclOffs(), iconVal); return; } else { id = emitNewInstrAmdCns(attr, offset, iconVal); emitHandleMemOp(storeInd, id, IF_ARW_CNS, ins); id->idIns(ins); sz = emitInsSizeAM(id, insCodeMI(ins), iconVal); } } else { assert(!src->isContained()); // there must be one non-contained src // ind, reg id = emitNewInstrAmd(attr, offset); emitHandleMemOp(storeInd, id, IF_ARW_RRD, ins); id->idReg1(src->GetRegNum()); id->idIns(ins); sz = emitInsSizeAM(id, insCodeMR(ins)); } id->idCodeSize(sz); dispIns(id); emitCurIGsize += sz; } //------------------------------------------------------------------------ // emitInsRMW: Emit logic for Read-Modify-Write unary instructions. 
// // Responsible for emitting a single instruction that will perform an operation of the form: // *addr = UnaryOp *addr // For example: // NOT [RAX] // // Arguments: // ins - instruction to generate // attr - emitter attribute for instruction // storeInd - indir for RMW addressing mode // // Assumptions: // Lowering has taken care of recognizing the StoreInd pattern of: // StoreInd( AddressTree, UnaryOp( Ind ( AddressTree ) ) ) // The address to store is already sitting in a register. // // Notes: // This is a no-produce operation, meaning that no register output will // be produced for future use in the code stream. // void emitter::emitInsRMW(instruction ins, emitAttr attr, GenTreeStoreInd* storeInd) { GenTree* addr = storeInd->Addr(); addr = addr->gtSkipReloadOrCopy(); assert(addr->OperIs(GT_LCL_VAR, GT_LCL_VAR_ADDR, GT_CLS_VAR_ADDR, GT_LEA, GT_CNS_INT)); ssize_t offset = 0; if (addr->OperGet() != GT_CLS_VAR_ADDR) { offset = storeInd->Offset(); } if (addr->isContained() && addr->OperIsLocalAddr()) { GenTreeLclVarCommon* lclVar = addr->AsLclVarCommon(); emitIns_S(ins, attr, lclVar->GetLclNum(), lclVar->GetLclOffs()); return; } instrDesc* id = emitNewInstrAmd(attr, offset); emitHandleMemOp(storeInd, id, IF_ARW, ins); id->idIns(ins); UNATIVE_OFFSET sz = emitInsSizeAM(id, insCodeMR(ins)); id->idCodeSize(sz); dispIns(id); emitCurIGsize += sz; } /***************************************************************************** * * Add an instruction referencing a single register. 
 */
void emitter::emitIns_R(instruction ins, emitAttr attr, regNumber reg)
{
    emitAttr size = EA_SIZE(attr);

    assert(size <= EA_PTRSIZE);
    noway_assert(emitVerifyEncodable(ins, size, reg));

    UNATIVE_OFFSET sz;
    instrDesc*     id = emitNewInstrSmall(attr);

    // Estimate the base encoding size; prefix bytes are added below.
    switch (ins)
    {
        case INS_inc:
        case INS_dec:
#ifdef TARGET_AMD64

            sz = 2; // x64 has no 1-byte opcode (it is the same encoding as the REX prefix)

#else // !TARGET_AMD64

            if (size == EA_1BYTE)
                sz = 2; // Use the long form as the small one has no 'w' bit
            else
                sz = 1; // Use short form

#endif // !TARGET_AMD64

            break;

        case INS_pop:
        case INS_pop_hide:
        case INS_push:
        case INS_push_hide:

            /* We don't currently push/pop small values */

            assert(size == EA_PTRSIZE);

            sz = 1;
            break;

        default:

            /* All the sixteen INS_setCCs are contiguous. */

            if (INS_seto <= ins && ins <= INS_setg)
            {
                // Rough check that we used the endpoints for the range check

                assert(INS_seto + 0xF == INS_setg);

                // The caller must specify EA_1BYTE for 'attr'

                assert(attr == EA_1BYTE);

                /* We expect this to always be a 'big' opcode */

                assert(insEncodeMRreg(ins, reg, attr, insCodeMR(ins)) & 0x00FF0000);

                size = attr;

                sz = 3;
                break;
            }
            else
            {
                sz = 2;
                break;
            }
    }
    insFormat fmt = emitInsModeFormat(ins, IF_RRD);

    id->idIns(ins);
    id->idInsFmt(fmt);
    id->idReg1(reg);

    // Vex bytes
    sz += emitGetAdjustedSize(ins, attr, insEncodeMRreg(ins, reg, attr, insCodeMR(ins)));

    // REX byte
    if (IsExtendedReg(reg, attr) || TakesRexWPrefix(ins, attr))
    {
        sz += emitGetRexPrefixSize(ins);
    }

    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;

    // push/pop change the tracked stack depth.
    emitAdjustStackDepthPushPop(ins);
}

/*****************************************************************************
 *
 *  Add an instruction referencing a register and a constant.
 */
void emitter::emitIns_R_I(instruction ins, emitAttr attr, regNumber reg, ssize_t val DEBUGARG(GenTreeFlags gtFlags))
{
    emitAttr size = EA_SIZE(attr);

    // Allow emitting SSE2/AVX SIMD instructions of R_I form that can specify EA_16BYTE or EA_32BYTE
    assert(size <= EA_PTRSIZE || IsSSEOrAVXInstruction(ins));

    noway_assert(emitVerifyEncodable(ins, size, reg));

#ifdef TARGET_AMD64
    // mov reg, imm64 is the only opcode which takes a full 8 byte immediate
    // all other opcodes take a sign-extended 4-byte immediate
    noway_assert(size < EA_8BYTE || ins == INS_mov || ((int)val == val && !EA_IS_CNS_RELOC(attr)));
#endif

    UNATIVE_OFFSET sz;
    instrDesc*     id;
    insFormat      fmt = emitInsModeFormat(ins, IF_RRD_CNS);
    // mov/test never use the short (sign-extended byte) immediate form.
    bool valInByte = ((signed char)val == (target_ssize_t)val) && (ins != INS_mov) && (ins != INS_test);

    // BT reg,imm might be useful but it requires special handling of the immediate value
    // (it is always encoded in a byte). Let's not complicate things until this is needed.
    assert(ins != INS_bt);

    // Figure out the size of the instruction
    switch (ins)
    {
        case INS_mov:
#ifdef TARGET_AMD64
            // mov reg, imm64 is equivalent to mov reg, imm32 if the high order bits are all 0
            // and this isn't a reloc constant.
            if (((size > EA_4BYTE) && (0 == (val & 0xFFFFFFFF00000000LL))) && !EA_IS_CNS_RELOC(attr))
            {
                attr = size = EA_4BYTE;
            }

            if (size > EA_4BYTE)
            {
                sz = 9; // Really it is 10, but we'll add one more later
                break;
            }
#endif // TARGET_AMD64
            sz = 5;
            break;

        case INS_rcl_N:
        case INS_rcr_N:
        case INS_rol_N:
        case INS_ror_N:
        case INS_shl_N:
        case INS_shr_N:
        case INS_sar_N:
            // Shift-by-1 has its own encoding; callers must not route it here.
            assert(val != 1);
            fmt = IF_RRW_SHF;
            sz  = 3;
            val &= 0x7F;
            valInByte = true; // shift amount always placed in a byte
            break;

        default:

            if (EA_IS_CNS_RELOC(attr))
            {
                valInByte = false; // relocs can't be placed in a byte
            }

            if (valInByte)
            {
                if (IsSSEOrAVXInstruction(ins))
                {
                    bool includeRexPrefixSize = true;
                    // Do not get the RexSize() but just decide if it will be included down further and if yes,
                    // do not include it again.
                    if (IsExtendedReg(reg, attr) || TakesRexWPrefix(ins, size) || instrIsExtendedReg3opImul(ins))
                    {
                        includeRexPrefixSize = false;
                    }

                    sz = emitInsSize(insCodeMI(ins), includeRexPrefixSize);
                    sz += 1;
                }
                else if (size == EA_1BYTE && reg == REG_EAX && !instrIs3opImul(ins))
                {
                    // Short accumulator form: "op al, imm8".
                    sz = 2;
                }
                else
                {
                    sz = 3;
                }
            }
            else
            {
                assert(!IsSSEOrAVXInstruction(ins));

                if (reg == REG_EAX && !instrIs3opImul(ins))
                {
                    // Short accumulator form saves the ModRM byte.
                    sz = 1;
                }
                else
                {
                    sz = 2;
                }

#ifdef TARGET_AMD64
                if (size > EA_4BYTE)
                {
                    // We special-case anything that takes a full 8-byte constant.
                    sz += 4;
                }
                else
#endif // TARGET_AMD64
                {
                    sz += EA_SIZE_IN_BYTES(attr);
                }
            }
            break;
    }

    sz += emitGetAdjustedSize(ins, attr, insCodeMI(ins));

    // Do we need a REX prefix for AMD64? We need one if we are using any extended register (REX.R), or if we have a
    // 64-bit sized operand (REX.W). Note that IMUL in our encoding is special, with a "built-in", implicit, target
    // register. So we also need to check if that built-in register is an extended register.
    if (IsExtendedReg(reg, attr) || TakesRexWPrefix(ins, size) || instrIsExtendedReg3opImul(ins))
    {
        sz += emitGetRexPrefixSize(ins);
    }

    id = emitNewInstrSC(attr, val);
    id->idIns(ins);
    id->idInsFmt(fmt);
    id->idReg1(reg);
    id->idCodeSize(sz);

    INDEBUG(id->idDebugOnlyInfo()->idFlags = gtFlags);

    dispIns(id);
    emitCurIGsize += sz;

    // Adjusting RSP directly changes the tracked stack depth.
    if (reg == REG_ESP)
    {
        emitAdjustStackDepth(ins, val);
    }
}

/*****************************************************************************
 *
 *  Add an instruction referencing an integer constant.
 */
void emitter::emitIns_I(instruction ins, emitAttr attr, cnsval_ssize_t val)
{
    UNATIVE_OFFSET sz;
    instrDesc*     id;
    bool           valInByte = ((signed char)val == (target_ssize_t)val);

#ifdef TARGET_AMD64
    // mov reg, imm64 is the only opcode which takes a full 8 byte immediate
    // all other opcodes take a sign-extended 4-byte immediate
    noway_assert(EA_SIZE(attr) < EA_8BYTE || !EA_IS_CNS_RELOC(attr));
#endif

    if (EA_IS_CNS_RELOC(attr))
    {
        valInByte = false; // relocs can't be placed in a byte
    }

    // Only a handful of instructions take a bare immediate operand.
    switch (ins)
    {
        case INS_loop:
        case INS_jge:
            sz = 2;
            break;

        case INS_ret:
            sz = 3;
            break;

        case INS_push_hide:
        case INS_push:
            // push imm8 is 2 bytes; push imm32 is 5 bytes.
            sz = valInByte ? 2 : 5;
            break;

        default:
            NO_WAY("unexpected instruction");
    }

    id = emitNewInstrSC(attr, val);
    id->idIns(ins);
    id->idInsFmt(IF_CNS);
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;

    emitAdjustStackDepthPushPop(ins);
}

/*****************************************************************************
 *
 *  Add a "jump through a table" instruction.
 */
void emitter::emitIns_IJ(emitAttr attr, regNumber reg, unsigned base)
{
    assert(EA_SIZE(attr) == EA_4BYTE);

    // Base size: opcode + ModRM/SIB (3) plus a 4-byte displacement.
    UNATIVE_OFFSET    sz  = 3 + 4;
    const instruction ins = INS_i_jmp;

    if (IsExtendedReg(reg, attr))
    {
        sz += emitGetRexPrefixSize(ins);
    }

    instrDesc* id = emitNewInstrAmd(attr, base);

    id->idIns(ins);
    id->idInsFmt(IF_ARD);
    // 'reg' is the table index; the jump scales it by the pointer size.
    id->idAddr()->iiaAddrMode.amBaseReg = REG_NA;
    id->idAddr()->iiaAddrMode.amIndxReg = reg;
    id->idAddr()->iiaAddrMode.amScale   = emitter::OPSZP;

#ifdef DEBUG
    id->idDebugOnlyInfo()->idMemCookie = base;
#endif

    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;
}

/*****************************************************************************
 *
 *  Add an instruction with a static data member operand. If 'size' is 0, the
 *  instruction operates on the address of the static member instead of its
 *  value (e.g. "push offset clsvar", rather than "push dword ptr [clsvar]").
 */
void emitter::emitIns_C(instruction ins, emitAttr attr, CORINFO_FIELD_HANDLE fldHnd, int offs)
{
    // Static always need relocs
    if (!jitStaticFldIsGlobAddr(fldHnd))
    {
        attr = EA_SET_FLG(attr, EA_DSP_RELOC_FLG);
    }

    UNATIVE_OFFSET sz;
    instrDesc*     id;

    /* Are we pushing the offset of the class variable? */

    if (EA_IS_OFFSET(attr))
    {
        assert(ins == INS_push);
        sz = 1 + TARGET_POINTER_SIZE;

        id = emitNewInstrDsp(EA_1BYTE, offs);
        id->idIns(ins);
        id->idInsFmt(IF_MRD_OFF);
    }
    else
    {
        insFormat fmt = emitInsModeFormat(ins, IF_MRD);

        id = emitNewInstrDsp(attr, offs);
        id->idIns(ins);
        id->idInsFmt(fmt);
        sz = emitInsSizeCV(id, insCodeMR(ins));
    }

    if (TakesRexWPrefix(ins, attr))
    {
        // REX.W prefix
        sz += emitGetRexPrefixSize(ins);
    }

    id->idAddr()->iiaFieldHnd = fldHnd;

    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;

    emitAdjustStackDepthPushPop(ins);
}

//------------------------------------------------------------------------
// IsMovInstruction: Determines whether a given instruction is a move instruction
//
// Arguments:
//    ins       -- The instruction being checked
//
// Return Value:
//    true if the instruction is a qualifying move instruction; otherwise, false
//
// Remarks:
//    This methods covers most kinds of two operand move instructions that copy a
//    value between two registers. It does not cover all move-like instructions
//    and so doesn't currently cover things like movsb/movsw/movsd/movsq or cmovcc
//    and doesn't currently cover cases where a value is read/written from memory.
//
//    The reason it doesn't cover all instructions was namely to limit the scope
//    of the initial change to that which was impactful to move elision so that
//    it could be centrally managed and optimized. It may be beneficial to support
//    the other move instructions in the future but that may require more extensive
//    changes to ensure relevant codegen/emit paths flow and check things correctly.
bool emitter::IsMovInstruction(instruction ins)
{
    // Accumulate the verdict in a local so there is a single exit point;
    // the AMD64-only movs share the same case list under the #if.
    bool isMove = false;

    switch (ins)
    {
        case INS_mov:
        case INS_movapd:
        case INS_movaps:
        case INS_movd:
        case INS_movdqa:
        case INS_movdqu:
        case INS_movsdsse2:
        case INS_movss:
        case INS_movsx:
        case INS_movupd:
        case INS_movups:
        case INS_movzx:
#if defined(TARGET_AMD64)
        case INS_movq:
        case INS_movsxd:
#endif // TARGET_AMD64
        {
            isMove = true;
            break;
        }

        default:
        {
            break;
        }
    }

    return isMove;
}

//------------------------------------------------------------------------
// IsJccInstruction: Determine if an instruction is a conditional jump instruction.
//
// Arguments:
//    ins       -- The instruction being checked
//
// Return Value:
//    true if the instruction qualifies; otherwise, false
//
bool emitter::IsJccInstruction(instruction ins)
{
    // The short-form and long-form conditional jumps each occupy a
    // contiguous range in the instruction enumeration.
    const bool isShortJcc = (ins >= INS_jo) && (ins <= INS_jg);
    const bool isLongJcc  = (ins >= INS_l_jo) && (ins <= INS_l_jg);
    return isShortJcc || isLongJcc;
}

//------------------------------------------------------------------------
// IsJmpInstruction: Determine if an instruction is a jump instruction but NOT a conditional jump instruction.
//
// Arguments:
//    ins       -- The instruction being checked
//
// Return Value:
//    true if the instruction qualifies; otherwise, false
//
bool emitter::IsJmpInstruction(instruction ins)
{
    switch (ins)
    {
        case INS_i_jmp:
        case INS_jmp:
        case INS_l_jmp:
        case INS_tail_i_jmp:
            return true;
        default:
            return false;
    }
}

// TODO-XArch-CQ: There are places where the fact that an instruction zero-extends
// is not an important detail, such as when "regular" floating-point code is generated
//
// This differs from cases like HWIntrinsics that deal with the entire vector and so
// they need to be "aware" that a given move impacts the upper-bits.
//
// Ideally we can detect this difference, likely via canIgnoreSideEffects, and allow
// the below optimizations for those scenarios as well.
// Track whether the instruction has a zero/sign-extension or clearing of the upper-bits as a side-effect bool emitter::HasSideEffect(instruction ins, emitAttr size) { bool hasSideEffect = false; switch (ins) { case INS_mov: { // non EA_PTRSIZE moves may zero-extend the source hasSideEffect = (size != EA_PTRSIZE); break; } case INS_movapd: case INS_movaps: case INS_movdqa: case INS_movdqu: case INS_movupd: case INS_movups: { // non EA_32BYTE moves clear the upper bits under VEX encoding hasSideEffect = UseVEXEncoding() && (size != EA_32BYTE); break; } case INS_movd: { // Clears the upper bits hasSideEffect = true; break; } case INS_movsdsse2: case INS_movss: { // Clears the upper bits under VEX encoding hasSideEffect = UseVEXEncoding(); break; } case INS_movsx: case INS_movzx: { // Sign/Zero-extends the source hasSideEffect = true; break; } #if defined(TARGET_AMD64) case INS_movq: { // Clears the upper bits hasSideEffect = true; break; } case INS_movsxd: { // Sign-extends the source hasSideEffect = true; break; } #endif // TARGET_AMD64 default: { unreached(); } } return hasSideEffect; } //---------------------------------------------------------------------------------------- // IsRedundantMov: // Check if the current `mov` instruction is redundant and can be omitted. // A `mov` is redundant in following 3 cases: // // 1. Move to same register on TARGET_AMD64 // (Except 4-byte movement like "mov eax, eax" which zeros out upper bits of eax register) // // mov rax, rax // // 2. Move that is identical to last instruction emitted. // // mov rax, rbx # <-- last instruction // mov rax, rbx # <-- current instruction can be omitted. // // 3. Opposite Move as that of last instruction emitted. // // mov rax, rbx # <-- last instruction // mov rbx, rax # <-- current instruction can be omitted. 
//
// Arguments:
//                 ins  - The current instruction
//                 fmt  - The current format
//                 size - Operand size of current instruction
//                 dst  - The current destination
//                 src  - The current source
// canIgnoreSideEffects - The move can be skipped as it doesn't represent special semantics
//
// Return Value:
//    true if the move instruction is redundant; otherwise, false.

bool emitter::IsRedundantMov(
    instruction ins, insFormat fmt, emitAttr size, regNumber dst, regNumber src, bool canIgnoreSideEffects)
{
    assert(IsMovInstruction(ins));

    if (canIgnoreSideEffects && (dst == src))
    {
        // These elisions used to be explicit even when optimizations were disabled
        // Some instructions have a side effect and shouldn't be skipped
        // however existing codepaths were skipping these instructions in
        // certain scenarios and so we skip them as well for back-compat
        // when canIgnoreSideEffects is true (see below for which have a
        // side effect).
        //
        // Long term, these paths should be audited and should likely be
        // replaced with copies rather than extensions.
        return true;
    }

    if (!emitComp->opts.OptimizationEnabled())
    {
        // The remaining move elisions should only happen if optimizations are enabled
        return false;
    }

    // Skip optimization if current instruction creates a GC live value.
    if (EA_IS_GCREF_OR_BYREF(size))
    {
        return false;
    }

    bool hasSideEffect = HasSideEffect(ins, size);

    // Check if we are already in the correct register and don't have a side effect
    if ((dst == src) && !hasSideEffect)
    {
        JITDUMP("\n -- suppressing mov because src and dst is same register and the mov has no side-effects.\n");
        return true;
    }

    // An IG-extension boundary means the "last instruction" may belong to a
    // different basic block, so only a true first-instruction position blocks the peephole.
    bool isFirstInstrInBlock = (emitCurIGinsCnt == 0) && ((emitCurIG->igFlags & IGF_EXTEND) == 0);

    // TODO-XArch-CQ: Certain instructions, such as movaps vs movups, are equivalent in
    // functionality even if their actual identifier differs and we should optimize these

    if (isFirstInstrInBlock ||               // Don't optimize if instruction is the first instruction in IG.
        (emitLastIns == nullptr) ||          // or if a last instruction doesn't exist
        (emitLastIns->idIns() != ins) ||     // or if the instruction is different from the last instruction
        (emitLastIns->idOpSize() != size) || // or if the operand size is different from the last instruction
        (emitLastIns->idInsFmt() != fmt))    // or if the format is different from the last instruction
    {
        return false;
    }

    regNumber lastDst = emitLastIns->idReg1();
    regNumber lastSrc = emitLastIns->idReg2();

    // Check if we did same move in last instruction, side effects don't matter since they already happened
    if ((lastDst == dst) && (lastSrc == src))
    {
        JITDUMP("\n -- suppressing mov because last instruction already moved from src to dst register.\n");
        return true;
    }

    // Check if we did a switched mov in the last instruction and don't have a side effect
    if ((lastDst == src) && (lastSrc == dst) && !hasSideEffect)
    {
        JITDUMP("\n -- suppressing mov because last instruction already moved from dst to src register and the mov has "
                "no side-effects.\n");
        return true;
    }

    return false;
}

//------------------------------------------------------------------------
// EmitMovsxAsCwde: try to emit "movsxd rax, eax" and "movsx eax, ax" as
//     "cdqe" and "cwde" as a code size optimization.
//
// Arguments:
//     ins  - The instruction for the original mov
//     size - The size of the original mov
//     dst  - The destination register for the original mov
//     src  - The source register for the original mov
//
// Return Value:
//     "true" if the optimization succeded, in which case the instruction can be
//     counted as emitted, "false" otherwise.
//
bool emitter::EmitMovsxAsCwde(instruction ins, emitAttr size, regNumber dst, regNumber src)
{
    // The short forms only exist for the accumulator extending into itself.
    if ((src == REG_EAX) && (src == dst))
    {
#ifdef TARGET_64BIT
        // "movsxd rax, eax".
        if ((ins == INS_movsxd) && (size == EA_4BYTE))
        {
            // "cdqe".
            emitIns(INS_cwde, EA_8BYTE);
            return true;
        }
#endif
        // "movsx eax, ax".
        if ((ins == INS_movsx) && (size == EA_2BYTE))
        {
            // "cwde".
            emitIns(INS_cwde, EA_4BYTE);
            return true;
        }
    }

    return false;
}

//------------------------------------------------------------------------
// emitIns_Mov: Emits a move instruction
//
// Arguments:
//    ins       -- The instruction being emitted
//    attr      -- The emit attribute
//    dstReg    -- The destination register
//    srcReg    -- The source register
//    canSkip   -- true if the move can be elided when dstReg == srcReg, otherwise false
//
void emitter::emitIns_Mov(instruction ins, emitAttr attr, regNumber dstReg, regNumber srcReg, bool canSkip)
{
    // Only move instructions can use emitIns_Mov
    assert(IsMovInstruction(ins));

#if DEBUG
    // Validate that the register classes match the specific move being emitted.
    switch (ins)
    {
        case INS_mov:
        case INS_movsx:
        case INS_movzx:
        {
            assert(isGeneralRegister(dstReg) && isGeneralRegister(srcReg));
            break;
        }

        case INS_movapd:
        case INS_movaps:
        case INS_movdqa:
        case INS_movdqu:
        case INS_movsdsse2:
        case INS_movss:
        case INS_movupd:
        case INS_movups:
        {
            assert(isFloatReg(dstReg) && isFloatReg(srcReg));
            break;
        }

        case INS_movd:
        {
            // movd crosses between the general and SIMD register files.
            assert(isFloatReg(dstReg) != isFloatReg(srcReg));
            break;
        }

#if defined(TARGET_AMD64)
        case INS_movq:
        {
            assert(isFloatReg(dstReg) && isFloatReg(srcReg));
            break;
        }

        case INS_movsxd:
        {
            assert(isGeneralRegister(dstReg) && isGeneralRegister(srcReg));
            break;
        }
#endif // TARGET_AMD64

        default:
        {
            unreached();
        }
    }
#endif

    emitAttr size = EA_SIZE(attr);

    assert(size <= EA_32BYTE);
    noway_assert(emitVerifyEncodable(ins, size, dstReg, srcReg));

    insFormat fmt = emitInsModeFormat(ins, IF_RRD_RRD);

    // Elide the move entirely when it is redundant.
    if (IsRedundantMov(ins, fmt, attr, dstReg, srcReg, canSkip))
    {
        return;
    }

    // Or emit the shorter cwde/cdqe equivalent when possible.
    if (EmitMovsxAsCwde(ins, size, dstReg, srcReg))
    {
        return;
    }

    UNATIVE_OFFSET sz = emitInsSizeRR(ins, dstReg, srcReg, attr);

    instrDesc* id = emitNewInstrSmall(attr);
    id->idIns(ins);
    id->idInsFmt(fmt);
    id->idReg1(dstReg);
    id->idReg2(srcReg);
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;
}

/*****************************************************************************
 *
 *  Add an instruction with two register operands.
*/ void emitter::emitIns_R_R(instruction ins, emitAttr attr, regNumber reg1, regNumber reg2) { if (IsMovInstruction(ins)) { assert(!"Please use emitIns_Mov() to correctly handle move elision"); emitIns_Mov(ins, attr, reg1, reg2, /* canSkip */ false); } emitAttr size = EA_SIZE(attr); assert(size <= EA_32BYTE); noway_assert(emitVerifyEncodable(ins, size, reg1, reg2)); UNATIVE_OFFSET sz = emitInsSizeRR(ins, reg1, reg2, attr); /* Special case: "XCHG" uses a different format */ insFormat fmt = (ins == INS_xchg) ? IF_RRW_RRW : emitInsModeFormat(ins, IF_RRD_RRD); instrDesc* id = emitNewInstrSmall(attr); id->idIns(ins); id->idInsFmt(fmt); id->idReg1(reg1); id->idReg2(reg2); id->idCodeSize(sz); dispIns(id); emitCurIGsize += sz; } /***************************************************************************** * * Add an instruction with two register operands and an integer constant. */ void emitter::emitIns_R_R_I(instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, int ival) { #ifdef TARGET_AMD64 // mov reg, imm64 is the only opcode which takes a full 8 byte immediate // all other opcodes take a sign-extended 4-byte immediate noway_assert(EA_SIZE(attr) < EA_8BYTE || !EA_IS_CNS_RELOC(attr)); #endif instrDesc* id = emitNewInstrSC(attr, ival); id->idIns(ins); id->idInsFmt(IF_RRW_RRW_CNS); id->idReg1(reg1); id->idReg2(reg2); code_t code = 0; switch (ins) { case INS_pextrb: case INS_pextrd: case INS_pextrq: case INS_pextrw_sse41: case INS_extractps: case INS_vextractf128: case INS_vextracti128: case INS_shld: case INS_shrd: { code = insCodeMR(ins); break; } case INS_psrldq: case INS_pslldq: { code = insCodeMI(ins); break; } default: { code = insCodeRM(ins); break; } } UNATIVE_OFFSET sz = emitInsSizeRR(id, code, ival); id->idCodeSize(sz); dispIns(id); emitCurIGsize += sz; } void emitter::emitIns_AR(instruction ins, emitAttr attr, regNumber base, int offs) { assert(ins == INS_prefetcht0 || ins == INS_prefetcht1 || ins == INS_prefetcht2 || ins == INS_prefetchnta); 
instrDesc* id = emitNewInstrAmd(attr, offs); id->idIns(ins); id->idInsFmt(IF_ARD); id->idAddr()->iiaAddrMode.amBaseReg = base; id->idAddr()->iiaAddrMode.amIndxReg = REG_NA; UNATIVE_OFFSET sz = emitInsSizeAM(id, insCodeMR(ins)); id->idCodeSize(sz); dispIns(id); emitCurIGsize += sz; } //------------------------------------------------------------------------ // emitIns_AR_R_R: emits the code for an instruction that takes a base memory register, two register operands // and that does not return a value // // Arguments: // ins -- The instruction being emitted // attr -- The emit attribute // targetReg -- The target register // op2Reg -- The register of the second operand // op3Reg -- The register of the third operand // base -- The base register used for the memory address (first operand) // offs -- The offset from base // void emitter::emitIns_AR_R_R( instruction ins, emitAttr attr, regNumber op2Reg, regNumber op3Reg, regNumber base, int offs) { assert(IsSSEOrAVXInstruction(ins)); assert(IsThreeOperandAVXInstruction(ins)); instrDesc* id = emitNewInstrAmd(attr, offs); id->idIns(ins); id->idReg1(op2Reg); id->idReg2(op3Reg); id->idInsFmt(IF_AWR_RRD_RRD); id->idAddr()->iiaAddrMode.amBaseReg = base; id->idAddr()->iiaAddrMode.amIndxReg = REG_NA; UNATIVE_OFFSET sz = emitInsSizeAM(id, insCodeMR(ins)); id->idCodeSize(sz); dispIns(id); emitCurIGsize += sz; } void emitter::emitIns_R_A(instruction ins, emitAttr attr, regNumber reg1, GenTreeIndir* indir) { ssize_t offs = indir->Offset(); instrDesc* id = emitNewInstrAmd(attr, offs); id->idIns(ins); id->idReg1(reg1); emitHandleMemOp(indir, id, IF_RRW_ARD, ins); UNATIVE_OFFSET sz = emitInsSizeAM(id, insCodeRM(ins)); id->idCodeSize(sz); dispIns(id); emitCurIGsize += sz; } void emitter::emitIns_R_A_I(instruction ins, emitAttr attr, regNumber reg1, GenTreeIndir* indir, int ival) { noway_assert(emitVerifyEncodable(ins, EA_SIZE(attr), reg1)); assert(IsSSEOrAVXInstruction(ins)); ssize_t offs = indir->Offset(); instrDesc* id = 
emitNewInstrAmdCns(attr, offs, ival); id->idIns(ins); id->idReg1(reg1); emitHandleMemOp(indir, id, IF_RRW_ARD_CNS, ins); UNATIVE_OFFSET sz = emitInsSizeAM(id, insCodeRM(ins), ival); id->idCodeSize(sz); dispIns(id); emitCurIGsize += sz; } void emitter::emitIns_R_AR_I(instruction ins, emitAttr attr, regNumber reg1, regNumber base, int offs, int ival) { noway_assert(emitVerifyEncodable(ins, EA_SIZE(attr), reg1)); assert(IsSSEOrAVXInstruction(ins)); instrDesc* id = emitNewInstrAmdCns(attr, offs, ival); id->idIns(ins); id->idReg1(reg1); id->idInsFmt(IF_RRW_ARD_CNS); id->idAddr()->iiaAddrMode.amBaseReg = base; id->idAddr()->iiaAddrMode.amIndxReg = REG_NA; UNATIVE_OFFSET sz = emitInsSizeAM(id, insCodeRM(ins), ival); id->idCodeSize(sz); dispIns(id); emitCurIGsize += sz; } void emitter::emitIns_R_C_I( instruction ins, emitAttr attr, regNumber reg1, CORINFO_FIELD_HANDLE fldHnd, int offs, int ival) { // Static always need relocs if (!jitStaticFldIsGlobAddr(fldHnd)) { attr = EA_SET_FLG(attr, EA_DSP_RELOC_FLG); } noway_assert(emitVerifyEncodable(ins, EA_SIZE(attr), reg1)); assert(IsSSEOrAVXInstruction(ins)); instrDesc* id = emitNewInstrCnsDsp(attr, ival, offs); id->idIns(ins); id->idInsFmt(IF_RRW_MRD_CNS); id->idReg1(reg1); id->idAddr()->iiaFieldHnd = fldHnd; UNATIVE_OFFSET sz = emitInsSizeCV(id, insCodeRM(ins), ival); id->idCodeSize(sz); dispIns(id); emitCurIGsize += sz; } void emitter::emitIns_R_S_I(instruction ins, emitAttr attr, regNumber reg1, int varx, int offs, int ival) { noway_assert(emitVerifyEncodable(ins, EA_SIZE(attr), reg1)); assert(IsSSEOrAVXInstruction(ins)); instrDesc* id = emitNewInstrCns(attr, ival); id->idIns(ins); id->idInsFmt(IF_RRW_SRD_CNS); id->idReg1(reg1); id->idAddr()->iiaLclVar.initLclVarAddr(varx, offs); #ifdef DEBUG id->idDebugOnlyInfo()->idVarRefOffs = emitVarRefOffs; #endif UNATIVE_OFFSET sz = emitInsSizeSV(id, insCodeRM(ins), varx, offs, ival); id->idCodeSize(sz); dispIns(id); emitCurIGsize += sz; } void emitter::emitIns_R_R_A(instruction ins, 
// Tail of a three-operand AVX emit routine whose signature head lies in the previous chunk
// (NOTE(review): presumably emitIns_R_R_A — confirm against the preceding lines).
// Builds an instrDesc for "reg1 = ins(reg2, [indir])", estimates its size, and appends it.
emitAttr attr, regNumber reg1, regNumber reg2, GenTreeIndir* indir)
{
    assert(IsSSEOrAVXInstruction(ins));
    assert(IsThreeOperandAVXInstruction(ins));

    ssize_t    offs = indir->Offset();
    instrDesc* id   = emitNewInstrAmd(attr, offs);

    id->idIns(ins);
    id->idReg1(reg1);
    id->idReg2(reg2);

    // Fills in the address-mode fields (and the format) from the indir tree.
    emitHandleMemOp(indir, id, IF_RWR_RRD_ARD, ins);

    UNATIVE_OFFSET sz = emitInsSizeAM(id, insCodeRM(ins));
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;
}

// emitIns_R_R_AR: emits a three-operand AVX instruction whose second source is [base + offs].
void emitter::emitIns_R_R_AR(instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, regNumber base, int offs)
{
    assert(IsSSEOrAVXInstruction(ins));
    assert(IsThreeOperandAVXInstruction(ins));

    instrDesc* id = emitNewInstrAmd(attr, offs);

    id->idIns(ins);
    id->idReg1(reg1);
    id->idReg2(reg2);

    id->idInsFmt(IF_RWR_RRD_ARD);
    id->idAddr()->iiaAddrMode.amBaseReg = base;
    id->idAddr()->iiaAddrMode.amIndxReg = REG_NA; // no index register: plain [base + offs]

    UNATIVE_OFFSET sz = emitInsSizeAM(id, insCodeRM(ins));
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;
}

//------------------------------------------------------------------------
// IsAVX2GatherInstruction: return true if the instruction is AVX2 Gather
//
// Arguments:
//    ins - the instruction to check
//
// Return Value:
//    true if the instruction is AVX2 Gather
//
bool IsAVX2GatherInstruction(instruction ins)
{
    switch (ins)
    {
        case INS_vpgatherdd:
        case INS_vpgatherdq:
        case INS_vpgatherqd:
        case INS_vpgatherqq:
        case INS_vgatherdps:
        case INS_vgatherdpd:
        case INS_vgatherqps:
        case INS_vgatherqpd:
            return true;
        default:
            return false;
    }
}

//------------------------------------------------------------------------
// emitIns_R_AR_R: Emits an AVX2 Gather instructions
//
// Arguments:
//    ins   - the instruction to emit
//    attr  - the instruction operand size
//    reg1  - the destination and first source operand
//    reg2  - the mask operand (encoded in VEX.vvvv)
//    base  - the base register of address to load
//    index - the index register of VSIB
//    scale - the scale number of VSIB
//    offs  - the offset added to the memory address from base
//
void emitter::emitIns_R_AR_R(instruction ins,
                             emitAttr    attr,
                             regNumber   reg1,
                             regNumber   reg2,
                             regNumber   base,
                             regNumber   index,
                             int         scale,
                             int         offs)
{
    assert(IsAVX2GatherInstruction(ins));

    instrDesc* id = emitNewInstrAmd(attr, offs);

    id->idIns(ins);
    id->idReg1(reg1);
    id->idReg2(reg2);

    id->idInsFmt(IF_RWR_ARD_RRD);
    id->idAddr()->iiaAddrMode.amBaseReg = base;
    id->idAddr()->iiaAddrMode.amIndxReg = index;
    // 'scale' arrives as a byte count and is re-encoded as an amScale code.
    id->idAddr()->iiaAddrMode.amScale = emitEncodeSize((emitAttr)scale);

    UNATIVE_OFFSET sz = emitInsSizeAM(id, insCodeRM(ins));
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;
}

// emitIns_R_R_C: emits a three-operand AVX instruction whose second source is a
// static field (fldHnd + offs).
void emitter::emitIns_R_R_C(
    instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, CORINFO_FIELD_HANDLE fldHnd, int offs)
{
    assert(IsSSEOrAVXInstruction(ins));
    assert(IsThreeOperandAVXInstruction(ins));

    // Static always need relocs
    if (!jitStaticFldIsGlobAddr(fldHnd))
    {
        attr = EA_SET_FLG(attr, EA_DSP_RELOC_FLG);
    }

    instrDesc* id = emitNewInstrDsp(attr, offs);

    id->idIns(ins);
    id->idInsFmt(IF_RWR_RRD_MRD);
    id->idReg1(reg1);
    id->idReg2(reg2);
    id->idAddr()->iiaFieldHnd = fldHnd;

    UNATIVE_OFFSET sz = emitInsSizeCV(id, insCodeRM(ins));
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;
}

/*****************************************************************************
 *
 *  Add an instruction with three register operands.
 */

void emitter::emitIns_R_R_R(instruction ins, emitAttr attr, regNumber targetReg, regNumber reg1, regNumber reg2)
{
    assert(IsSSEOrAVXInstruction(ins));
    assert(IsThreeOperandAVXInstruction(ins));

    instrDesc* id = emitNewInstr(attr);
    id->idIns(ins);
    id->idInsFmt(IF_RWR_RRD_RRD);
    id->idReg1(targetReg);
    id->idReg2(reg1);
    id->idReg3(reg2);

    UNATIVE_OFFSET sz = emitInsSizeRR(id, insCodeRM(ins));
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;
}

// emitIns_R_R_S: emits a three-operand AVX instruction whose second source is a
// stack-local slot (varx + offs).
void emitter::emitIns_R_R_S(instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, int varx, int offs)
{
    assert(IsSSEOrAVXInstruction(ins));
    assert(IsThreeOperandAVXInstruction(ins));

    instrDesc* id = emitNewInstr(attr);
    id->idIns(ins);
    id->idInsFmt(IF_RWR_RRD_SRD);
    id->idReg1(reg1);
    id->idReg2(reg2);
    id->idAddr()->iiaLclVar.initLclVarAddr(varx, offs);

#ifdef DEBUG
    id->idDebugOnlyInfo()->idVarRefOffs = emitVarRefOffs;
#endif

    UNATIVE_OFFSET sz = emitInsSizeSV(id, insCodeRM(ins), varx, offs);
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;
}

// emitIns_R_R_A_I: emits a three-operand AVX instruction with an indir memory
// operand and an immediate; the caller supplies the insFormat 'fmt'.
void emitter::emitIns_R_R_A_I(
    instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, GenTreeIndir* indir, int ival, insFormat fmt)
{
    assert(IsSSEOrAVXInstruction(ins));
    assert(IsThreeOperandAVXInstruction(ins));

    ssize_t    offs = indir->Offset();
    instrDesc* id   = emitNewInstrAmdCns(attr, offs, ival);

    id->idIns(ins);
    id->idReg1(reg1);
    id->idReg2(reg2);

    emitHandleMemOp(indir, id, fmt, ins);

    UNATIVE_OFFSET sz = emitInsSizeAM(id, insCodeRM(ins), ival);
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;
}

// emitIns_R_R_AR_I: emits a three-operand AVX instruction with a [base + offs]
// memory operand and an immediate.
void emitter::emitIns_R_R_AR_I(
    instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, regNumber base, int offs, int ival)
{
    assert(IsSSEOrAVXInstruction(ins));
    assert(IsThreeOperandAVXInstruction(ins));

    instrDesc* id = emitNewInstrAmdCns(attr, offs, ival);

    id->idIns(ins);
    id->idReg1(reg1);
    id->idReg2(reg2);

    id->idInsFmt(IF_RWR_RRD_ARD_CNS);
    id->idAddr()->iiaAddrMode.amBaseReg = base;
    id->idAddr()->iiaAddrMode.amIndxReg = REG_NA;

    UNATIVE_OFFSET sz = emitInsSizeAM(id, insCodeRM(ins), ival);
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;
}

// emitIns_R_R_C_I: emits a three-operand AVX instruction with a static-field
// memory operand (fldHnd + offs) and an immediate.
void emitter::emitIns_R_R_C_I(
    instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, CORINFO_FIELD_HANDLE fldHnd, int offs, int ival)
{
    assert(IsSSEOrAVXInstruction(ins));
    assert(IsThreeOperandAVXInstruction(ins));

    // Static always need relocs
    if (!jitStaticFldIsGlobAddr(fldHnd))
    {
        attr = EA_SET_FLG(attr, EA_DSP_RELOC_FLG);
    }

    instrDesc* id = emitNewInstrCnsDsp(attr, ival, offs);

    id->idIns(ins);
    id->idInsFmt(IF_RWR_RRD_MRD_CNS);
    id->idReg1(reg1);
    id->idReg2(reg2);
    id->idAddr()->iiaFieldHnd = fldHnd;

    UNATIVE_OFFSET sz = emitInsSizeCV(id, insCodeRM(ins), ival);
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;
}

/**********************************************************************************
 * emitIns_R_R_R_I: Add an instruction with three register operands and an immediate.
 *
 * Arguments:
 *    ins       - the instruction to add
 *    attr      - the emitter attribute for instruction
 *    targetReg - the target (destination) register
 *    reg1      - the first source register
 *    reg2      - the second source register
 *    ival      - the immediate value
 */

void emitter::emitIns_R_R_R_I(
    instruction ins, emitAttr attr, regNumber targetReg, regNumber reg1, regNumber reg2, int ival)
{
    assert(IsSSEOrAVXInstruction(ins));
    assert(IsThreeOperandAVXInstruction(ins));

    instrDesc* id = emitNewInstrCns(attr, ival);
    id->idIns(ins);
    id->idInsFmt(IF_RWR_RRD_RRD_CNS);
    id->idReg1(targetReg);
    id->idReg2(reg1);
    id->idReg3(reg2);

    code_t code = 0;

    // The size estimate needs the right opcode form: extract-style instructions
    // use the MR encoding, immediate shifts use the MI encoding, everything else RM.
    switch (ins)
    {
        case INS_pextrb:
        case INS_pextrd:
        case INS_pextrq:
        case INS_pextrw_sse41:
        case INS_extractps:
        case INS_vextractf128:
        case INS_vextracti128:
        {
            code = insCodeMR(ins);
            break;
        }

        case INS_psrldq:
        case INS_pslldq:
        {
            code = insCodeMI(ins);
            break;
        }

        default:
        {
            code = insCodeRM(ins);
            break;
        }
    }

    UNATIVE_OFFSET sz = emitInsSizeRR(id, code, ival);
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;
}

// emitIns_R_R_S_I: emits a three-operand AVX instruction with a stack-local
// memory operand (varx + offs) and an immediate. (Signature continues in the next chunk.)
void emitter::emitIns_R_R_S_I(
    instruction ins, emitAttr attr,
    regNumber reg1, regNumber reg2, int varx, int offs, int ival)
{
    assert(IsSSEOrAVXInstruction(ins));
    assert(IsThreeOperandAVXInstruction(ins));

    instrDesc* id = emitNewInstrCns(attr, ival);
    id->idIns(ins);
    id->idInsFmt(IF_RWR_RRD_SRD_CNS);
    id->idReg1(reg1);
    id->idReg2(reg2);
    id->idAddr()->iiaLclVar.initLclVarAddr(varx, offs);

#ifdef DEBUG
    id->idDebugOnlyInfo()->idVarRefOffs = emitVarRefOffs;
#endif

    UNATIVE_OFFSET sz = emitInsSizeSV(id, insCodeRM(ins), varx, offs, ival);
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;
}

//------------------------------------------------------------------------
// encodeXmmRegAsIval: Encodes a XMM register into imm[7:4] for use by a SIMD instruction
//
// Arguments
//    opReg -- The register being encoded
//
// Returns:
//    opReg encoded in imm[7:4]
//
static int encodeXmmRegAsIval(regNumber opReg)
{
    // AVX/AVX2 supports 4-reg format for vblendvps/vblendvpd/vpblendvb,
    // which encodes the fourth register into imm8[7:4]
    assert(opReg >= XMMBASE);
    int ival = (opReg - XMMBASE) << 4;

    assert((ival >= 0) && (ival <= 255));
    // NOTE(review): the int8_t cast makes values >= 128 negative on return even though
    // the assert above allows up to 255 — presumably intentional truncation to the
    // signed imm8 the emitter stores; confirm against the imm8 handling downstream.
    return (int8_t)ival;
}

//------------------------------------------------------------------------
// emitIns_R_R_A_R: emits the code for an instruction that takes a register operand, a GenTreeIndir address,
//                  another register operand, and that returns a value in register
//
// Arguments:
//    ins       -- The instruction being emitted
//    attr      -- The emit attribute
//    targetReg -- The target register
//    op1Reg    -- The register of the first operand
//    op3Reg    -- The register of the third operand (encoded in imm8[7:4])
//    indir     -- The GenTreeIndir used for the memory address
//
// Remarks:
//    op2 is built from indir
//
void emitter::emitIns_R_R_A_R(
    instruction ins, emitAttr attr, regNumber targetReg, regNumber op1Reg, regNumber op3Reg, GenTreeIndir* indir)
{
    assert(isAvxBlendv(ins));
    assert(UseVEXEncoding());

    int     ival = encodeXmmRegAsIval(op3Reg);
    ssize_t offs = indir->Offset();

    instrDesc* id = emitNewInstrAmdCns(attr, offs, ival);

    id->idIns(ins);
    id->idReg1(targetReg);
    id->idReg2(op1Reg);

    emitHandleMemOp(indir, id, IF_RWR_RRD_ARD_RRD, ins);

    UNATIVE_OFFSET sz = emitInsSizeAM(id, insCodeRM(ins), ival);
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;
}

//------------------------------------------------------------------------
// emitIns_R_R_AR_R: emits the code for an instruction that takes a register operand, a base memory
//                   register, another register operand, and that returns a value in register
//
// Arguments:
//    ins       -- The instruction being emitted
//    attr      -- The emit attribute
//    targetReg -- The target register
//    op1Reg    -- The register of the first operands
//    op3Reg    -- The register of the third operand (encoded in imm8[7:4])
//    base      -- The base register used for the memory address
//    offs      -- The offset added to the memory address from base
//
// Remarks:
//    op2 is built from base + offs
//
void emitter::emitIns_R_R_AR_R(
    instruction ins, emitAttr attr, regNumber targetReg, regNumber op1Reg, regNumber op3Reg, regNumber base, int offs)
{
    assert(isAvxBlendv(ins));
    assert(UseVEXEncoding());

    int ival = encodeXmmRegAsIval(op3Reg);

    instrDesc* id = emitNewInstrAmdCns(attr, offs, ival);

    id->idIns(ins);
    id->idReg1(targetReg);
    id->idReg2(op1Reg);

    id->idInsFmt(IF_RWR_RRD_ARD_RRD);
    id->idAddr()->iiaAddrMode.amBaseReg = base;
    id->idAddr()->iiaAddrMode.amIndxReg = REG_NA;

    UNATIVE_OFFSET sz = emitInsSizeAM(id, insCodeRM(ins), ival);
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;
}

//------------------------------------------------------------------------
// emitIns_R_R_C_R: emits the code for an instruction that takes a register operand, a field handle +
//                  offset, another register operand, and that returns a value in register
//
// Arguments:
//    ins       -- The instruction being emitted
//    attr      -- The emit attribute
//    targetReg -- The target register
//    op1Reg    -- The register of the first operand
//    op3Reg    -- The register of the third operand
//    fldHnd    -- The CORINFO_FIELD_HANDLE used for the memory address
//    offs      -- The offset
//                 added to the memory address from fldHnd
//
// Remarks:
//    op2 is built from fldHnd + offs
//
void emitter::emitIns_R_R_C_R(instruction          ins,
                              emitAttr             attr,
                              regNumber            targetReg,
                              regNumber            op1Reg,
                              regNumber            op3Reg,
                              CORINFO_FIELD_HANDLE fldHnd,
                              int                  offs)
{
    assert(isAvxBlendv(ins));
    assert(UseVEXEncoding());

    // Static always need relocs
    if (!jitStaticFldIsGlobAddr(fldHnd))
    {
        attr = EA_SET_FLG(attr, EA_DSP_RELOC_FLG);
    }

    int ival = encodeXmmRegAsIval(op3Reg);

    instrDesc* id = emitNewInstrCnsDsp(attr, ival, offs);

    id->idIns(ins);
    id->idReg1(targetReg);
    id->idReg2(op1Reg);

    id->idInsFmt(IF_RWR_RRD_MRD_RRD);
    id->idAddr()->iiaFieldHnd = fldHnd;

    UNATIVE_OFFSET sz = emitInsSizeCV(id, insCodeRM(ins), ival);
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;
}

//------------------------------------------------------------------------
// emitIns_R_R_S_R: emits the code for a instruction that takes a register operand, a variable index +
//                  offset, another register operand, and that returns a value in register
//
// Arguments:
//    ins       -- The instruction being emitted
//    attr      -- The emit attribute
//    targetReg -- The target register
//    op1Reg    -- The register of the first operand
//    op3Reg    -- The register of the third operand
//    varx      -- The variable index used for the memory address
//    offs      -- The offset added to the memory address from varx
//
// Remarks:
//    op2 is built from varx + offs
//
void emitter::emitIns_R_R_S_R(
    instruction ins, emitAttr attr, regNumber targetReg, regNumber op1Reg, regNumber op3Reg, int varx, int offs)
{
    assert(isAvxBlendv(ins));
    assert(UseVEXEncoding());

    int ival = encodeXmmRegAsIval(op3Reg);

    instrDesc* id = emitNewInstrCns(attr, ival);

    id->idIns(ins);
    id->idReg1(targetReg);
    id->idReg2(op1Reg);

    id->idInsFmt(IF_RWR_RRD_SRD_RRD);
    id->idAddr()->iiaLclVar.initLclVarAddr(varx, offs);

    UNATIVE_OFFSET sz = emitInsSizeSV(id, insCodeRM(ins), varx, offs, ival);
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;
}

// emitIns_R_R_R_R: emits a four-register AVX blendv-style instruction; the fourth
// register is carried in the immediate via encodeXmmRegAsIval.
void emitter::emitIns_R_R_R_R(
    instruction ins, emitAttr attr, regNumber targetReg, regNumber reg1, regNumber reg2, regNumber reg3)
{
    assert(isAvxBlendv(ins));
    assert(UseVEXEncoding());

    int ival = encodeXmmRegAsIval(reg3);

    instrDesc* id = emitNewInstrCns(attr, ival);
    id->idIns(ins);
    id->idInsFmt(IF_RWR_RRD_RRD_RRD);
    id->idReg1(targetReg);
    id->idReg2(reg1);
    id->idReg3(reg2);
    id->idReg4(reg3);

    UNATIVE_OFFSET sz = emitInsSizeRR(id, insCodeRM(ins), ival);
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;
}

/*****************************************************************************
 *
 *  Add an instruction with a register + static member operands.
 */
void emitter::emitIns_R_C(instruction ins, emitAttr attr, regNumber reg, CORINFO_FIELD_HANDLE fldHnd, int offs)
{
    // Static always need relocs
    if (!jitStaticFldIsGlobAddr(fldHnd))
    {
        attr = EA_SET_FLG(attr, EA_DSP_RELOC_FLG);
    }

    emitAttr size = EA_SIZE(attr);

    assert(size <= EA_32BYTE);
    noway_assert(emitVerifyEncodable(ins, size, reg));

    UNATIVE_OFFSET sz;
    instrDesc*     id;

    // Are we MOV'ing the offset of the class variable into EAX?
    if (EA_IS_OFFSET(attr))
    {
        id = emitNewInstrDsp(EA_1BYTE, offs);
        id->idIns(ins);
        id->idInsFmt(IF_RWR_MRD_OFF);
        id->idReg1(reg);

        assert(ins == INS_mov && reg == REG_EAX);

        // Special case: "mov eax, [addr]" is smaller
        sz = 1 + TARGET_POINTER_SIZE;
    }
    else
    {
        insFormat fmt = emitInsModeFormat(ins, IF_RRD_MRD);

        id = emitNewInstrDsp(attr, offs);
        id->idIns(ins);
        id->idInsFmt(fmt);
        id->idReg1(reg);

#ifdef TARGET_X86
        // Special case: "mov eax, [addr]" is smaller.
        // This case is not enabled for amd64 as it always uses RIP relative addressing
        // and it results in smaller instruction size than encoding 64-bit addr in the
        // instruction.
        if (ins == INS_mov && reg == REG_EAX)
        {
            sz = 1 + TARGET_POINTER_SIZE;
            if (size == EA_2BYTE)
                sz += 1; // operand-size prefix
        }
        else
#endif // TARGET_X86
        {
            sz = emitInsSizeCV(id, insCodeRM(ins));
        }

        // Special case: mov reg, fs:[ddd]
        if (fldHnd == FLD_GLOBAL_FS)
        {
            sz += 1; // segment-override prefix byte
        }
    }

    id->idCodeSize(sz);

    id->idAddr()->iiaFieldHnd = fldHnd;

    dispIns(id);
    emitCurIGsize += sz;
}

/*****************************************************************************
 *
 *  Add an instruction with a static member + register operands.
 */

void emitter::emitIns_C_R(instruction ins, emitAttr attr, CORINFO_FIELD_HANDLE fldHnd, regNumber reg, int offs)
{
    // Static always need relocs
    if (!jitStaticFldIsGlobAddr(fldHnd))
    {
        attr = EA_SET_FLG(attr, EA_DSP_RELOC_FLG);
    }

    emitAttr size = EA_SIZE(attr);

#if defined(TARGET_X86)
    // For x86 it is valid to storeind a double sized operand in an xmm reg to memory
    assert(size <= EA_8BYTE);
#else
    assert(size <= EA_PTRSIZE);
#endif

    noway_assert(emitVerifyEncodable(ins, size, reg));

    instrDesc* id  = emitNewInstrDsp(attr, offs);
    insFormat  fmt = emitInsModeFormat(ins, IF_MRD_RRD);

    id->idIns(ins);
    id->idInsFmt(fmt);
    id->idReg1(reg);

    UNATIVE_OFFSET sz;

#ifdef TARGET_X86
    // Special case: "mov [addr], EAX" is smaller.
    // This case is not enabled for amd64 as it always uses RIP relative addressing
    // and it will result in smaller instruction size than encoding 64-bit addr in
    // the instruction.
    if (ins == INS_mov && reg == REG_EAX)
    {
        sz = 1 + TARGET_POINTER_SIZE;
        if (size == EA_2BYTE)
            sz += 1; // operand-size prefix

        // REX prefix
        if (TakesRexWPrefix(ins, attr) || IsExtendedReg(reg, attr))
        {
            sz += emitGetRexPrefixSize(ins);
        }
    }
    else
#endif // TARGET_X86
    {
        sz = emitInsSizeCV(id, insCodeMR(ins));
    }

    // Special case: mov reg, fs:[ddd]
    if (fldHnd == FLD_GLOBAL_FS)
    {
        sz += 1;
    }

    id->idCodeSize(sz);

    id->idAddr()->iiaFieldHnd = fldHnd;

    dispIns(id);
    emitCurIGsize += sz;
}

/*****************************************************************************
 *
 *  Add an instruction with a static member + constant.
 */

void emitter::emitIns_C_I(instruction ins, emitAttr attr, CORINFO_FIELD_HANDLE fldHnd, int offs, int val)
{
    // Static always need relocs
    if (!jitStaticFldIsGlobAddr(fldHnd))
    {
        attr = EA_SET_FLG(attr, EA_DSP_RELOC_FLG);
    }

    insFormat fmt;

    switch (ins)
    {
        // Shift-by-immediate forms: a shift count of 1 uses a different opcode
        // (handled elsewhere), and the count is masked to 7 bits.
        case INS_rcl_N:
        case INS_rcr_N:
        case INS_rol_N:
        case INS_ror_N:
        case INS_shl_N:
        case INS_shr_N:
        case INS_sar_N:
            assert(val != 1);
            fmt = IF_MRW_SHF;
            val &= 0x7F;
            break;

        default:
            fmt = emitInsModeFormat(ins, IF_MRD_CNS);
            break;
    }

    instrDesc* id = emitNewInstrCnsDsp(attr, val, offs);
    id->idIns(ins);
    id->idInsFmt(fmt);
    id->idAddr()->iiaFieldHnd = fldHnd;

    code_t         code = insCodeMI(ins);
    UNATIVE_OFFSET sz   = emitInsSizeCV(id, code, val);

    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;
}

// emitIns_J_S: emits a store of a basic-block label address into a stack-local slot.
void emitter::emitIns_J_S(instruction ins, emitAttr attr, BasicBlock* dst, int varx, int offs)
{
    assert(ins == INS_mov);
    assert(dst->bbFlags & BBF_HAS_LABEL);

    instrDescLbl* id = emitNewInstrLbl();

    id->idIns(ins);
    id->idInsFmt(IF_SWR_LABEL);
    id->idAddr()->iiaBBlabel = dst;

    /* The label reference is always long */

    id->idjShort    = 0;
    id->idjKeepLong = 1;

    /* Record the current IG and offset within it */

    id->idjIG   = emitCurIG;
    id->idjOffs = emitCurIGsize;

    /* Append this instruction to this IG's jump list */

    id->idjNext      = emitCurIGjmpList;
    emitCurIGjmpList = id;

    UNATIVE_OFFSET sz = sizeof(INT32) + emitInsSizeSV(id, insCodeMI(ins), varx, offs);
    id->dstLclVar.initLclVarAddr(varx, offs);
#ifdef DEBUG
    id->idDebugOnlyInfo()->idVarRefOffs = emitVarRefOffs;
#endif

#if EMITTER_STATS
    emitTotalIGjmps++;
#endif

#ifndef TARGET_AMD64
    // Storing the address of a basicBlock will need a reloc
    // as the instruction uses the absolute address,
    // not a relative address.
    //
    // On Amd64, Absolute code addresses should always go through a reloc to
    // to be encoded as RIP rel32 offset.
    if (emitComp->opts.compReloc)
#endif
    {
        id->idSetIsDspReloc();
    }

    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;
}

/*****************************************************************************
 *
 *  Add a label instruction.
 */
void emitter::emitIns_R_L(instruction ins, emitAttr attr, BasicBlock* dst, regNumber reg)
{
    assert(ins == INS_lea);
    assert(dst->bbFlags & BBF_HAS_LABEL);

    instrDescJmp* id = emitNewInstrJmp();

    id->idIns(ins);
    id->idReg1(reg);
    id->idInsFmt(IF_RWR_LABEL);
    id->idOpSize(EA_SIZE(attr)); // emitNewInstrJmp() sets the size (incorrectly) to EA_1BYTE
    id->idAddr()->iiaBBlabel = dst;

    /* The label reference is always long */

    id->idjShort    = 0;
    id->idjKeepLong = 1;

    /* Record the current IG and offset within it */

    id->idjIG   = emitCurIG;
    id->idjOffs = emitCurIGsize;

    /* Append this instruction to this IG's jump list */

    id->idjNext      = emitCurIGjmpList;
    emitCurIGjmpList = id;

#ifdef DEBUG
    // Mark the catch return
    if (emitComp->compCurBB->bbJumpKind == BBJ_EHCATCHRET)
    {
        id->idDebugOnlyInfo()->idCatchRet = true;
    }
#endif // DEBUG

#if EMITTER_STATS
    emitTotalIGjmps++;
#endif

    // Set the relocation flags - these give hint to zap to perform
    // relocation of the specified 32bit address.
    //
    // Note the relocation flags influence the size estimate.
    id->idSetRelocFlags(attr);

    UNATIVE_OFFSET sz = emitInsSizeAM(id, insCodeRM(ins));
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;
}

/*****************************************************************************
 *
 *  The following adds instructions referencing address modes.
 */

// emitIns_I_AR: emits an instruction with an immediate and a [reg + disp] memory operand.
void emitter::emitIns_I_AR(instruction ins, emitAttr attr, int val, regNumber reg, int disp)
{
    assert((CodeGen::instIsFP(ins) == false) && (EA_SIZE(attr) <= EA_8BYTE));

#ifdef TARGET_AMD64
    // mov reg, imm64 is the only opcode which takes a full 8 byte immediate
    // all other opcodes take a sign-extended 4-byte immediate
    noway_assert(EA_SIZE(attr) < EA_8BYTE || !EA_IS_CNS_RELOC(attr));
#endif

    insFormat fmt;

    switch (ins)
    {
        case INS_rcl_N:
        case INS_rcr_N:
        case INS_rol_N:
        case INS_ror_N:
        case INS_shl_N:
        case INS_shr_N:
        case INS_sar_N:
            assert(val != 1);
            fmt = IF_ARW_SHF;
            val &= 0x7F;
            break;

        default:
            fmt = emitInsModeFormat(ins, IF_ARD_CNS);
            break;
    }

    /*
    Useful if you want to trap moves with 0 constant
    if (ins == INS_mov && val == 0 && EA_SIZE(attr) >= EA_4BYTE)
    {
        printf("MOV 0\n");
    }
    */

    UNATIVE_OFFSET sz;
    instrDesc*     id = emitNewInstrAmdCns(attr, disp, val);
    id->idIns(ins);
    id->idInsFmt(fmt);

    id->idAddr()->iiaAddrMode.amBaseReg = reg;
    id->idAddr()->iiaAddrMode.amIndxReg = REG_NA;

    assert(emitGetInsAmdAny(id) == disp); // make sure "disp" is stored properly

    sz = emitInsSizeAM(id, insCodeMI(ins), val);
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;
}

// emitIns_I_AI: emits an instruction with an immediate and an absolute-address
// memory operand (no base or index register).
void emitter::emitIns_I_AI(instruction ins, emitAttr attr, int val, ssize_t disp)
{
    assert((CodeGen::instIsFP(ins) == false) && (EA_SIZE(attr) <= EA_8BYTE));

#ifdef TARGET_AMD64
    // mov reg, imm64 is the only opcode which takes a full 8 byte immediate
    // all other opcodes take a sign-extended 4-byte immediate
    noway_assert(EA_SIZE(attr) < EA_8BYTE || !EA_IS_CNS_RELOC(attr));
#endif

    insFormat fmt;

    switch (ins)
    {
        case INS_rcl_N:
        case INS_rcr_N:
        case INS_rol_N:
        case INS_ror_N:
        case INS_shl_N:
        case INS_shr_N:
        case INS_sar_N:
            assert(val != 1);
            fmt = IF_ARW_SHF;
            val &= 0x7F;
            break;

        default:
            fmt = emitInsModeFormat(ins, IF_ARD_CNS);
            break;
    }

    /*
    Useful if you want to trap moves with 0 constant
    if (ins == INS_mov && val == 0 && EA_SIZE(attr) >= EA_4BYTE)
    {
        printf("MOV 0\n");
    }
    */

    UNATIVE_OFFSET sz;
    instrDesc*     id =
        emitNewInstrAmdCns(attr, disp, val);
    id->idIns(ins);
    id->idInsFmt(fmt);

    id->idAddr()->iiaAddrMode.amBaseReg = REG_NA;
    id->idAddr()->iiaAddrMode.amIndxReg = REG_NA;

    assert(emitGetInsAmdAny(id) == disp); // make sure "disp" is stored properly

    sz = emitInsSizeAM(id, insCodeMI(ins), val);
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;
}

// emitIns_R_AR: convenience wrapper — [base + disp] with no index register.
void emitter::emitIns_R_AR(instruction ins, emitAttr attr, regNumber reg, regNumber base, int disp)
{
    emitIns_R_ARX(ins, attr, reg, base, REG_NA, 1, disp);
}

// emitIns_R_AI: emits an instruction reading an absolute address into a register.
void emitter::emitIns_R_AI(instruction ins, emitAttr attr, regNumber ireg, ssize_t disp)
{
    assert((CodeGen::instIsFP(ins) == false) && (EA_SIZE(attr) <= EA_8BYTE) && (ireg != REG_NA));
    noway_assert(emitVerifyEncodable(ins, EA_SIZE(attr), ireg));

    UNATIVE_OFFSET sz;
    instrDesc*     id  = emitNewInstrAmd(attr, disp);
    insFormat      fmt = emitInsModeFormat(ins, IF_RRD_ARD);

    id->idIns(ins);
    id->idInsFmt(fmt);
    id->idReg1(ireg);

    id->idAddr()->iiaAddrMode.amBaseReg = REG_NA;
    id->idAddr()->iiaAddrMode.amIndxReg = REG_NA;

    assert(emitGetInsAmdAny(id) == disp); // make sure "disp" is stored properly

    sz = emitInsSizeAM(id, insCodeRM(ins));
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;
}

// emitIns_AR_R: convenience wrapper — store to [base + disp] with no index register.
void emitter::emitIns_AR_R(instruction ins, emitAttr attr, regNumber reg, regNumber base, cnsval_ssize_t disp)
{
    emitIns_ARX_R(ins, attr, reg, base, REG_NA, 1, disp);
}

//------------------------------------------------------------------------
// emitIns_S_R_I: emits the code for an instruction that takes a stack operand,
//                a register operand, and an immediate.
//
// Arguments:
//    ins    - The instruction being emitted
//    attr   - The emit attribute
//    varNum - The varNum of the stack operand
//    offs   - The offset for the stack operand
//    reg    - The register operand
//    ival   - The immediate value
//
void emitter::emitIns_S_R_I(instruction ins, emitAttr attr, int varNum, int offs, regNumber reg, int ival)
{
    // This is only used for INS_vextracti128 and INS_vextractf128, and for these 'ival' must be 0 or 1.
    assert(ins == INS_vextracti128 || ins == INS_vextractf128);
    assert((ival == 0) || (ival == 1));

    instrDesc* id = emitNewInstrAmdCns(attr, 0, ival);

    id->idIns(ins);
    id->idInsFmt(IF_SWR_RRD_CNS);
    id->idReg1(reg);
    id->idAddr()->iiaLclVar.initLclVarAddr(varNum, offs);
#ifdef DEBUG
    id->idDebugOnlyInfo()->idVarRefOffs = emitVarRefOffs;
#endif

    UNATIVE_OFFSET sz = emitInsSizeSV(id, insCodeMR(ins), varNum, offs, ival);
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;
}

// emitIns_A_R_I: emits a vextracti128/vextractf128 store to an indir memory operand.
void emitter::emitIns_A_R_I(instruction ins, emitAttr attr, GenTreeIndir* indir, regNumber reg, int imm)
{
    assert((ins == INS_vextracti128) || (ins == INS_vextractf128));
    assert(attr == EA_32BYTE);
    assert(reg != REG_NA);

    instrDesc* id = emitNewInstrAmdCns(attr, indir->Offset(), imm);
    id->idIns(ins);
    id->idReg1(reg);
    emitHandleMemOp(indir, id, IF_AWR_RRD_CNS, ins);
    UNATIVE_OFFSET size = emitInsSizeAM(id, insCodeMR(ins), imm);
    id->idCodeSize(size);
    dispIns(id);
    emitCurIGsize += size;
}

// emitIns_AI_R: emits an instruction storing a register (or none) to an absolute address.
void emitter::emitIns_AI_R(instruction ins, emitAttr attr, regNumber ireg, ssize_t disp)
{
    UNATIVE_OFFSET sz;
    instrDesc*     id = emitNewInstrAmd(attr, disp);
    insFormat      fmt;

    if (ireg == REG_NA)
    {
        fmt = emitInsModeFormat(ins, IF_ARD);
    }
    else
    {
        fmt = emitInsModeFormat(ins, IF_ARD_RRD);

        assert((CodeGen::instIsFP(ins) == false) && (EA_SIZE(attr) <= EA_8BYTE));
        noway_assert(emitVerifyEncodable(ins, EA_SIZE(attr), ireg));

        id->idReg1(ireg);
    }

    id->idIns(ins);
    id->idInsFmt(fmt);

    id->idAddr()->iiaAddrMode.amBaseReg = REG_NA;
    id->idAddr()->iiaAddrMode.amIndxReg = REG_NA;

    assert(emitGetInsAmdAny(id) == disp); // make sure "disp" is stored properly

    sz = emitInsSizeAM(id, insCodeMR(ins));
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;

    emitAdjustStackDepthPushPop(ins);
}

// emitIns_I_ARR: emits an instruction with an immediate and a [reg + rg2 + disp]
// memory operand (scale 1).
void emitter::emitIns_I_ARR(instruction ins, emitAttr attr, int val, regNumber reg, regNumber rg2, int disp)
{
    assert((CodeGen::instIsFP(ins) == false) && (EA_SIZE(attr) <= EA_8BYTE));

#ifdef TARGET_AMD64
    // mov reg, imm64 is the only opcode which takes a full 8 byte
    // immediate
    // all other opcodes take a sign-extended 4-byte immediate
    noway_assert(EA_SIZE(attr) < EA_8BYTE || !EA_IS_CNS_RELOC(attr));
#endif

    insFormat fmt;

    switch (ins)
    {
        case INS_rcl_N:
        case INS_rcr_N:
        case INS_rol_N:
        case INS_ror_N:
        case INS_shl_N:
        case INS_shr_N:
        case INS_sar_N:
            assert(val != 1);
            fmt = IF_ARW_SHF;
            val &= 0x7F;
            break;

        default:
            fmt = emitInsModeFormat(ins, IF_ARD_CNS);
            break;
    }

    UNATIVE_OFFSET sz;
    instrDesc*     id = emitNewInstrAmdCns(attr, disp, val);
    id->idIns(ins);
    id->idInsFmt(fmt);

    id->idAddr()->iiaAddrMode.amBaseReg = reg;
    id->idAddr()->iiaAddrMode.amIndxReg = rg2;
    id->idAddr()->iiaAddrMode.amScale   = emitter::OPSZ1;

    assert(emitGetInsAmdAny(id) == disp); // make sure "disp" is stored properly

    sz = emitInsSizeAM(id, insCodeMI(ins), val);
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;
}

// emitIns_R_ARR: convenience wrapper — load from [base + index + disp] (scale 1).
void emitter::emitIns_R_ARR(instruction ins, emitAttr attr, regNumber reg, regNumber base, regNumber index, int disp)
{
    emitIns_R_ARX(ins, attr, reg, base, index, 1, disp);
}

// emitIns_ARR_R: convenience wrapper — store to [base + index + disp] (scale 1).
void emitter::emitIns_ARR_R(instruction ins, emitAttr attr, regNumber reg, regNumber base, regNumber index, int disp)
{
    emitIns_ARX_R(ins, attr, reg, base, index, 1, disp);
}

// emitIns_I_ARX: emits an instruction with an immediate and a
// [reg + rg2 * mul + disp] scaled-index memory operand.
void emitter::emitIns_I_ARX(
    instruction ins, emitAttr attr, int val, regNumber reg, regNumber rg2, unsigned mul, int disp)
{
    assert((CodeGen::instIsFP(ins) == false) && (EA_SIZE(attr) <= EA_8BYTE));

#ifdef TARGET_AMD64
    // mov reg, imm64 is the only opcode which takes a full 8 byte immediate
    // all other opcodes take a sign-extended 4-byte immediate
    noway_assert(EA_SIZE(attr) < EA_8BYTE || !EA_IS_CNS_RELOC(attr));
#endif

    insFormat fmt;

    switch (ins)
    {
        case INS_rcl_N:
        case INS_rcr_N:
        case INS_rol_N:
        case INS_ror_N:
        case INS_shl_N:
        case INS_shr_N:
        case INS_sar_N:
            assert(val != 1);
            fmt = IF_ARW_SHF;
            val &= 0x7F;
            break;

        default:
            fmt = emitInsModeFormat(ins, IF_ARD_CNS);
            break;
    }

    UNATIVE_OFFSET sz;
    instrDesc*     id = emitNewInstrAmdCns(attr, disp, val);
    id->idIns(ins);
    id->idInsFmt(fmt);

    id->idAddr()->iiaAddrMode.amBaseReg = reg;
    id->idAddr()->iiaAddrMode.amIndxReg = rg2;
    id->idAddr()->iiaAddrMode.amScale   = emitEncodeScale(mul);

    assert(emitGetInsAmdAny(id) == disp); // make sure "disp" is stored properly

    sz = emitInsSizeAM(id, insCodeMI(ins), val);
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;
}

// emitIns_R_ARX: emits a load of [base + index * scale + disp] into 'reg'.
void emitter::emitIns_R_ARX(
    instruction ins, emitAttr attr, regNumber reg, regNumber base, regNumber index, unsigned scale, int disp)
{
    assert(!CodeGen::instIsFP(ins) && (EA_SIZE(attr) <= EA_32BYTE) && (reg != REG_NA));
    noway_assert(emitVerifyEncodable(ins, EA_SIZE(attr), reg));

    // "lea reg, [reg]" is a no-op: drop it rather than emit it.
    if ((ins == INS_lea) && (reg == base) && (index == REG_NA) && (disp == 0))
    {
        // Maybe the emitter is not the common place for this optimization, but it's a better choke point
        // for all the emitIns(ins, tree), we would have to be analyzing at each call site
        //
        return;
    }

    UNATIVE_OFFSET sz;
    instrDesc*     id  = emitNewInstrAmd(attr, disp);
    insFormat      fmt = emitInsModeFormat(ins, IF_RRD_ARD);

    id->idIns(ins);
    id->idInsFmt(fmt);
    id->idReg1(reg);

    id->idAddr()->iiaAddrMode.amBaseReg = base;
    id->idAddr()->iiaAddrMode.amIndxReg = index;
    id->idAddr()->iiaAddrMode.amScale   = emitEncodeScale(scale);

    assert(emitGetInsAmdAny(id) == disp); // make sure "disp" is stored properly

    sz = emitInsSizeAM(id, insCodeRM(ins));
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;
}

// emitIns_ARX_R: emits a store of 'reg' (or a reg-less op) to [base + index * scale + disp].
void emitter::emitIns_ARX_R(
    instruction ins, emitAttr attr, regNumber reg, regNumber base, regNumber index, unsigned scale, cnsval_ssize_t disp)
{
    UNATIVE_OFFSET sz;
    instrDesc*     id = emitNewInstrAmd(attr, disp);
    insFormat      fmt;

    if (reg == REG_NA)
    {
        fmt = emitInsModeFormat(ins, IF_ARD);
    }
    else
    {
        fmt = emitInsModeFormat(ins, IF_ARD_RRD);

        noway_assert(emitVerifyEncodable(ins, EA_SIZE(attr), reg));
        assert(!CodeGen::instIsFP(ins) && (EA_SIZE(attr) <= EA_32BYTE));

        id->idReg1(reg);
    }

    id->idIns(ins);
    id->idInsFmt(fmt);

    id->idAddr()->iiaAddrMode.amBaseReg = base;
    id->idAddr()->iiaAddrMode.amIndxReg = index;
    id->idAddr()->iiaAddrMode.amScale   = emitEncodeScale(scale);
    assert(emitGetInsAmdAny(id) == disp); // make sure "disp" is stored properly

    sz = emitInsSizeAM(id, insCodeMR(ins));
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;

    emitAdjustStackDepthPushPop(ins);
}

// emitIns_I_AX: emits an instruction with an immediate and a [reg * mul + disp]
// index-only memory operand (no base register).
void emitter::emitIns_I_AX(instruction ins, emitAttr attr, int val, regNumber reg, unsigned mul, int disp)
{
    assert((CodeGen::instIsFP(ins) == false) && (EA_SIZE(attr) <= EA_8BYTE));

#ifdef TARGET_AMD64
    // mov reg, imm64 is the only opcode which takes a full 8 byte immediate
    // all other opcodes take a sign-extended 4-byte immediate
    noway_assert(EA_SIZE(attr) < EA_8BYTE || !EA_IS_CNS_RELOC(attr));
#endif

    insFormat fmt;

    switch (ins)
    {
        case INS_rcl_N:
        case INS_rcr_N:
        case INS_rol_N:
        case INS_ror_N:
        case INS_shl_N:
        case INS_shr_N:
        case INS_sar_N:
            assert(val != 1);
            fmt = IF_ARW_SHF;
            val &= 0x7F;
            break;

        default:
            fmt = emitInsModeFormat(ins, IF_ARD_CNS);
            break;
    }

    UNATIVE_OFFSET sz;
    instrDesc*     id = emitNewInstrAmdCns(attr, disp, val);
    id->idIns(ins);
    id->idInsFmt(fmt);

    id->idAddr()->iiaAddrMode.amBaseReg = REG_NA;
    id->idAddr()->iiaAddrMode.amIndxReg = reg;
    id->idAddr()->iiaAddrMode.amScale   = emitEncodeScale(mul);

    assert(emitGetInsAmdAny(id) == disp); // make sure "disp" is stored properly

    sz = emitInsSizeAM(id, insCodeMI(ins), val);
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;
}

// emitIns_R_AX: emits a load of [reg * mul + disp] (index-only address) into 'ireg'.
void emitter::emitIns_R_AX(instruction ins, emitAttr attr, regNumber ireg, regNumber reg, unsigned mul, int disp)
{
    assert((CodeGen::instIsFP(ins) == false) && (EA_SIZE(attr) <= EA_8BYTE) && (ireg != REG_NA));
    noway_assert(emitVerifyEncodable(ins, EA_SIZE(attr), ireg));

    UNATIVE_OFFSET sz;
    instrDesc*     id  = emitNewInstrAmd(attr, disp);
    insFormat      fmt = emitInsModeFormat(ins, IF_RRD_ARD);

    id->idIns(ins);
    id->idInsFmt(fmt);
    id->idReg1(ireg);

    id->idAddr()->iiaAddrMode.amBaseReg = REG_NA;
    id->idAddr()->iiaAddrMode.amIndxReg = reg;
    id->idAddr()->iiaAddrMode.amScale   = emitEncodeScale(mul);

    assert(emitGetInsAmdAny(id) == disp); // make sure "disp" is stored properly

    sz = emitInsSizeAM(id, insCodeRM(ins));
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;
}

// emitIns_AX_R: emits a store of 'ireg' (or a reg-less op) to [reg * mul + disp].
void emitter::emitIns_AX_R(instruction ins, emitAttr attr, regNumber ireg, regNumber reg, unsigned mul, int disp)
{
    UNATIVE_OFFSET sz;
    instrDesc*     id = emitNewInstrAmd(attr, disp);
    insFormat      fmt;

    if (ireg == REG_NA)
    {
        fmt = emitInsModeFormat(ins, IF_ARD);
    }
    else
    {
        fmt = emitInsModeFormat(ins, IF_ARD_RRD);
        noway_assert(emitVerifyEncodable(ins, EA_SIZE(attr), ireg));
        assert((CodeGen::instIsFP(ins) == false) && (EA_SIZE(attr) <= EA_8BYTE));

        id->idReg1(ireg);
    }

    id->idIns(ins);
    id->idInsFmt(fmt);

    id->idAddr()->iiaAddrMode.amBaseReg = REG_NA;
    id->idAddr()->iiaAddrMode.amIndxReg = reg;
    id->idAddr()->iiaAddrMode.amScale   = emitEncodeScale(mul);

    assert(emitGetInsAmdAny(id) == disp); // make sure "disp" is stored properly

    sz = emitInsSizeAM(id, insCodeMR(ins));
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;

    emitAdjustStackDepthPushPop(ins);
}

//------------------------------------------------------------------------
// emitIns_SIMD_R_R_I: emits the code for an instruction that takes a register operand, an immediate operand
//                     and that returns a value in register
//
// Arguments:
//    ins       -- The instruction being emitted
//    attr      -- The emit attribute
//    targetReg -- The target register
//    op1Reg    -- The register of the first operand
//    ival      -- The immediate value
//
// Notes:
//    This will handle the required register copy if 'op1Reg' and 'targetReg' are not the same, and
//    the 3-operand format is not available.
//    This is not really SIMD-specific, but is currently only used in that context, as that's
//    where we frequently need to handle the case of generating 3-operand or 2-operand forms
//    depending on what target ISA is supported.
// void emitter::emitIns_SIMD_R_R_I(instruction ins, emitAttr attr, regNumber targetReg, regNumber op1Reg, int ival) { if (UseVEXEncoding() || IsDstSrcImmAvxInstruction(ins)) { emitIns_R_R_I(ins, attr, targetReg, op1Reg, ival); } else { emitIns_Mov(INS_movaps, attr, targetReg, op1Reg, /* canSkip */ true); emitIns_R_I(ins, attr, targetReg, ival); } } //------------------------------------------------------------------------ // emitIns_SIMD_R_R_A: emits the code for a SIMD instruction that takes a register operand, a GenTreeIndir address, // and that returns a value in register // // Arguments: // ins -- The instruction being emitted // attr -- The emit attribute // targetReg -- The target register // op1Reg -- The register of the first operand // indir -- The GenTreeIndir used for the memory address // void emitter::emitIns_SIMD_R_R_A( instruction ins, emitAttr attr, regNumber targetReg, regNumber op1Reg, GenTreeIndir* indir) { if (UseVEXEncoding()) { emitIns_R_R_A(ins, attr, targetReg, op1Reg, indir); } else { emitIns_Mov(INS_movaps, attr, targetReg, op1Reg, /* canSkip */ true); emitIns_R_A(ins, attr, targetReg, indir); } } //------------------------------------------------------------------------ // emitIns_SIMD_R_R_AR: emits the code for a SIMD instruction that takes a register operand, a base memory register, // and that returns a value in register // // Arguments: // ins -- The instruction being emitted // attr -- The emit attribute // targetReg -- The target register // op1Reg -- The register of the first operand // base -- The base register used for the memory address // offset -- The memory offset // void emitter::emitIns_SIMD_R_R_AR( instruction ins, emitAttr attr, regNumber targetReg, regNumber op1Reg, regNumber base, int offset) { if (UseVEXEncoding()) { emitIns_R_R_AR(ins, attr, targetReg, op1Reg, base, offset); } else { emitIns_Mov(INS_movaps, attr, targetReg, op1Reg, /* canSkip */ true); emitIns_R_AR(ins, attr, targetReg, base, offset); } } 
//------------------------------------------------------------------------
// emitIns_SIMD_R_R_C: emits the code for a SIMD instruction that takes a register operand, a field handle + offset,
// and that returns a value in register
//
// Arguments:
//    ins       -- The instruction being emitted
//    attr      -- The emit attribute
//    targetReg -- The target register
//    op1Reg    -- The register of the first operand
//    fldHnd    -- The CORINFO_FIELD_HANDLE used for the memory address
//    offs      -- The offset added to the memory address from fldHnd
//
void emitter::emitIns_SIMD_R_R_C(
    instruction ins, emitAttr attr, regNumber targetReg, regNumber op1Reg, CORINFO_FIELD_HANDLE fldHnd, int offs)
{
    if (UseVEXEncoding())
    {
        emitIns_R_R_C(ins, attr, targetReg, op1Reg, fldHnd, offs);
    }
    else
    {
        // Legacy SSE form is destructive: copy op1 into the target first (elided if same reg).
        emitIns_Mov(INS_movaps, attr, targetReg, op1Reg, /* canSkip */ true);
        emitIns_R_C(ins, attr, targetReg, fldHnd, offs);
    }
}

//------------------------------------------------------------------------
// emitIns_SIMD_R_R_R: emits the code for a SIMD instruction that takes two register operands, and that returns a
// value in register
//
// Arguments:
//    ins       -- The instruction being emitted
//    attr      -- The emit attribute
//    targetReg -- The target register
//    op1Reg    -- The register of the first operand
//    op2Reg    -- The register of the second operand
//
void emitter::emitIns_SIMD_R_R_R(
    instruction ins, emitAttr attr, regNumber targetReg, regNumber op1Reg, regNumber op2Reg)
{
    if (UseVEXEncoding())
    {
        emitIns_R_R_R(ins, attr, targetReg, op1Reg, op2Reg);
    }
    else
    {
        // Ensure we aren't overwriting op2
        assert((op2Reg != targetReg) || (op1Reg == targetReg));

        emitIns_Mov(INS_movaps, attr, targetReg, op1Reg, /* canSkip */ true);

        if (IsMovInstruction(ins))
        {
            // Emit the move unconditionally: the copy above may already have been elided,
            // so this one must not be skipped.
            emitIns_Mov(ins, attr, targetReg, op2Reg, /* canSkip */ false);
        }
        else
        {
            emitIns_R_R(ins, attr, targetReg, op2Reg);
        }
    }
}

//------------------------------------------------------------------------
// emitIns_SIMD_R_R_S: emits the code for a SIMD instruction that takes a register operand, a variable index + offset,
// and that returns a value in register
//
// Arguments:
//    ins       -- The instruction being emitted
//    attr      -- The emit attribute
//    targetReg -- The target register
//    op1Reg    -- The register of the first operand
//    varx      -- The variable index used for the memory address
//    offs      -- The offset added to the memory address from varx
//
void emitter::emitIns_SIMD_R_R_S(
    instruction ins, emitAttr attr, regNumber targetReg, regNumber op1Reg, int varx, int offs)
{
    if (UseVEXEncoding())
    {
        emitIns_R_R_S(ins, attr, targetReg, op1Reg, varx, offs);
    }
    else
    {
        // Legacy SSE form is destructive: copy op1 into the target first (elided if same reg).
        emitIns_Mov(INS_movaps, attr, targetReg, op1Reg, /* canSkip */ true);
        emitIns_R_S(ins, attr, targetReg, varx, offs);
    }
}

#ifdef FEATURE_HW_INTRINSICS
//------------------------------------------------------------------------
// emitIns_SIMD_R_R_A_I: emits the code for a SIMD instruction that takes a register operand, a GenTreeIndir address,
// an immediate operand, and that returns a value in register
//
// Arguments:
//    ins       -- The instruction being emitted
//    attr      -- The emit attribute
//    targetReg -- The target register
//    op1Reg    -- The register of the first operand
//    indir     -- The GenTreeIndir used for the memory address
//    ival      -- The immediate value
//
void emitter::emitIns_SIMD_R_R_A_I(
    instruction ins, emitAttr attr, regNumber targetReg, regNumber op1Reg, GenTreeIndir* indir, int ival)
{
    if (UseVEXEncoding())
    {
        emitIns_R_R_A_I(ins, attr, targetReg, op1Reg, indir, ival, IF_RWR_RRD_ARD_CNS);
    }
    else
    {
        // Legacy SSE form is destructive: copy op1 into the target first (elided if same reg).
        emitIns_Mov(INS_movaps, attr, targetReg, op1Reg, /* canSkip */ true);
        emitIns_R_A_I(ins, attr, targetReg, indir, ival);
    }
}

//------------------------------------------------------------------------
// emitIns_SIMD_R_R_AR_I: emits the code for a SIMD instruction that takes a register operand, a base memory register,
// an immediate operand, and that returns a value in register
//
// Arguments:
//    ins       -- The instruction being emitted
//    attr      -- The emit attribute
//    targetReg -- The target register
//    op1Reg    -- The register of the first operand
//    base      -- The base register used for the memory address
//    ival      -- The immediate value
//
void emitter::emitIns_SIMD_R_R_AR_I(
    instruction ins, emitAttr attr, regNumber targetReg, regNumber op1Reg, regNumber base, int ival)
{
    if (UseVEXEncoding())
    {
        emitIns_R_R_AR_I(ins, attr, targetReg, op1Reg, base, 0, ival);
    }
    else
    {
        // Legacy SSE form is destructive: copy op1 into the target first (elided if same reg).
        emitIns_Mov(INS_movaps, attr, targetReg, op1Reg, /* canSkip */ true);
        emitIns_R_AR_I(ins, attr, targetReg, base, 0, ival);
    }
}

//------------------------------------------------------------------------
// emitIns_SIMD_R_R_C_I: emits the code for a SIMD instruction that takes a register operand, a field handle + offset,
// an immediate operand, and that returns a value in register
//
// Arguments:
//    ins       -- The instruction being emitted
//    attr      -- The emit attribute
//    targetReg -- The target register
//    op1Reg    -- The register of the first operand
//    fldHnd    -- The CORINFO_FIELD_HANDLE used for the memory address
//    offs      -- The offset added to the memory address from fldHnd
//    ival      -- The immediate value
//
void emitter::emitIns_SIMD_R_R_C_I(instruction          ins,
                                   emitAttr             attr,
                                   regNumber            targetReg,
                                   regNumber            op1Reg,
                                   CORINFO_FIELD_HANDLE fldHnd,
                                   int                  offs,
                                   int                  ival)
{
    if (UseVEXEncoding())
    {
        emitIns_R_R_C_I(ins, attr, targetReg, op1Reg, fldHnd, offs, ival);
    }
    else
    {
        // Legacy SSE form is destructive: copy op1 into the target first (elided if same reg).
        emitIns_Mov(INS_movaps, attr, targetReg, op1Reg, /* canSkip */ true);
        emitIns_R_C_I(ins, attr, targetReg, fldHnd, offs, ival);
    }
}

//------------------------------------------------------------------------
// emitIns_SIMD_R_R_R_I: emits the code for a SIMD instruction that takes two register operands, an immediate operand,
// and that returns a value in register
//
// Arguments:
//    ins       -- The instruction being emitted
//    attr      -- The emit attribute
//    targetReg -- The target register
//    op1Reg    -- The register of the first operand
//    op2Reg    -- The register of the second operand
//    ival      -- The immediate value
//
void emitter::emitIns_SIMD_R_R_R_I(
    instruction ins, emitAttr attr, regNumber targetReg, regNumber op1Reg, regNumber op2Reg, int ival)
{
    if (UseVEXEncoding())
    {
        emitIns_R_R_R_I(ins, attr, targetReg, op1Reg, op2Reg, ival);
    }
    else
    {
        // Ensure we aren't overwriting op2
        assert((op2Reg != targetReg) || (op1Reg == targetReg));

        emitIns_Mov(INS_movaps, attr, targetReg, op1Reg, /* canSkip */ true);
        emitIns_R_R_I(ins, attr, targetReg, op2Reg, ival);
    }
}

//------------------------------------------------------------------------
// emitIns_SIMD_R_R_S_I: emits the code for a SIMD instruction that takes a register operand, a variable index + offset,
// an immediate operand, and that returns a value in register
//
// Arguments:
//    ins       -- The instruction being emitted
//    attr      -- The emit attribute
//    targetReg -- The target register
//    op1Reg    -- The register of the first operand
//    varx      -- The variable index used for the memory address
//    offs      -- The offset added to the memory address from varx
//    ival      -- The immediate value
//
void emitter::emitIns_SIMD_R_R_S_I(
    instruction ins, emitAttr attr, regNumber targetReg, regNumber op1Reg, int varx, int offs, int ival)
{
    if (UseVEXEncoding())
    {
        emitIns_R_R_S_I(ins, attr, targetReg, op1Reg, varx, offs, ival);
    }
    else
    {
        // Legacy SSE form is destructive: copy op1 into the target first (elided if same reg).
        emitIns_Mov(INS_movaps, attr, targetReg, op1Reg, /* canSkip */ true);
        emitIns_R_S_I(ins, attr, targetReg, varx, offs, ival);
    }
}

//------------------------------------------------------------------------
// emitIns_SIMD_R_R_R_A: emits the code for a SIMD instruction that takes two register operands, a GenTreeIndir address,
// and that returns a value in register
//
// Arguments:
//    ins       -- The instruction being emitted
//    attr      -- The emit attribute
//    targetReg -- The target register
//    op1Reg    -- The register of the first operand
//    op2Reg    -- The register of the second operand
//    indir     -- The GenTreeIndir used for the memory address
//
void emitter::emitIns_SIMD_R_R_R_A(
    instruction ins, emitAttr attr, regNumber targetReg, regNumber op1Reg, regNumber op2Reg, GenTreeIndir* indir)
{
    // FMA/AVX-VNNI only exist in VEX (3-operand) form.
    assert(IsFMAInstruction(ins) || IsAVXVNNIInstruction(ins));
    assert(UseVEXEncoding());

    // Ensure we aren't overwriting op2
    assert((op2Reg != targetReg) || (op1Reg == targetReg));

    emitIns_Mov(INS_movaps, attr, targetReg, op1Reg, /* canSkip */ true);
    emitIns_R_R_A(ins, attr, targetReg, op2Reg, indir);
}

//------------------------------------------------------------------------
// emitIns_SIMD_R_R_R_AR: emits the code for a SIMD instruction that takes two register operands, a base memory
// register, and that returns a value in register
//
// Arguments:
//    ins       -- The instruction being emitted
//    attr      -- The emit attribute
//    targetReg -- The target register
//    op1Reg    -- The register of the first operand
//    op2Reg    -- The register of the second operand
//    base      -- The base register used for the memory address
//
void emitter::emitIns_SIMD_R_R_R_AR(
    instruction ins, emitAttr attr, regNumber targetReg, regNumber op1Reg, regNumber op2Reg, regNumber base)
{
    // FMA only exists in VEX (3-operand) form.
    assert(IsFMAInstruction(ins));
    assert(UseVEXEncoding());

    // Ensure we aren't overwriting op2
    assert((op2Reg != targetReg) || (op1Reg == targetReg));

    emitIns_Mov(INS_movaps, attr, targetReg, op1Reg, /* canSkip */ true);
    emitIns_R_R_AR(ins, attr, targetReg, op2Reg, base, 0);
}

//------------------------------------------------------------------------
// emitIns_SIMD_R_R_R_C: emits the code for a SIMD instruction that takes two register operands, a field handle +
// offset, and that returns a value in register
//
// Arguments:
//    ins       -- The instruction being emitted
//    attr      -- The emit attribute
//    targetReg -- The target register
//    op1Reg    -- The register of the first operand
//    op2Reg    -- The register of the second operand
//    fldHnd    -- The CORINFO_FIELD_HANDLE used for the memory address
//    offs      -- The offset added to the memory address from fldHnd
//
void emitter::emitIns_SIMD_R_R_R_C(instruction          ins,
                                   emitAttr             attr,
                                   regNumber            targetReg,
                                   regNumber            op1Reg,
                                   regNumber            op2Reg,
                                   CORINFO_FIELD_HANDLE fldHnd,
                                   int                  offs)
{
    // FMA only exists in VEX (3-operand) form.
    assert(IsFMAInstruction(ins));
    assert(UseVEXEncoding());

    // Ensure we aren't overwriting op2
    assert((op2Reg != targetReg) || (op1Reg == targetReg));

    emitIns_Mov(INS_movaps, attr, targetReg, op1Reg, /* canSkip */ true);
    emitIns_R_R_C(ins, attr, targetReg, op2Reg, fldHnd, offs);
}

//------------------------------------------------------------------------
// emitIns_SIMD_R_R_R_R: emits the code for a SIMD instruction that takes three register operands, and that returns a
// value in register
//
// Arguments:
//    ins       -- The instruction being emitted
//    attr      -- The emit attribute
//    targetReg -- The target register
//    op1Reg    -- The register of the first operand
//    op2Reg    -- The register of the second operand
//    op3Reg    -- The register of the third operand
//
void emitter::emitIns_SIMD_R_R_R_R(
    instruction ins, emitAttr attr, regNumber targetReg, regNumber op1Reg, regNumber op2Reg, regNumber op3Reg)
{
    if (IsFMAInstruction(ins) || IsAVXVNNIInstruction(ins))
    {
        assert(UseVEXEncoding());

        // Ensure we aren't overwriting op2 or op3
        assert((op2Reg != targetReg) || (op1Reg == targetReg));
        assert((op3Reg != targetReg) || (op1Reg == targetReg));

        emitIns_Mov(INS_movaps, attr, targetReg, op1Reg, /* canSkip */ true);
        emitIns_R_R_R(ins, attr, targetReg, op2Reg, op3Reg);
    }
    else if (UseVEXEncoding())
    {
        assert(isAvxBlendv(ins) || isSse41Blendv(ins));

        // convert SSE encoding of SSE4.1 instructions to VEX encoding
        switch (ins)
        {
            case INS_blendvps:
                ins = INS_vblendvps;
                break;
            case INS_blendvpd:
                ins = INS_vblendvpd;
                break;
            case INS_pblendvb:
                ins = INS_vpblendvb;
                break;
            default:
                break;
        }
        emitIns_R_R_R_R(ins, attr, targetReg, op1Reg, op2Reg, op3Reg);
    }
    else
    {
        assert(isSse41Blendv(ins));

        // Ensure we aren't overwriting op1 or op2
        assert((op1Reg != REG_XMM0) || (op3Reg == REG_XMM0));
        assert((op2Reg != REG_XMM0) || (op3Reg == REG_XMM0));

        // SSE4.1 blendv* hardcode the mask vector (op3) in XMM0
        emitIns_Mov(INS_movaps, attr, REG_XMM0, op3Reg, /* canSkip */ true);

        // Ensure we aren't overwriting op2 or op3 (which should be REG_XMM0)
        assert((op2Reg != targetReg) || (op1Reg == targetReg));
        assert(targetReg != REG_XMM0);

        emitIns_Mov(INS_movaps, attr, targetReg, op1Reg, /* canSkip */ true);
        emitIns_R_R(ins, attr, targetReg, op2Reg);
    }
}

//------------------------------------------------------------------------
// emitIns_SIMD_R_R_R_S: emits the code for a SIMD instruction that takes two register operands, a variable index +
// offset, and that returns a value in register
//
// Arguments:
//    ins       -- The instruction being emitted
//    attr      -- The emit attribute
//    targetReg -- The target register
//    op1Reg    -- The register of the first operand
//    op2Reg    -- The register of the second operand
//    varx      -- The variable index used for the memory address
//    offs      -- The offset added to the memory address from varx
//
void emitter::emitIns_SIMD_R_R_R_S(
    instruction ins, emitAttr attr, regNumber targetReg, regNumber op1Reg, regNumber op2Reg, int varx, int offs)
{
    // FMA/AVX-VNNI only exist in VEX (3-operand) form.
    assert(IsFMAInstruction(ins) || IsAVXVNNIInstruction(ins));
    assert(UseVEXEncoding());

    // Ensure we aren't overwriting op2
    assert((op2Reg != targetReg) || (op1Reg == targetReg));

    emitIns_Mov(INS_movaps, attr, targetReg, op1Reg, /* canSkip */ true);
    emitIns_R_R_S(ins, attr, targetReg, op2Reg, varx, offs);
}

//------------------------------------------------------------------------
// emitIns_SIMD_R_R_A_R: emits the code for a SIMD instruction that takes a register operand, a GenTreeIndir address,
// another register operand, and that returns a value in register
//
// Arguments:
//    ins       -- The instruction being emitted
//    attr      -- The emit attribute
//    targetReg -- The target register
//    op1Reg    -- The register of the first operand
//    op3Reg    -- The register of the third operand
//    indir     -- The GenTreeIndir used for the memory address
//
void emitter::emitIns_SIMD_R_R_A_R(
    instruction ins, emitAttr attr, regNumber targetReg, regNumber op1Reg, regNumber op3Reg, GenTreeIndir* indir)
{
    if (UseVEXEncoding())
    {
        assert(isAvxBlendv(ins) || isSse41Blendv(ins));

        // convert SSE encoding of SSE4.1 instructions to VEX encoding
        switch (ins)
        {
            case INS_blendvps:
            {
                ins = INS_vblendvps;
                break;
            }

            case INS_blendvpd:
            {
                ins = INS_vblendvpd;
                break;
            }

            case INS_pblendvb:
            {
                ins = INS_vpblendvb;
                break;
            }

            default:
            {
                break;
            }
        }

        emitIns_R_R_A_R(ins, attr, targetReg, op1Reg, op3Reg, indir);
    }
    else
    {
        assert(isSse41Blendv(ins));

        // Ensure we aren't overwriting op1
        assert(op1Reg != REG_XMM0);

        // SSE4.1 blendv* hardcode the mask vector (op3) in XMM0
        emitIns_Mov(INS_movaps, attr, REG_XMM0, op3Reg, /* canSkip */ true);

        // Ensure we aren't overwriting op3 (which should be REG_XMM0)
        assert(targetReg != REG_XMM0);

        emitIns_Mov(INS_movaps, attr, targetReg, op1Reg, /* canSkip */ true);
        emitIns_R_A(ins, attr, targetReg, indir);
    }
}

//------------------------------------------------------------------------
// emitIns_SIMD_R_R_AR_R: emits the code for a SIMD instruction that takes a register operand, a base memory
// register, another register operand, and that returns a value in register
//
// Arguments:
//    ins       -- The instruction being emitted
//    attr      -- The emit attribute
//    targetReg -- The target register
//    op1Reg    -- The register of the first operand
//    op3Reg    -- The register of the third operand
//    base      -- The base register used for the memory address
//
void emitter::emitIns_SIMD_R_R_AR_R(
    instruction ins, emitAttr attr, regNumber targetReg, regNumber op1Reg, regNumber op3Reg, regNumber base)
{
    if (UseVEXEncoding())
    {
        assert(isAvxBlendv(ins) || isSse41Blendv(ins));

        // convert SSE encoding of SSE4.1 instructions to VEX encoding
        switch (ins)
        {
            case INS_blendvps:
            {
                ins = INS_vblendvps;
                break;
            }

            case INS_blendvpd:
            {
                ins = INS_vblendvpd;
                break;
            }

            case INS_pblendvb:
            {
                ins = INS_vpblendvb;
                break;
            }

            default:
            {
                break;
            }
        }

        emitIns_R_R_AR_R(ins, attr, targetReg, op1Reg, op3Reg, base, 0);
    }
    else
    {
        assert(isSse41Blendv(ins));

        // Ensure we aren't overwriting op1
        assert(op1Reg != REG_XMM0);

        // SSE4.1 blendv* hardcode the mask vector (op3) in XMM0
        emitIns_Mov(INS_movaps, attr, REG_XMM0, op3Reg, /* canSkip */ true);

        // Ensure we aren't overwriting op3 (which should be REG_XMM0)
        assert(targetReg != REG_XMM0);

        emitIns_Mov(INS_movaps, attr, targetReg, op1Reg, /* canSkip */ true);
        emitIns_R_AR(ins, attr, targetReg, base, 0);
    }
}

//------------------------------------------------------------------------
// emitIns_SIMD_R_R_C_R: emits the code for a SIMD instruction that takes a register operand, a field handle +
// offset, another register operand, and that returns a value in register
//
// Arguments:
//    ins       -- The instruction being emitted
//    attr      -- The emit attribute
//    targetReg -- The target register
//    op1Reg    -- The register of the first operand
//    op3Reg    -- The register of the third operand
//    fldHnd    -- The CORINFO_FIELD_HANDLE used for the memory address
//    offs      -- The offset added to the memory address from fldHnd
//
void emitter::emitIns_SIMD_R_R_C_R(instruction          ins,
                                   emitAttr             attr,
                                   regNumber            targetReg,
                                   regNumber            op1Reg,
                                   regNumber            op3Reg,
                                   CORINFO_FIELD_HANDLE fldHnd,
                                   int                  offs)
{
    if (UseVEXEncoding())
    {
        assert(isAvxBlendv(ins) || isSse41Blendv(ins));

        // convert SSE encoding of SSE4.1 instructions to VEX encoding
        switch (ins)
        {
            case INS_blendvps:
            {
                ins = INS_vblendvps;
                break;
            }

            case INS_blendvpd:
            {
                ins = INS_vblendvpd;
                break;
            }

            case INS_pblendvb:
            {
                ins = INS_vpblendvb;
                break;
            }

            default:
            {
                break;
            }
        }

        emitIns_R_R_C_R(ins, attr, targetReg, op1Reg, op3Reg, fldHnd, offs);
    }
    else
    {
        assert(isSse41Blendv(ins));

        // Ensure we aren't overwriting op1
        assert(op1Reg != REG_XMM0);

        // SSE4.1 blendv* hardcode the mask vector (op3) in XMM0
        emitIns_Mov(INS_movaps, attr, REG_XMM0, op3Reg, /* canSkip */ true);

        // Ensure we aren't overwriting op3 (which should be REG_XMM0)
        assert(targetReg != REG_XMM0);

        emitIns_Mov(INS_movaps, attr, targetReg, op1Reg, /* canSkip */ true);
        emitIns_R_C(ins, attr, targetReg, fldHnd, offs);
    }
}
//------------------------------------------------------------------------ // emitIns_SIMD_R_R_S_R: emits the code for a SIMD instruction that takes a register operand, a variable index + // offset, another register operand, and that returns a value in register // // Arguments: // ins -- The instruction being emitted // attr -- The emit attribute // targetReg -- The target register // op1Reg -- The register of the first operand // op3Reg -- The register of the third operand // varx -- The variable index used for the memory address // offs -- The offset added to the memory address from varx // void emitter::emitIns_SIMD_R_R_S_R( instruction ins, emitAttr attr, regNumber targetReg, regNumber op1Reg, regNumber op3Reg, int varx, int offs) { if (UseVEXEncoding()) { assert(isAvxBlendv(ins) || isSse41Blendv(ins)); // convert SSE encoding of SSE4.1 instructions to VEX encoding switch (ins) { case INS_blendvps: { ins = INS_vblendvps; break; } case INS_blendvpd: { ins = INS_vblendvpd; break; } case INS_pblendvb: { ins = INS_vpblendvb; break; } default: { break; } } emitIns_R_R_S_R(ins, attr, targetReg, op1Reg, op3Reg, varx, offs); } else { assert(isSse41Blendv(ins)); // Ensure we aren't overwriting op1 assert(op1Reg != REG_XMM0); // SSE4.1 blendv* hardcode the mask vector (op3) in XMM0 emitIns_Mov(INS_movaps, attr, REG_XMM0, op3Reg, /* canSkip */ true); // Ensure we aren't overwriting op3 (which should be REG_XMM0) assert(targetReg != REG_XMM0); emitIns_Mov(INS_movaps, attr, targetReg, op1Reg, /* canSkip */ true); emitIns_R_S(ins, attr, targetReg, varx, offs); } } #endif // FEATURE_HW_INTRINSICS /***************************************************************************** * * The following add instructions referencing stack-based local variables. 
 */

//------------------------------------------------------------------------
// emitIns_S: emits an instruction with a single stack-based local operand.
//
// Arguments:
//    ins  -- The instruction being emitted
//    attr -- The emit attribute
//    varx -- The variable index used for the memory address
//    offs -- The offset added to the memory address from varx
//
void emitter::emitIns_S(instruction ins, emitAttr attr, int varx, int offs)
{
    UNATIVE_OFFSET sz;
    instrDesc*     id  = emitNewInstr(attr);
    insFormat      fmt = emitInsModeFormat(ins, IF_SRD);

    id->idIns(ins);
    id->idInsFmt(fmt);
    id->idAddr()->iiaLclVar.initLclVarAddr(varx, offs);

    sz = emitInsSizeSV(id, insCodeMR(ins), varx, offs);
    id->idCodeSize(sz);

#ifdef DEBUG
    id->idDebugOnlyInfo()->idVarRefOffs = emitVarRefOffs;
#endif

    dispIns(id);
    emitCurIGsize += sz;

    emitAdjustStackDepthPushPop(ins);
}

//----------------------------------------------------------------------------------------
// IsRedundantStackMov:
//    Check if the current `mov` instruction is redundant and can be omitted when dealing with Load/Store from stack.
//    A `mov` is redundant in following 2 cases:
//
//    1. Move that is identical to last instruction emitted.
//
//         vmovapd  xmmword ptr [V01 rbp-20H], xmm0  # <-- last instruction
//         vmovapd  xmmword ptr [V01 rbp-20H], xmm0  # <-- current instruction can be omitted.
//
//    2. Opposite Move as that of last instruction emitted.
//
//         vmovupd  ymmword ptr[V01 rbp-50H], ymm0  # <-- last instruction
//         vmovupd  ymm0, ymmword ptr[V01 rbp-50H]  # <-- current instruction can be omitted.
//
// Arguments:
//    ins  - The current instruction
//    fmt  - The current format
//    size - Operand size of current instruction
//    ireg - The current source/destination register
//    varx - The variable index used for the memory address
//    offs - The offset added to the memory address from varx
//
// Return Value:
//    true if the move instruction is redundant; otherwise, false.
//
bool emitter::IsRedundantStackMov(instruction ins, insFormat fmt, emitAttr size, regNumber ireg, int varx, int offs)
{
    assert(IsMovInstruction(ins));
    assert((fmt == IF_SWR_RRD) || (fmt == IF_RWR_SRD));
    if (!emitComp->opts.OptimizationEnabled())
    {
        // The remaining move elisions should only happen if optimizations are enabled
        return false;
    }

    // Skip optimization if current instruction creates a GC live value.
    if (EA_IS_GCREF_OR_BYREF(size))
    {
        return false;
    }

    bool hasSideEffect = HasSideEffect(ins, size);

    bool isFirstInstrInBlock = (emitCurIGinsCnt == 0) && ((emitCurIG->igFlags & IGF_EXTEND) == 0);
    // TODO-XArch-CQ: Certain instructions, such as movaps vs movups, are equivalent in
    // functionality even if their actual identifier differs and we should optimize these

    if (isFirstInstrInBlock ||            // Don't optimize if instruction is the first instruction in IG.
        (emitLastIns == nullptr) ||       // or if a last instruction doesn't exist
        (emitLastIns->idIns() != ins) ||  // or if the instruction is different from the last instruction
        (emitLastIns->idOpSize() != size)) // or if the operand size is different from the last instruction
    {
        return false;
    }

    // Don't optimize if the last instruction is also not a Load/Store.
    if (!((emitLastIns->idInsFmt() == IF_SWR_RRD) || (emitLastIns->idInsFmt() == IF_RWR_SRD)))
    {
        return false;
    }

    regNumber lastReg1 = emitLastIns->idReg1();
    int       varNum   = emitLastIns->idAddr()->iiaLclVar.lvaVarNum();
    int       lastOffs = emitLastIns->idAddr()->iiaLclVar.lvaOffset();

    // Check if the last instruction and current instructions use the same register and local memory.
    if (varNum == varx && lastReg1 == ireg && lastOffs == offs)
    {
        // Check if we did a switched mov in the last instruction and don't have a side effect
        if ((((emitLastIns->idInsFmt() == IF_RWR_SRD) && (fmt == IF_SWR_RRD)) ||
             ((emitLastIns->idInsFmt() == IF_SWR_RRD) && (fmt == IF_RWR_SRD))) &&
            !hasSideEffect) // or if the format is different from the last instruction
        {
            JITDUMP("\n -- suppressing mov because last instruction already moved from dst to src and the mov has "
                    "no side-effects.\n");
            return true;
        }
        // Check if we did same move in last instruction, side effects don't matter since they already happened
        if (emitLastIns->idInsFmt() == fmt)
        {
            JITDUMP("\n -- suppressing mov because last instruction already moved from src to dst.\n");
            return true;
        }
    }
    return false;
}

//------------------------------------------------------------------------
// emitIns_S_R: emits an instruction storing a register to a stack-based local.
// Redundant stack moves are elided (see IsRedundantStackMov).
//
// Arguments:
//    ins  -- The instruction being emitted
//    attr -- The emit attribute
//    ireg -- The source register
//    varx -- The variable index used for the memory address
//    offs -- The offset added to the memory address from varx
//
void emitter::emitIns_S_R(instruction ins, emitAttr attr, regNumber ireg, int varx, int offs)
{
    insFormat fmt = emitInsModeFormat(ins, IF_SRD_RRD);
    if (IsMovInstruction(ins) && IsRedundantStackMov(ins, fmt, attr, ireg, varx, offs))
    {
        return;
    }

    UNATIVE_OFFSET sz;
    instrDesc*     id = emitNewInstr(attr);
    id->idIns(ins);
    id->idInsFmt(fmt);
    id->idReg1(ireg);
    id->idAddr()->iiaLclVar.initLclVarAddr(varx, offs);
    sz = emitInsSizeSV(id, insCodeMR(ins), varx, offs);

#ifdef TARGET_X86
    if (attr == EA_1BYTE)
    {
        // x86: only AL/BL/CL/DL can be used with a byte-sized operand.
        assert(isByteReg(ireg));
    }
#endif

    id->idCodeSize(sz);
#ifdef DEBUG
    id->idDebugOnlyInfo()->idVarRefOffs = emitVarRefOffs;
#endif
    dispIns(id);
    emitCurIGsize += sz;
}

//------------------------------------------------------------------------
// emitIns_R_S: emits an instruction loading a stack-based local into a register.
// Redundant stack moves are elided (see IsRedundantStackMov).
//
// Arguments:
//    ins  -- The instruction being emitted
//    attr -- The emit attribute
//    ireg -- The destination register
//    varx -- The variable index used for the memory address
//    offs -- The offset added to the memory address from varx
//
void emitter::emitIns_R_S(instruction ins, emitAttr attr, regNumber ireg, int varx, int offs)
{
    emitAttr size = EA_SIZE(attr);
    noway_assert(emitVerifyEncodable(ins, size, ireg));
    insFormat fmt = emitInsModeFormat(ins, IF_RRD_SRD);

    if (IsMovInstruction(ins) && IsRedundantStackMov(ins, fmt, attr, ireg, varx, offs))
    {
        return;
    }

    instrDesc*     id = emitNewInstr(attr);
    UNATIVE_OFFSET sz;

    id->idIns(ins);
    id->idInsFmt(fmt);
    id->idReg1(ireg);
    id->idAddr()->iiaLclVar.initLclVarAddr(varx, offs);
    sz = emitInsSizeSV(id, insCodeRM(ins), varx, offs);
    id->idCodeSize(sz);
#ifdef DEBUG
    id->idDebugOnlyInfo()->idVarRefOffs = emitVarRefOffs;
#endif
    dispIns(id);
    emitCurIGsize += sz;
}

//------------------------------------------------------------------------
// emitIns_S_I: emits an instruction with a stack-based local operand and an immediate.
//
// Arguments:
//    ins  -- The instruction being emitted
//    attr -- The emit attribute
//    varx -- The variable index used for the memory address
//    offs -- The offset added to the memory address from varx
//    val  -- The immediate value
//
void emitter::emitIns_S_I(instruction ins, emitAttr attr, int varx, int offs, int val)
{
#ifdef TARGET_AMD64
    // mov reg, imm64 is the only opcode which takes a full 8 byte immediate
    // all other opcodes take a sign-extended 4-byte immediate
    noway_assert(EA_SIZE(attr) < EA_8BYTE || !EA_IS_CNS_RELOC(attr));
#endif

    insFormat fmt;

    switch (ins)
    {
        case INS_rcl_N:
        case INS_rcr_N:
        case INS_rol_N:
        case INS_ror_N:
        case INS_shl_N:
        case INS_shr_N:
        case INS_sar_N:
            // Shift/rotate-by-N forms: the count-of-1 forms have a distinct encoding.
            assert(val != 1);
            fmt = IF_SRW_SHF;
            val &= 0x7F; // shift count is masked to 7 bits
            break;

        default:
            fmt = emitInsModeFormat(ins, IF_SRD_CNS);
            break;
    }

    instrDesc* id = emitNewInstrCns(attr, val);
    id->idIns(ins);
    id->idInsFmt(fmt);
    id->idAddr()->iiaLclVar.initLclVarAddr(varx, offs);

    UNATIVE_OFFSET sz = emitInsSizeSV(id, insCodeMI(ins), varx, offs, val);
    id->idCodeSize(sz);

#ifdef DEBUG
    id->idDebugOnlyInfo()->idVarRefOffs = emitVarRefOffs;
#endif

    dispIns(id);
    emitCurIGsize += sz;
}

/*****************************************************************************
 *
 *  Record that a jump instruction uses the short encoding
 *
 */
void emitter::emitSetShortJump(instrDescJmp* id)
{
    if (id->idjKeepLong)
    {
        // This jump was marked as requiring the long form; leave it alone.
        return;
    }

    id->idjShort = true;
}

/*****************************************************************************
 *
 *  Add a jmp instruction.
 *  When dst is NULL, instrCount specifies number of instructions
 *  to jump: positive is forward, negative is backward.
 */
void emitter::emitIns_J(instruction ins, BasicBlock* dst, int instrCount /* = 0 */)
{
    UNATIVE_OFFSET sz;
    instrDescJmp*  id = emitNewInstrJmp();

    if (dst != nullptr)
    {
        // Jump to a basic-block label: the block must have been marked as a
        // jump target, and the instruction-count form must not be used.
        assert(dst->bbFlags & BBF_HAS_LABEL);
        assert(instrCount == 0);
    }
    else
    {
        /* Only allow non-label jmps in prolog */
        assert(emitPrologIG);
        assert(emitPrologIG == emitCurIG);
        assert(instrCount != 0);
    }

    id->idIns(ins);
    id->idInsFmt(IF_LABEL);

#ifdef DEBUG
    // Mark the finally call
    if (ins == INS_call && emitComp->compCurBB->bbJumpKind == BBJ_CALLFINALLY)
    {
        id->idDebugOnlyInfo()->idFinallyCall = true;
    }
#endif // DEBUG

    id->idjShort = 0;
    if (dst != nullptr)
    {
        /* Assume the jump will be long */
        id->idAddr()->iiaBBlabel = dst;
        // A jump between hot and cold code regions can never be shortened.
        id->idjKeepLong          = emitComp->fgInDifferentRegions(emitComp->compCurBB, dst);
    }
    else
    {
        id->idAddr()->iiaSetInstrCount(instrCount);
        id->idjKeepLong = false;
        /* This jump must be short */
        emitSetShortJump(id);
        id->idSetIsBound();
    }

    /* Record the jump's IG and offset within it */

    id->idjIG   = emitCurIG;
    id->idjOffs = emitCurIGsize;

    /* Append this jump to this IG's jump list */

    id->idjNext      = emitCurIGjmpList;
    emitCurIGjmpList = id;

#if EMITTER_STATS
    emitTotalIGjmps++;
#endif

    /* Figure out the max. size of the jump/call instruction */

    if (ins == INS_call)
    {
        sz = CALL_INST_SIZE;
    }
    else if (ins == INS_push || ins == INS_push_hide)
    {
        // Pushing the address of a basicBlock will need a reloc
        // as the instruction uses the absolute address,
        // not a relative address
        if (emitComp->opts.compReloc)
        {
            id->idSetIsDspReloc();
        }
        sz = PUSH_INST_SIZE;
    }
    else
    {
        insGroup* tgt = nullptr;

        if (dst != nullptr)
        {
            /* This is a jump - assume the worst */
            sz = (ins == INS_jmp) ? JMP_SIZE_LARGE : JCC_SIZE_LARGE;
            /* Can we guess at the jump distance? */
            // A non-null cookie means the target IG was already emitted, i.e.
            // this is a backward jump whose distance can be estimated now.
            tgt = (insGroup*)emitCodeGetCookie(dst);
        }
        else
        {
            sz = JMP_SIZE_SMALL;
        }

        if (tgt)
        {
            int            extra;
            UNATIVE_OFFSET srcOffs;
            int            jmpDist;

            assert(JMP_SIZE_SMALL == JCC_SIZE_SMALL);

            /* This is a backward jump - figure out the distance */

            srcOffs = emitCurCodeOffset + emitCurIGsize + JMP_SIZE_SMALL;

            /* Compute the distance estimate */

            jmpDist = srcOffs - tgt->igOffs;
            assert((int)jmpDist > 0);

            /* How much beyond the max. short distance does the jump go? */

            extra = jmpDist + JMP_DIST_SMALL_MAX_NEG;

#if DEBUG_EMIT
            if (id->idDebugOnlyInfo()->idNum == (unsigned)INTERESTING_JUMP_NUM || INTERESTING_JUMP_NUM == 0)
            {
                if (INTERESTING_JUMP_NUM == 0)
                {
                    printf("[0] Jump %u:\n", id->idDebugOnlyInfo()->idNum);
                }
                printf("[0] Jump source is at %08X\n", srcOffs);
                printf("[0] Label block is at %08X\n", tgt->igOffs);
                printf("[0] Jump distance - %04X\n", jmpDist);
                if (extra > 0)
                {
                    printf("[0] Distance excess = %d \n", extra);
                }
            }
#endif

            if (extra <= 0 && !id->idjKeepLong)
            {
                /* Wonderful - this jump surely will be short */

                emitSetShortJump(id);
                sz = JMP_SIZE_SMALL;
            }
        }
#if DEBUG_EMIT
        else
        {
            if (id->idDebugOnlyInfo()->idNum == (unsigned)INTERESTING_JUMP_NUM || INTERESTING_JUMP_NUM == 0)
            {
                if (INTERESTING_JUMP_NUM == 0)
                {
                    printf("[0] Jump %u:\n", id->idDebugOnlyInfo()->idNum);
                }
                printf("[0] Jump source is at %04X/%08X\n", emitCurIGsize,
                       emitCurCodeOffset + emitCurIGsize + JMP_SIZE_SMALL);
                printf("[0] Label block is unknown\n");
            }
        }
#endif
    }

    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;

    emitAdjustStackDepthPushPop(ins);
}

#if !FEATURE_FIXED_OUT_ARGS

//------------------------------------------------------------------------
// emitAdjustStackDepthPushPop: Adjust the current and maximum stack depth.
//
// Arguments:
//    ins - the instruction. Only INS_push and INS_pop adjust the stack depth.
//
// Notes:
//    1. Alters emitCurStackLvl and possibly emitMaxStackDepth.
//    2.
emitCntStackDepth must be set (0 in prolog/epilog, one DWORD elsewhere) // void emitter::emitAdjustStackDepthPushPop(instruction ins) { if (ins == INS_push) { emitCurStackLvl += emitCntStackDepth; if (emitMaxStackDepth < emitCurStackLvl) { JITDUMP("Upping emitMaxStackDepth from %d to %d\n", emitMaxStackDepth, emitCurStackLvl); emitMaxStackDepth = emitCurStackLvl; } } else if (ins == INS_pop) { emitCurStackLvl -= emitCntStackDepth; assert((int)emitCurStackLvl >= 0); } } //------------------------------------------------------------------------ // emitAdjustStackDepth: Adjust the current and maximum stack depth. // // Arguments: // ins - the instruction. Only INS_add and INS_sub adjust the stack depth. // It is assumed that the add/sub is on the stack pointer. // val - the number of bytes to add to or subtract from the stack pointer. // // Notes: // 1. Alters emitCurStackLvl and possibly emitMaxStackDepth. // 2. emitCntStackDepth must be set (0 in prolog/epilog, one DWORD elsewhere) // void emitter::emitAdjustStackDepth(instruction ins, ssize_t val) { // If we're in the prolog or epilog, or otherwise not tracking the stack depth, just return. if (emitCntStackDepth == 0) return; if (ins == INS_sub) { S_UINT32 newStackLvl(emitCurStackLvl); newStackLvl += S_UINT32(val); noway_assert(!newStackLvl.IsOverflow()); emitCurStackLvl = newStackLvl.Value(); if (emitMaxStackDepth < emitCurStackLvl) { JITDUMP("Upping emitMaxStackDepth from %d to %d\n", emitMaxStackDepth, emitCurStackLvl); emitMaxStackDepth = emitCurStackLvl; } } else if (ins == INS_add) { S_UINT32 newStackLvl = S_UINT32(emitCurStackLvl) - S_UINT32(val); noway_assert(!newStackLvl.IsOverflow()); emitCurStackLvl = newStackLvl.Value(); } } #endif // EMIT_TRACK_STACK_DEPTH /***************************************************************************** * * Add a call instruction (direct or indirect). 
 *  argSize<0 means that the caller will pop the arguments
 *
 * The other arguments are interpreted depending on callType as shown:
 * Unless otherwise specified, ireg,xreg,xmul,disp should have default values.
 *
 * EC_FUNC_TOKEN       : addr is the method address
 * EC_FUNC_TOKEN_INDIR : addr is the indirect method address
 * EC_FUNC_ADDR        : addr is the absolute address of the function
 * EC_FUNC_VIRTUAL     : "call [ireg+disp]"
 *
 * If callType is one of these emitCallTypes, addr has to be NULL.
 * EC_INDIR_R          : "call ireg".
 * EC_INDIR_SR         : "call lcl<disp>" (eg. call [ebp-8]).
 * EC_INDIR_C          : "call clsVar<disp>" (eg. call [clsVarAddr])
 * EC_INDIR_ARD        : "call [ireg+xreg*xmul+disp]"
 *
 */

// clang-format off
void emitter::emitIns_Call(EmitCallType          callType,
                           CORINFO_METHOD_HANDLE methHnd,
                           INDEBUG_LDISASM_COMMA(CORINFO_SIG_INFO* sigInfo) // used to report call sites to the EE
                           void*                 addr,
                           ssize_t               argSize,
                           emitAttr              retSize
                           MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(emitAttr secondRetSize),
                           VARSET_VALARG_TP      ptrVars,
                           regMaskTP             gcrefRegs,
                           regMaskTP             byrefRegs,
                           const DebugInfo&      di,
                           regNumber             ireg,
                           regNumber             xreg,
                           unsigned              xmul,
                           ssize_t               disp,
                           bool                  isJump)
// clang-format on
{
    /* Sanity check the arguments depending on callType */

    assert(callType < EC_COUNT);
    assert((callType != EC_FUNC_TOKEN && callType != EC_FUNC_TOKEN_INDIR) ||
           (addr != nullptr && ireg == REG_NA && xreg == REG_NA && xmul == 0 && disp == 0));
    assert(callType != EC_INDIR_R || (addr == nullptr && ireg < REG_COUNT && xreg == REG_NA && xmul == 0 && disp == 0));
    assert(callType != EC_INDIR_ARD || (addr == nullptr));

    // Our stack level should be always greater than the bytes of arguments we push. Just
    // a sanity test.
    assert((unsigned)abs((signed)argSize) <= codeGen->genStackLevel);

    // Trim out any callee-trashed registers from the live set.
    regMaskTP savedSet = emitGetGCRegsSavedOrModified(methHnd);
    gcrefRegs &= savedSet;
    byrefRegs &= savedSet;

#ifdef DEBUG
    if (EMIT_GC_VERBOSE)
    {
        printf("\t\t\t\t\t\t\tCall: GCvars=%s ", VarSetOps::ToString(emitComp, ptrVars));
        dumpConvertedVarSet(emitComp, ptrVars);
        printf(", gcrefRegs=");
        printRegMaskInt(gcrefRegs);
        emitDispRegSet(gcrefRegs);
        printf(", byrefRegs=");
        printRegMaskInt(byrefRegs);
        emitDispRegSet(byrefRegs);
        printf("\n");
    }
#endif

    /* Managed RetVal: emit sequence point for the call */
    if (emitComp->opts.compDbgInfo && di.IsValid())
    {
        codeGen->genIPmappingAdd(IPmappingDscKind::Normal, di, false);
    }

    /*
        We need to allocate the appropriate instruction descriptor based
        on whether this is a direct/indirect call, and whether we need to
        record an updated set of live GC variables.

        The stats for a ton of classes is as follows:

            Direct call w/o GC vars        220,216
            Indir. call w/o GC vars        144,781

            Direct call with GC vars         9,440
            Indir. call with GC vars         5,768
     */

    instrDesc* id;

    assert(argSize % REGSIZE_BYTES == 0);
    int argCnt = (int)(argSize / (int)REGSIZE_BYTES); // we need a signed-divide

    if ((callType == EC_INDIR_R) || (callType == EC_INDIR_ARD))
    {
        /* Indirect call, virtual calls */

        id = emitNewInstrCallInd(argCnt, disp, ptrVars, gcrefRegs, byrefRegs,
                                 retSize MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize));
    }
    else
    {
        // Helper/static/nonvirtual/function calls (direct or through handle),
        // and calls to an absolute addr.

        assert(callType == EC_FUNC_TOKEN || callType == EC_FUNC_TOKEN_INDIR);

        id = emitNewInstrCallDir(argCnt, ptrVars, gcrefRegs, byrefRegs,
                                 retSize MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize));
    }

    /* Update the emitter's live GC ref sets */

    VarSetOps::Assign(emitComp, emitThisGCrefVars, ptrVars);
    emitThisGCrefRegs = gcrefRegs;
    emitThisByrefRegs = byrefRegs;

    /* Set the instruction - special case jumping a function (tail call) */
    instruction ins = INS_call;

    if (isJump)
    {
        // Tail call: a direct call becomes "jmp", an indirect one "jmp [..]".
        if (callType == EC_FUNC_TOKEN)
        {
            ins = INS_l_jmp;
        }
        else
        {
            ins = INS_tail_i_jmp;
        }
    }
    id->idIns(ins);

    id->idSetIsNoGC(emitNoGChelper(methHnd));

    UNATIVE_OFFSET sz;

    // Record the address: method, indirection, or funcptr
    if ((callType == EC_INDIR_R) || (callType == EC_INDIR_ARD))
    {
        // This is an indirect call/jmp (either a virtual call or func ptr call)

        if (callType == EC_INDIR_R) // call reg
        {
            id->idSetIsCallRegPtr();
        }

        // The function is "ireg" if id->idIsCallRegPtr(),
        // else [ireg+xmul*xreg+disp]

        id->idInsFmt(IF_ARD);

        id->idAddr()->iiaAddrMode.amBaseReg = ireg;
        id->idAddr()->iiaAddrMode.amIndxReg = xreg;
        id->idAddr()->iiaAddrMode.amScale   = xmul ? emitEncodeScale(xmul) : emitter::OPSZ1;

        code_t code = insCodeMR(ins);
        if (ins == INS_tail_i_jmp)
        {
            // Tailcall with addressing mode/register needs to be rex.w
            // prefixed to be recognized as part of epilog by unwinder.
            code = AddRexWPrefix(ins, code);
        }

        sz = emitInsSizeAM(id, code);

        if (ireg == REG_NA && xreg == REG_NA)
        {
            if (codeGen->genCodeIndirAddrNeedsReloc(disp))
            {
                id->idSetIsDspReloc();
            }
#ifdef TARGET_AMD64
            else
            {
                // An absolute indir address that doesn't need reloc should fit within 32-bits
                // to be encoded as offset relative to zero. This addr mode requires an extra
                // SIB byte
                noway_assert((size_t) static_cast<int>(reinterpret_cast<intptr_t>(addr)) == (size_t)addr);
                sz++;
            }
#endif // TARGET_AMD64
        }
    }
    else if (callType == EC_FUNC_TOKEN_INDIR)
    {
        // call/jmp [method_addr]

        assert(addr != nullptr);

        id->idInsFmt(IF_METHPTR);
        id->idAddr()->iiaAddr = (BYTE*)addr;
        sz                    = 6;

        // Since this is an indirect call through a pointer and we don't
        // currently pass in emitAttr into this function, we query codegen
        // whether addr needs a reloc.
        if (codeGen->genCodeIndirAddrNeedsReloc((size_t)addr))
        {
            id->idSetIsDspReloc();
        }
#ifdef TARGET_AMD64
        else
        {
            // An absolute indir address that doesn't need reloc should fit within 32-bits
            // to be encoded as offset relative to zero. This addr mode requires an extra
            // SIB byte
            noway_assert((size_t) static_cast<int>(reinterpret_cast<intptr_t>(addr)) == (size_t)addr);
            sz++;
        }
#endif // TARGET_AMD64
    }
    else
    {
        // This is a simple direct call/jmp: call/jmp helper/method/addr

        assert(callType == EC_FUNC_TOKEN);

        assert(addr != nullptr);

        id->idInsFmt(IF_METHOD);
        sz = 5; // opcode byte + rel32 displacement

        id->idAddr()->iiaAddr = (BYTE*)addr;

        // Direct call to a method and no addr indirection is needed.
        if (codeGen->genCodeAddrNeedsReloc((size_t)addr))
        {
            id->idSetIsDspReloc();
        }
    }

#ifdef DEBUG
    if (emitComp->verbose && 0)
    {
        if (id->idIsLargeCall())
        {
            printf("[%02u] Rec call GC vars = %s\n", id->idDebugOnlyInfo()->idNum,
                   VarSetOps::ToString(emitComp, ((instrDescCGCA*)id)->idcGCvars));
        }
    }

    id->idDebugOnlyInfo()->idMemCookie = (size_t)methHnd; // method token
    id->idDebugOnlyInfo()->idCallSig   = sigInfo;
#endif // DEBUG

#ifdef LATE_DISASM
    if (addr != nullptr)
    {
        codeGen->getDisAssembler().disSetMethod((size_t)addr, methHnd);
    }
#endif // LATE_DISASM

    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;

#if !FEATURE_FIXED_OUT_ARGS

    /* The call will pop the arguments */

    if (emitCntStackDepth && argSize > 0)
    {
        noway_assert((ssize_t)emitCurStackLvl >= argSize);
        emitCurStackLvl -= (int)argSize;
        assert((int)emitCurStackLvl >= 0);
    }

#endif // !FEATURE_FIXED_OUT_ARGS
}

#ifdef DEBUG
/*****************************************************************************
 *
 *  The following called for each recorded instruction -- use for debugging.
 *  Checks that reloc flags are only set on instruction formats whose operand
 *  encoding can actually carry a relocation.
 */
void emitter::emitInsSanityCheck(instrDesc* id)
{
    // make certain you only try to put relocs on things that can have them.
    ID_OPS idOp = (ID_OPS)emitFmtToOps[id->idInsFmt()];
    if ((idOp == ID_OP_SCNS) && id->idIsLargeCns())
    {
        idOp = ID_OP_CNS;
    }

    if (id->idIsDspReloc())
    {
        assert(idOp == ID_OP_NONE || idOp == ID_OP_AMD || idOp == ID_OP_DSP || idOp == ID_OP_DSP_CNS ||
               idOp == ID_OP_AMD_CNS || idOp == ID_OP_SPEC || idOp == ID_OP_CALL || idOp == ID_OP_JMP ||
               idOp == ID_OP_LBL);
    }

    if (id->idIsCnsReloc())
    {
        assert(idOp == ID_OP_CNS || idOp == ID_OP_AMD_CNS || idOp == ID_OP_DSP_CNS || idOp == ID_OP_SPEC ||
               idOp == ID_OP_CALL || idOp == ID_OP_JMP);
    }
}
#endif

/*****************************************************************************
 *
 *  Return the allocated size (in bytes) of the given instruction descriptor.
*/ size_t emitter::emitSizeOfInsDsc(instrDesc* id) { if (emitIsScnsInsDsc(id)) { return SMALL_IDSC_SIZE; } assert((unsigned)id->idInsFmt() < emitFmtCount); ID_OPS idOp = (ID_OPS)emitFmtToOps[id->idInsFmt()]; // An INS_call instruction may use a "fat" direct/indirect call descriptor // except for a local call to a label (i.e. call to a finally) // Only ID_OP_CALL and ID_OP_SPEC check for this, so we enforce that the // INS_call instruction always uses one of these idOps if (id->idIns() == INS_call) { assert(idOp == ID_OP_CALL || // is a direct call idOp == ID_OP_SPEC || // is a indirect call idOp == ID_OP_JMP); // is a local call to finally clause } switch (idOp) { case ID_OP_NONE: #if FEATURE_LOOP_ALIGN if (id->idIns() == INS_align) { return sizeof(instrDescAlign); } #endif break; case ID_OP_LBL: return sizeof(instrDescLbl); case ID_OP_JMP: return sizeof(instrDescJmp); case ID_OP_CALL: case ID_OP_SPEC: if (id->idIsLargeCall()) { /* Must be a "fat" indirect call descriptor */ return sizeof(instrDescCGCA); } FALLTHROUGH; case ID_OP_SCNS: case ID_OP_CNS: case ID_OP_DSP: case ID_OP_DSP_CNS: if (id->idIsLargeCns()) { if (id->idIsLargeDsp()) { return sizeof(instrDescCnsDsp); } else { return sizeof(instrDescCns); } } else { if (id->idIsLargeDsp()) { return sizeof(instrDescDsp); } else { return sizeof(instrDesc); } } case ID_OP_AMD: case ID_OP_AMD_CNS: if (id->idIsLargeCns()) { if (id->idIsLargeDsp()) { return sizeof(instrDescCnsAmd); } else { return sizeof(instrDescCns); } } else { if (id->idIsLargeDsp()) { return sizeof(instrDescAmd); } else { return sizeof(instrDesc); } } default: NO_WAY("unexpected instruction descriptor format"); break; } return sizeof(instrDesc); } /*****************************************************************************/ #ifdef DEBUG /***************************************************************************** * * Return a string that represents the given register. 
 */

const char* emitter::emitRegName(regNumber reg, emitAttr attr, bool varName)
{
    // Two rotating static buffers so that two emitRegName results can be alive
    // in one printf call (NOTE: not thread-safe, debug-only usage).
    static char          rb[2][128];
    static unsigned char rbc = 0;

    const char* rn = emitComp->compRegVarName(reg, varName);

#ifdef TARGET_AMD64
    char suffix = '\0';

    // Rewrite the canonical (64-bit) register name according to the access size,
    // e.g. rax -> eax/ax/al, r8 -> r8d/r8w/r8b.
    switch (EA_SIZE(attr))
    {
        case EA_32BYTE:
            return emitYMMregName(reg);

        case EA_16BYTE:
            return emitXMMregName(reg);

        case EA_8BYTE:
            if ((REG_XMM0 <= reg) && (reg <= REG_XMM15))
            {
                return emitXMMregName(reg);
            }
            break;

        case EA_4BYTE:
            if ((REG_XMM0 <= reg) && (reg <= REG_XMM15))
            {
                return emitXMMregName(reg);
            }

            if (reg > REG_R15)
            {
                break;
            }

            if (reg > REG_RDI)
            {
                // r8..r15 get a 'd' suffix for 32-bit access (r8d etc.)
                suffix = 'd';
                goto APPEND_SUFFIX;
            }
            // rax..rdi: replace the leading 'r' with 'e' (rax -> eax)
            rbc        = (rbc + 1) % 2;
            rb[rbc][0] = 'e';
            rb[rbc][1] = rn[1];
            rb[rbc][2] = rn[2];
            rb[rbc][3] = 0;
            rn         = rb[rbc];
            break;

        case EA_2BYTE:
            if (reg > REG_RDI)
            {
                suffix = 'w';
                goto APPEND_SUFFIX;
            }
            // rax..rdi: skip the leading 'r' (rax -> ax)
            rn++;
            break;

        case EA_1BYTE:
            if (reg > REG_RDI)
            {
                suffix = 'b';
            APPEND_SUFFIX:
                // Copy the base name and append the size suffix character.
                rbc        = (rbc + 1) % 2;
                rb[rbc][0] = rn[0];
                rb[rbc][1] = rn[1];
                if (rn[2])
                {
                    assert(rn[3] == 0);
                    rb[rbc][2] = rn[2];
                    rb[rbc][3] = suffix;
                    rb[rbc][4] = 0;
                }
                else
                {
                    rb[rbc][2] = suffix;
                    rb[rbc][3] = 0;
                }
            }
            else
            {
                // rax..rbx -> al..bl; rsp..rdi -> spl..dil
                rbc        = (rbc + 1) % 2;
                rb[rbc][0] = rn[1];
                if (reg < 4)
                {
                    rb[rbc][1] = 'l';
                    rb[rbc][2] = 0;
                }
                else
                {
                    rb[rbc][1] = rn[2];
                    rb[rbc][2] = 'l';
                    rb[rbc][3] = 0;
                }
            }

            rn = rb[rbc];
            break;

        default:
            break;
    }
#endif // TARGET_AMD64

#ifdef TARGET_X86
    assert(strlen(rn) >= 3);

    switch (EA_SIZE(attr))
    {
        case EA_32BYTE:
            return emitYMMregName(reg);

        case EA_16BYTE:
            return emitXMMregName(reg);

        case EA_8BYTE:
            if ((REG_XMM0 <= reg) && (reg <= REG_XMM7))
            {
                return emitXMMregName(reg);
            }
            break;

        case EA_4BYTE:
            if ((REG_XMM0 <= reg) && (reg <= REG_XMM7))
            {
                return emitXMMregName(reg);
            }
            break;

        case EA_2BYTE:
            // eax -> ax: skip the leading 'e'
            rn++;
            break;

        case EA_1BYTE:
            // eax -> al: second character + 'l'
            rbc        = (rbc + 1) % 2;
            rb[rbc][0] = rn[1];
            rb[rbc][1] = 'l';
            strcpy_s(&rb[rbc][2], sizeof(rb[0]) - 2, rn + 3);

            rn = rb[rbc];
            break;

        default:
            break;
    }
#endif // TARGET_X86

#if 0
    // The following is useful if you want register names to be tagged with * or ^ representing gcref or byref, respectively,
    // however it's possibly not interesting most of the time.
    if (EA_IS_GCREF(attr) || EA_IS_BYREF(attr))
    {
        if (rn != rb[rbc])
        {
            rbc = (rbc+1)%2;
            strcpy_s(rb[rbc], sizeof(rb[rbc]), rn);
            rn = rb[rbc];
        }

        if (EA_IS_GCREF(attr))
        {
            strcat_s(rb[rbc], sizeof(rb[rbc]), "*");
        }
        else if (EA_IS_BYREF(attr))
        {
            strcat_s(rb[rbc], sizeof(rb[rbc]), "^");
        }
    }
#endif // 0

    return rn;
}

/*****************************************************************************
 *
 *  Return a string that represents the given XMM register.
 */

const char* emitter::emitXMMregName(unsigned reg)
{
    // Table built from register.h: "x" + short register name.
    static const char* const regNames[] = {
#define REGDEF(name, rnum, mask, sname) "x" sname,
#include "register.h"
    };

    assert(reg < REG_COUNT);
    assert(reg < ArrLen(regNames));

    return regNames[reg];
}

/*****************************************************************************
 *
 *  Return a string that represents the given YMM register.
 */

const char* emitter::emitYMMregName(unsigned reg)
{
    // Table built from register.h: "y" + short register name.
    static const char* const regNames[] = {
#define REGDEF(name, rnum, mask, sname) "y" sname,
#include "register.h"
    };

    assert(reg < REG_COUNT);
    assert(reg < ArrLen(regNames));

    return regNames[reg];
}

/*****************************************************************************
 *
 *  Display a static data member reference.
*/ void emitter::emitDispClsVar(CORINFO_FIELD_HANDLE fldHnd, ssize_t offs, bool reloc /* = false */) { int doffs; /* Filter out the special case of fs:[offs] */ // Munge any pointers if we want diff-able disassembly if (emitComp->opts.disDiffable) { ssize_t top12bits = (offs >> 20); if ((top12bits != 0) && (top12bits != -1)) { offs = 0xD1FFAB1E; } } if (fldHnd == FLD_GLOBAL_FS) { printf("FS:[0x%04X]", offs); return; } if (fldHnd == FLD_GLOBAL_DS) { printf("[0x%04X]", offs); return; } printf("["); doffs = Compiler::eeGetJitDataOffs(fldHnd); if (reloc) { printf("reloc "); } if (doffs >= 0) { if (doffs & 1) { printf("@CNS%02u", doffs - 1); } else { printf("@RWD%02u", doffs); } if (offs) { printf("%+Id", offs); } } else { printf("classVar[%#x]", emitComp->dspPtr(fldHnd)); if (offs) { printf("%+Id", offs); } } printf("]"); if (emitComp->opts.varNames && offs < 0) { printf("'%s", emitComp->eeGetFieldName(fldHnd)); if (offs) { printf("%+Id", offs); } printf("'"); } } /***************************************************************************** * * Display a stack frame reference. 
*/ void emitter::emitDispFrameRef(int varx, int disp, int offs, bool asmfm) { int addr; bool bEBP; printf("["); if (!asmfm || emitComp->lvaDoneFrameLayout == Compiler::NO_FRAME_LAYOUT) { if (varx < 0) { printf("TEMP_%02u", -varx); } else { printf("V%02u", +varx); } if (disp < 0) { printf("-0x%X", -disp); } else if (disp > 0) { printf("+0x%X", +disp); } } if (emitComp->lvaDoneFrameLayout == Compiler::FINAL_FRAME_LAYOUT) { if (!asmfm) { printf(" "); } addr = emitComp->lvaFrameAddress(varx, &bEBP) + disp; if (bEBP) { printf(STR_FPBASE); if (addr < 0) { printf("-%02XH", -addr); } else if (addr > 0) { printf("+%02XH", addr); } } else { /* Adjust the offset by amount currently pushed on the stack */ printf(STR_SPBASE); if (addr < 0) { printf("-%02XH", -addr); } else if (addr > 0) { printf("+%02XH", addr); } #if !FEATURE_FIXED_OUT_ARGS if (emitCurStackLvl) printf("+%02XH", emitCurStackLvl); #endif // !FEATURE_FIXED_OUT_ARGS } } printf("]"); if (varx >= 0 && emitComp->opts.varNames) { const char* varName = emitComp->compLocalVarName(varx, offs); if (varName) { printf("'%s", varName); if (disp < 0) { printf("-%d", -disp); } else if (disp > 0) { printf("+%d", +disp); } printf("'"); } } } /***************************************************************************** * * Display a reloc value * If we are formatting for a diffable assembly listing don't print the hex value * since it will prevent us from doing assembly diffs */ void emitter::emitDispReloc(ssize_t value) { if (emitComp->opts.disAsm && emitComp->opts.disDiffable) { printf("(reloc)"); } else { printf("(reloc 0x%Ix)", emitComp->dspPtr(value)); } } /***************************************************************************** * * Display an address mode. 
 */

void emitter::emitDispAddrMode(instrDesc* id, bool noDetail)
{
    bool    nsep = false; // has anything been printed inside the brackets yet?
    ssize_t disp;

    unsigned     jtno = 0;
    dataSection* jdsc = nullptr;

    /* The displacement field is in an unusual place for calls */

    disp = (id->idIns() == INS_call) || (id->idIns() == INS_tail_i_jmp) ? emitGetInsCIdisp(id) : emitGetInsAmdAny(id);

    /* Display a jump table label if this is a switch table jump */

    if (id->idIns() == INS_i_jmp)
    {
        UNATIVE_OFFSET offs = 0;

        /* Find the appropriate entry in the data section list */

        for (jdsc = emitConsDsc.dsdList, jtno = 0; jdsc; jdsc = jdsc->dsNext)
        {
            UNATIVE_OFFSET size = jdsc->dsSize;

            /* Is this a label table? */

            if (size & 1) // odd size marks a label table entry
            {
                size--;
                jtno++;

                if (offs == id->idDebugOnlyInfo()->idMemCookie)
                {
                    break;
                }
            }

            offs += size;
        }

        /* If we've found a matching entry then is a table jump */

        if (jdsc)
        {
            if (id->idIsDspReloc())
            {
                printf("reloc ");
            }
            printf("J_M%03u_DS%02u", emitComp->compMethodID, id->idDebugOnlyInfo()->idMemCookie);

            disp -= id->idDebugOnlyInfo()->idMemCookie;
        }
    }

    bool frameRef = false;

    printf("[");

    if (id->idAddr()->iiaAddrMode.amBaseReg != REG_NA)
    {
        printf("%s", emitRegName(id->idAddr()->iiaAddrMode.amBaseReg));
        nsep = true;
        // ESP-based (or EBP-based when a frame pointer is in use) addresses
        // are frame references: displacements print in a fixed hex style.
        if (id->idAddr()->iiaAddrMode.amBaseReg == REG_ESP)
        {
            frameRef = true;
        }
        else if (emitComp->isFramePointerUsed() && id->idAddr()->iiaAddrMode.amBaseReg == REG_EBP)
        {
            frameRef = true;
        }
    }

    if (id->idAddr()->iiaAddrMode.amIndxReg != REG_NA)
    {
        size_t scale = emitDecodeScale(id->idAddr()->iiaAddrMode.amScale);

        if (nsep)
        {
            printf("+");
        }
        if (scale > 1)
        {
            printf("%u*", scale);
        }
        printf("%s", emitRegName(id->idAddr()->iiaAddrMode.amIndxReg));
        nsep = true;
    }

    if ((id->idIsDspReloc()) && (id->idIns() != INS_i_jmp))
    {
        if (nsep)
        {
            printf("+");
        }
        emitDispReloc(disp);
    }
    else
    {
        // Munge any pointers if we want diff-able disassembly
        // It's assumed to be a pointer when disp is outside of the range (-1M, +1M); top bits are not 0 or -1
        if (!frameRef && emitComp->opts.disDiffable && (static_cast<size_t>((disp >> 20) + 1) > 1))
        {
            if (nsep)
            {
                printf("+");
            }
            printf("D1FFAB1EH");
        }
        else if (disp > 0)
        {
            if (nsep)
            {
                printf("+");
            }
            // Pick a format based on magnitude: decimal for small values,
            // hex of increasing width otherwise.
            if (frameRef)
            {
                printf("%02XH", disp);
            }
            else if (disp < 1000)
            {
                printf("%d", disp);
            }
            else if (disp <= 0xFFFF)
            {
                printf("%04XH", disp);
            }
            else
            {
                printf("%08XH", disp);
            }
        }
        else if (disp < 0)
        {
            if (frameRef)
            {
                printf("-%02XH", -disp);
            }
            else if (disp > -1000)
            {
                printf("-%d", -disp);
            }
            else if (disp >= -0xFFFF)
            {
                printf("-%04XH", -disp);
            }
            else if (disp < -0xFFFFFF)
            {
                if (nsep)
                {
                    printf("+");
                }
                printf("%08XH", disp);
            }
            else
            {
                printf("-%08XH", -disp);
            }
        }
        else if (!nsep)
        {
            // No registers and zero displacement: still print something.
            printf("%04XH", disp);
        }
    }

    printf("]");

    // pretty print string if it looks like one
    if ((id->idGCref() == GCT_GCREF) && (id->idIns() == INS_mov) && (id->idAddr()->iiaAddrMode.amBaseReg == REG_NA))
    {
        const WCHAR* str = emitComp->eeGetCPString(disp);
        if (str != nullptr)
        {
            printf(" '%S'", str);
        }
    }

    if (jdsc && !noDetail)
    {
        // Dump the contents of the jump table that this switch jump indexes.
        unsigned     cnt = (jdsc->dsSize - 1) / TARGET_POINTER_SIZE;
        BasicBlock** bbp = (BasicBlock**)jdsc->dsCont;

#ifdef TARGET_AMD64
#define SIZE_LETTER "Q"
#else
#define SIZE_LETTER "D"
#endif
        printf("\n\n J_M%03u_DS%02u LABEL " SIZE_LETTER "WORD", emitComp->compMethodID, jtno);

        /* Display the label table (it's stored as "BasicBlock*" values) */

        do
        {
            insGroup* lab;

            /* Convert the BasicBlock* value to an IG address */

            lab = (insGroup*)emitCodeGetCookie(*bbp++);
            assert(lab);

            printf("\n D" SIZE_LETTER " %s", emitLabelString(lab));
        } while (--cnt);
    }
}

/*****************************************************************************
 *
 *  If the given instruction is a shift, display the 2nd operand.
*/ void emitter::emitDispShift(instruction ins, int cnt) { switch (ins) { case INS_rcl_1: case INS_rcr_1: case INS_rol_1: case INS_ror_1: case INS_shl_1: case INS_shr_1: case INS_sar_1: printf(", 1"); break; case INS_rcl: case INS_rcr: case INS_rol: case INS_ror: case INS_shl: case INS_shr: case INS_sar: printf(", cl"); break; case INS_rcl_N: case INS_rcr_N: case INS_rol_N: case INS_ror_N: case INS_shl_N: case INS_shr_N: case INS_sar_N: printf(", %d", cnt); break; default: break; } } /***************************************************************************** * * Display (optionally) the bytes for the instruction encoding in hex */ void emitter::emitDispInsHex(instrDesc* id, BYTE* code, size_t sz) { // We do not display the instruction hex if we want diff-able disassembly if (!emitComp->opts.disDiffable) { #ifdef TARGET_AMD64 // how many bytes per instruction we format for const size_t digits = 10; #else // TARGET_X86 const size_t digits = 6; #endif printf(" "); for (unsigned i = 0; i < sz; i++) { printf("%02X", (*((BYTE*)(code + i)))); } if (sz < digits) { printf("%.*s", 2 * (digits - sz), " "); } } } //-------------------------------------------------------------------- // emitDispIns: Dump the given instruction to jitstdout. // // Arguments: // id - The instruction // isNew - Whether the instruction is newly generated (before encoding). // doffs - If true, always display the passed-in offset. // asmfm - Whether the instruction should be displayed in assembly format. // If false some additional information may be printed for the instruction. // offset - The offset of the instruction. Only displayed if doffs is true or if // !isNew && !asmfm. // code - Pointer to the actual code, used for displaying the address and encoded bytes // if turned on. // sz - The size of the instruction, used to display the encoded bytes. // ig - The instruction group containing the instruction. Not used on xarch. 
// void emitter::emitDispIns( instrDesc* id, bool isNew, bool doffs, bool asmfm, unsigned offset, BYTE* code, size_t sz, insGroup* ig) { emitAttr attr; const char* sstr; instruction ins = id->idIns(); if (emitComp->verbose) { unsigned idNum = id->idDebugOnlyInfo()->idNum; printf("IN%04x: ", idNum); } #define ID_INFO_DSP_RELOC ((bool)(id->idIsDspReloc())) /* Display a constant value if the instruction references one */ if (!isNew) { switch (id->idInsFmt()) { int offs; case IF_MRD_RRD: case IF_MWR_RRD: case IF_MRW_RRD: case IF_RRD_MRD: case IF_RWR_MRD: case IF_RRW_MRD: case IF_MRD_CNS: case IF_MWR_CNS: case IF_MRW_CNS: case IF_MRW_SHF: case IF_MRD: case IF_MWR: case IF_MRW: case IF_MRD_OFF: /* Is this actually a reference to a data section? */ offs = Compiler::eeGetJitDataOffs(id->idAddr()->iiaFieldHnd); if (offs >= 0) { void* addr; /* Display a data section reference */ assert((unsigned)offs < emitConsDsc.dsdOffs); addr = emitConsBlock ? emitConsBlock + offs : nullptr; #if 0 // TODO-XArch-Cleanup: Fix or remove this code. /* Is the operand an integer or floating-point value? */ bool isFP = false; if (CodeGen::instIsFP(id->idIns())) { switch (id->idIns()) { case INS_fild: case INS_fildl: break; default: isFP = true; break; } } if (offs & 1) printf("@CNS%02u", offs); else printf("@RWD%02u", offs); printf(" "); if (addr) { addr = 0; // TODO-XArch-Bug?: // This was busted by switching the order // in which we output the code block vs. // the data blocks -- when we get here, // the data block has not been filled in // yet, so we'll display garbage. if (isFP) { if (id->idOpSize() == EA_4BYTE) printf("DF %f \n", addr ? *(float *)addr : 0); else printf("DQ %lf\n", addr ? *(double *)addr : 0); } else { if (id->idOpSize() <= EA_4BYTE) printf("DD %d \n", addr ? *(int *)addr : 0); else printf("DQ %D \n", addr ? 
*(__int64 *)addr : 0); } } #endif } break; default: break; } } // printf("[F=%s] " , emitIfName(id->idInsFmt())); // printf("INS#%03u: ", id->idDebugOnlyInfo()->idNum); // printf("[S=%02u] " , emitCurStackLvl); if (isNew) printf("[M=%02u] ", emitMaxStackDepth); // printf("[S=%02u] " , emitCurStackLvl/sizeof(INT32)); // printf("[A=%08X] " , emitSimpleStkMask); // printf("[A=%08X] " , emitSimpleByrefStkMask); // printf("[L=%02u] " , id->idCodeSize()); if (!isNew && !asmfm) { doffs = true; } /* Display the instruction address */ emitDispInsAddr(code); /* Display the instruction offset */ emitDispInsOffs(offset, doffs); if (code != nullptr) { /* Display the instruction hex code */ assert(((code >= emitCodeBlock) && (code < emitCodeBlock + emitTotalHotCodeSize)) || ((code >= emitColdCodeBlock) && (code < emitColdCodeBlock + emitTotalColdCodeSize))); emitDispInsHex(id, code + writeableOffset, sz); } /* Display the instruction name */ sstr = codeGen->genInsDisplayName(id); printf(" %-9s", sstr); #ifndef HOST_UNIX if (strnlen_s(sstr, 10) >= 9) #else // HOST_UNIX if (strnlen(sstr, 10) >= 9) #endif // HOST_UNIX { // Make sure there's at least one space after the instruction name, for very long instruction names. 
printf(" "); } /* By now the size better be set to something */ assert(id->idCodeSize() || emitInstHasNoCode(ins)); /* Figure out the operand size */ if (id->idGCref() == GCT_GCREF) { attr = EA_GCREF; sstr = "gword ptr "; } else if (id->idGCref() == GCT_BYREF) { attr = EA_BYREF; sstr = "bword ptr "; } else { attr = id->idOpSize(); sstr = codeGen->genSizeStr(emitGetMemOpSize(id)); if (ins == INS_lea) { #ifdef TARGET_AMD64 assert((attr == EA_4BYTE) || (attr == EA_8BYTE)); #else assert(attr == EA_4BYTE); #endif sstr = ""; } } /* Now see what instruction format we've got */ // First print the implicit register usage if (instrHasImplicitRegPairDest(ins)) { printf("%s:%s, ", emitRegName(REG_EDX, id->idOpSize()), emitRegName(REG_EAX, id->idOpSize())); } else if (instrIs3opImul(ins)) { regNumber tgtReg = inst3opImulReg(ins); printf("%s, ", emitRegName(tgtReg, id->idOpSize())); } switch (id->idInsFmt()) { ssize_t val; ssize_t offs; CnsVal cnsVal; const char* methodName; case IF_CNS: val = emitGetInsSC(id); #ifdef TARGET_AMD64 // no 8-byte immediates allowed here! 
assert((val >= (ssize_t)0xFFFFFFFF80000000LL) && (val <= 0x000000007FFFFFFFLL)); #endif if (id->idIsCnsReloc()) { emitDispReloc(val); } else { PRINT_CONSTANT: ssize_t srcVal = val; // Munge any pointers if we want diff-able disassembly if (emitComp->opts.disDiffable) { ssize_t top14bits = (val >> 18); if ((top14bits != 0) && (top14bits != -1)) { val = 0xD1FFAB1E; } } if ((val > -1000) && (val < 1000)) { printf("%d", val); } else if ((val > 0) || (val < -0xFFFFFF)) { printf("0x%IX", val); } else { // (val < 0) printf("-0x%IX", -val); } emitDispCommentForHandle(srcVal, id->idDebugOnlyInfo()->idFlags); } break; case IF_ARD: case IF_AWR: case IF_ARW: if (id->idIsCallRegPtr()) { printf("%s", emitRegName(id->idAddr()->iiaAddrMode.amBaseReg)); } else { // GC ref bit is for the return value for calls, do not print it before the address mode if ((ins != INS_call) && (ins != INS_tail_i_jmp)) { printf("%s", sstr); } emitDispAddrMode(id, isNew); emitDispShift(ins); } if ((ins == INS_call) || (ins == INS_tail_i_jmp)) { assert(id->idInsFmt() == IF_ARD); /* Ignore indirect calls */ if (id->idDebugOnlyInfo()->idMemCookie == 0) { break; } assert(id->idDebugOnlyInfo()->idMemCookie); if (id->idIsCallRegPtr()) { printf(" ; "); } /* This is a virtual call */ methodName = emitComp->eeGetMethodFullName((CORINFO_METHOD_HANDLE)id->idDebugOnlyInfo()->idMemCookie); printf("%s", methodName); } break; case IF_RRD_ARD: case IF_RWR_ARD: case IF_RRW_ARD: #ifdef TARGET_AMD64 if (ins == INS_movsxd) { attr = EA_8BYTE; } else #endif if (ins == INS_movsx || ins == INS_movzx) { attr = EA_PTRSIZE; } else if ((ins == INS_crc32) && (attr != EA_8BYTE)) { // The idReg1 is always 4 bytes, but the size of idReg2 can vary. 
// This logic ensures that we print `crc32 eax, bx` instead of `crc32 ax, bx` attr = EA_4BYTE; } printf("%s, %s", emitRegName(id->idReg1(), attr), sstr); emitDispAddrMode(id); break; case IF_RRW_ARD_CNS: case IF_RWR_ARD_CNS: { printf("%s, %s", emitRegName(id->idReg1(), attr), sstr); emitDispAddrMode(id); emitGetInsAmdCns(id, &cnsVal); val = cnsVal.cnsVal; printf(", "); if (cnsVal.cnsReloc) { emitDispReloc(val); } else { goto PRINT_CONSTANT; } break; } case IF_AWR_RRD_CNS: { assert(ins == INS_vextracti128 || ins == INS_vextractf128); // vextracti/f128 extracts 128-bit data, so we fix sstr as "xmm ptr" sstr = codeGen->genSizeStr(EA_ATTR(16)); printf(sstr); emitDispAddrMode(id); printf(", %s", emitRegName(id->idReg1(), attr)); emitGetInsAmdCns(id, &cnsVal); val = cnsVal.cnsVal; printf(", "); if (cnsVal.cnsReloc) { emitDispReloc(val); } else { goto PRINT_CONSTANT; } break; } case IF_RWR_RRD_ARD: printf("%s, %s, %s", emitRegName(id->idReg1(), attr), emitRegName(id->idReg2(), attr), sstr); emitDispAddrMode(id); break; case IF_RWR_ARD_RRD: if (ins == INS_vpgatherqd || ins == INS_vgatherqps) { attr = EA_16BYTE; } sstr = codeGen->genSizeStr(EA_ATTR(4)); printf("%s, %s", emitRegName(id->idReg1(), attr), sstr); emitDispAddrMode(id); printf(", %s", emitRegName(id->idReg2(), attr)); break; case IF_RWR_RRD_ARD_CNS: { printf("%s, %s, %s", emitRegName(id->idReg1(), attr), emitRegName(id->idReg2(), attr), sstr); emitDispAddrMode(id); emitGetInsAmdCns(id, &cnsVal); val = cnsVal.cnsVal; printf(", "); if (cnsVal.cnsReloc) { emitDispReloc(val); } else { goto PRINT_CONSTANT; } break; } case IF_RWR_RRD_ARD_RRD: { printf("%s, ", emitRegName(id->idReg1(), attr)); printf("%s, ", emitRegName(id->idReg2(), attr)); emitDispAddrMode(id); emitGetInsAmdCns(id, &cnsVal); val = (cnsVal.cnsVal >> 4) + XMMBASE; printf(", %s", emitRegName((regNumber)val, attr)); break; } case IF_ARD_RRD: case IF_AWR_RRD: case IF_ARW_RRD: printf("%s", sstr); emitDispAddrMode(id); printf(", %s", 
emitRegName(id->idReg1(), attr)); break; case IF_AWR_RRD_RRD: { printf("%s", sstr); emitDispAddrMode(id); printf(", %s", emitRegName(id->idReg1(), attr)); printf(", %s", emitRegName(id->idReg2(), attr)); break; } case IF_ARD_CNS: case IF_AWR_CNS: case IF_ARW_CNS: case IF_ARW_SHF: printf("%s", sstr); emitDispAddrMode(id); emitGetInsAmdCns(id, &cnsVal); val = cnsVal.cnsVal; #ifdef TARGET_AMD64 // no 8-byte immediates allowed here! assert((val >= (ssize_t)0xFFFFFFFF80000000LL) && (val <= 0x000000007FFFFFFFLL)); #endif if (id->idInsFmt() == IF_ARW_SHF) { emitDispShift(ins, (BYTE)val); } else { printf(", "); if (cnsVal.cnsReloc) { emitDispReloc(val); } else { goto PRINT_CONSTANT; } } break; case IF_SRD: case IF_SWR: case IF_SRW: printf("%s", sstr); #if !FEATURE_FIXED_OUT_ARGS if (ins == INS_pop) emitCurStackLvl -= sizeof(int); #endif emitDispFrameRef(id->idAddr()->iiaLclVar.lvaVarNum(), id->idAddr()->iiaLclVar.lvaOffset(), id->idDebugOnlyInfo()->idVarRefOffs, asmfm); #if !FEATURE_FIXED_OUT_ARGS if (ins == INS_pop) emitCurStackLvl += sizeof(int); #endif emitDispShift(ins); break; case IF_SRD_RRD: case IF_SWR_RRD: case IF_SRW_RRD: printf("%s", sstr); emitDispFrameRef(id->idAddr()->iiaLclVar.lvaVarNum(), id->idAddr()->iiaLclVar.lvaOffset(), id->idDebugOnlyInfo()->idVarRefOffs, asmfm); printf(", %s", emitRegName(id->idReg1(), attr)); break; case IF_SRD_CNS: case IF_SWR_CNS: case IF_SRW_CNS: case IF_SRW_SHF: printf("%s", sstr); emitDispFrameRef(id->idAddr()->iiaLclVar.lvaVarNum(), id->idAddr()->iiaLclVar.lvaOffset(), id->idDebugOnlyInfo()->idVarRefOffs, asmfm); emitGetInsCns(id, &cnsVal); val = cnsVal.cnsVal; #ifdef TARGET_AMD64 // no 8-byte immediates allowed here! 
assert((val >= (ssize_t)0xFFFFFFFF80000000LL) && (val <= 0x000000007FFFFFFFLL)); #endif if (id->idInsFmt() == IF_SRW_SHF) { emitDispShift(ins, (BYTE)val); } else { printf(", "); if (cnsVal.cnsReloc) { emitDispReloc(val); } else { goto PRINT_CONSTANT; } } break; case IF_SWR_RRD_CNS: assert(ins == INS_vextracti128 || ins == INS_vextractf128); assert(UseVEXEncoding()); emitGetInsAmdCns(id, &cnsVal); printf("%s", sstr); emitDispFrameRef(id->idAddr()->iiaLclVar.lvaVarNum(), id->idAddr()->iiaLclVar.lvaOffset(), id->idDebugOnlyInfo()->idVarRefOffs, asmfm); printf(", %s", emitRegName(id->idReg1(), attr)); val = cnsVal.cnsVal; printf(", "); if (cnsVal.cnsReloc) { emitDispReloc(val); } else { goto PRINT_CONSTANT; } break; case IF_RRD_SRD: case IF_RWR_SRD: case IF_RRW_SRD: #ifdef TARGET_AMD64 if (ins == INS_movsxd) { attr = EA_8BYTE; } else #endif if (ins == INS_movsx || ins == INS_movzx) { attr = EA_PTRSIZE; } else if ((ins == INS_crc32) && (attr != EA_8BYTE)) { // The idReg1 is always 4 bytes, but the size of idReg2 can vary. 
// This logic ensures that we print `crc32 eax, bx` instead of `crc32 ax, bx` attr = EA_4BYTE; } printf("%s, %s", emitRegName(id->idReg1(), attr), sstr); emitDispFrameRef(id->idAddr()->iiaLclVar.lvaVarNum(), id->idAddr()->iiaLclVar.lvaOffset(), id->idDebugOnlyInfo()->idVarRefOffs, asmfm); break; case IF_RRW_SRD_CNS: case IF_RWR_SRD_CNS: { printf("%s, %s", emitRegName(id->idReg1(), attr), sstr); emitDispFrameRef(id->idAddr()->iiaLclVar.lvaVarNum(), id->idAddr()->iiaLclVar.lvaOffset(), id->idDebugOnlyInfo()->idVarRefOffs, asmfm); emitGetInsCns(id, &cnsVal); val = cnsVal.cnsVal; printf(", "); if (cnsVal.cnsReloc) { emitDispReloc(val); } else { goto PRINT_CONSTANT; } break; } case IF_RWR_RRD_SRD: printf("%s, %s, %s", emitRegName(id->idReg1(), attr), emitRegName(id->idReg2(), attr), sstr); emitDispFrameRef(id->idAddr()->iiaLclVar.lvaVarNum(), id->idAddr()->iiaLclVar.lvaOffset(), id->idDebugOnlyInfo()->idVarRefOffs, asmfm); break; case IF_RWR_RRD_SRD_CNS: { printf("%s, %s, %s", emitRegName(id->idReg1(), attr), emitRegName(id->idReg2(), attr), sstr); emitDispFrameRef(id->idAddr()->iiaLclVar.lvaVarNum(), id->idAddr()->iiaLclVar.lvaOffset(), id->idDebugOnlyInfo()->idVarRefOffs, asmfm); emitGetInsCns(id, &cnsVal); val = cnsVal.cnsVal; printf(", "); if (cnsVal.cnsReloc) { emitDispReloc(val); } else { goto PRINT_CONSTANT; } break; } case IF_RWR_RRD_SRD_RRD: { printf("%s, ", emitRegName(id->idReg1(), attr)); printf("%s, ", emitRegName(id->idReg2(), attr)); emitDispFrameRef(id->idAddr()->iiaLclVar.lvaVarNum(), id->idAddr()->iiaLclVar.lvaOffset(), id->idDebugOnlyInfo()->idVarRefOffs, asmfm); emitGetInsCns(id, &cnsVal); val = (cnsVal.cnsVal >> 4) + XMMBASE; printf(", %s", emitRegName((regNumber)val, attr)); break; } case IF_RRD_RRD: case IF_RWR_RRD: case IF_RRW_RRD: if (ins == INS_pmovmskb) { printf("%s, %s", emitRegName(id->idReg1(), EA_4BYTE), emitRegName(id->idReg2(), attr)); } else if ((ins == INS_cvtsi2ss) || (ins == INS_cvtsi2sd)) { printf(" %s, %s", 
emitRegName(id->idReg1(), EA_16BYTE), emitRegName(id->idReg2(), attr)); } else if ((ins == INS_cvttsd2si) || (ins == INS_cvtss2si) || (ins == INS_cvtsd2si) || (ins == INS_cvttss2si)) { printf(" %s, %s", emitRegName(id->idReg1(), attr), emitRegName(id->idReg2(), EA_16BYTE)); } #ifdef TARGET_AMD64 else if (ins == INS_movsxd) { printf("%s, %s", emitRegName(id->idReg1(), EA_8BYTE), emitRegName(id->idReg2(), EA_4BYTE)); } #endif // TARGET_AMD64 else if (ins == INS_movsx || ins == INS_movzx) { printf("%s, %s", emitRegName(id->idReg1(), EA_PTRSIZE), emitRegName(id->idReg2(), attr)); } else if (ins == INS_bt) { // INS_bt operands are reversed. Display them in the normal order. printf("%s, %s", emitRegName(id->idReg2(), attr), emitRegName(id->idReg1(), attr)); } #ifdef FEATURE_HW_INTRINSICS else if (ins == INS_crc32 && attr != EA_8BYTE) { // The idReg1 is always 4 bytes, but the size of idReg2 can vary. // This logic ensures that we print `crc32 eax, bx` instead of `crc32 ax, bx` printf("%s, %s", emitRegName(id->idReg1(), EA_4BYTE), emitRegName(id->idReg2(), attr)); } #endif // FEATURE_HW_INTRINSICS else { printf("%s, %s", emitRegName(id->idReg1(), attr), emitRegName(id->idReg2(), attr)); } break; case IF_RRW_RRW: assert(ins == INS_xchg); printf("%s,", emitRegName(id->idReg1(), attr)); printf(" %s", emitRegName(id->idReg2(), attr)); break; case IF_RWR_RRD_RRD: { assert(IsAVXInstruction(ins)); assert(IsThreeOperandAVXInstruction(ins)); regNumber reg2 = id->idReg2(); regNumber reg3 = id->idReg3(); if (ins == INS_bextr || ins == INS_bzhi) { // BMI bextr and bzhi encodes the reg2 in VEX.vvvv and reg3 in modRM, // which is different from most of other instructions regNumber tmp = reg2; reg2 = reg3; reg3 = tmp; } printf("%s, ", emitRegName(id->idReg1(), attr)); printf("%s, ", emitRegName(reg2, attr)); printf("%s", emitRegName(reg3, attr)); break; } case IF_RWR_RRD_RRD_CNS: assert(IsAVXInstruction(ins)); assert(IsThreeOperandAVXInstruction(ins)); printf("%s, ", 
emitRegName(id->idReg1(), attr)); printf("%s, ", emitRegName(id->idReg2(), attr)); switch (ins) { case INS_vinsertf128: case INS_vinserti128: { attr = EA_16BYTE; break; } case INS_pinsrb: case INS_pinsrw: case INS_pinsrd: { attr = EA_4BYTE; break; } case INS_pinsrq: { attr = EA_8BYTE; break; } default: { break; } } printf("%s, ", emitRegName(id->idReg3(), attr)); val = emitGetInsSC(id); goto PRINT_CONSTANT; break; case IF_RWR_RRD_RRD_RRD: assert(IsAVXOnlyInstruction(ins)); assert(UseVEXEncoding()); printf("%s, ", emitRegName(id->idReg1(), attr)); printf("%s, ", emitRegName(id->idReg2(), attr)); printf("%s, ", emitRegName(id->idReg3(), attr)); printf("%s", emitRegName(id->idReg4(), attr)); break; case IF_RRW_RRW_CNS: { emitAttr tgtAttr = attr; switch (ins) { case INS_vextractf128: case INS_vextracti128: { tgtAttr = EA_16BYTE; break; } case INS_extractps: case INS_pextrb: case INS_pextrw: case INS_pextrw_sse41: case INS_pextrd: { tgtAttr = EA_4BYTE; break; } case INS_pextrq: { tgtAttr = EA_8BYTE; break; } case INS_pinsrb: case INS_pinsrw: case INS_pinsrd: { attr = EA_4BYTE; break; } case INS_pinsrq: { attr = EA_8BYTE; break; } default: { break; } } printf("%s,", emitRegName(id->idReg1(), tgtAttr)); printf(" %s", emitRegName(id->idReg2(), attr)); val = emitGetInsSC(id); #ifdef TARGET_AMD64 // no 8-byte immediates allowed here! 
assert((val >= (ssize_t)0xFFFFFFFF80000000LL) && (val <= 0x000000007FFFFFFFLL)); #endif printf(", "); if (id->idIsCnsReloc()) { emitDispReloc(val); } else { goto PRINT_CONSTANT; } break; } case IF_RRD: case IF_RWR: case IF_RRW: printf("%s", emitRegName(id->idReg1(), attr)); emitDispShift(ins); break; case IF_RRW_SHF: printf("%s", emitRegName(id->idReg1(), attr)); emitDispShift(ins, (BYTE)emitGetInsSC(id)); break; case IF_RRD_MRD: case IF_RWR_MRD: case IF_RRW_MRD: if (ins == INS_movsx || ins == INS_movzx) { attr = EA_PTRSIZE; } #ifdef TARGET_AMD64 else if (ins == INS_movsxd) { attr = EA_PTRSIZE; } #endif else if ((ins == INS_crc32) && (attr != EA_8BYTE)) { // The idReg1 is always 4 bytes, but the size of idReg2 can vary. // This logic ensures that we print `crc32 eax, bx` instead of `crc32 ax, bx` attr = EA_4BYTE; } printf("%s, %s", emitRegName(id->idReg1(), attr), sstr); offs = emitGetInsDsp(id); emitDispClsVar(id->idAddr()->iiaFieldHnd, offs, ID_INFO_DSP_RELOC); break; case IF_RRW_MRD_CNS: case IF_RWR_MRD_CNS: { printf("%s, %s", emitRegName(id->idReg1(), attr), sstr); offs = emitGetInsDsp(id); emitDispClsVar(id->idAddr()->iiaFieldHnd, offs, ID_INFO_DSP_RELOC); emitGetInsDcmCns(id, &cnsVal); val = cnsVal.cnsVal; printf(", "); if (cnsVal.cnsReloc) { emitDispReloc(val); } else { goto PRINT_CONSTANT; } break; } case IF_MWR_RRD_CNS: { assert(ins == INS_vextracti128 || ins == INS_vextractf128); // vextracti/f128 extracts 128-bit data, so we fix sstr as "xmm ptr" sstr = codeGen->genSizeStr(EA_ATTR(16)); printf(sstr); offs = emitGetInsDsp(id); emitDispClsVar(id->idAddr()->iiaFieldHnd, offs, ID_INFO_DSP_RELOC); printf(", %s", emitRegName(id->idReg1(), attr)); emitGetInsDcmCns(id, &cnsVal); val = cnsVal.cnsVal; printf(", "); if (cnsVal.cnsReloc) { emitDispReloc(val); } else { goto PRINT_CONSTANT; } break; } case IF_RWR_RRD_MRD: printf("%s, %s, %s", emitRegName(id->idReg1(), attr), emitRegName(id->idReg2(), attr), sstr); offs = emitGetInsDsp(id); 
emitDispClsVar(id->idAddr()->iiaFieldHnd, offs, ID_INFO_DSP_RELOC); break; case IF_RWR_RRD_MRD_CNS: { printf("%s, %s, %s", emitRegName(id->idReg1(), attr), emitRegName(id->idReg2(), attr), sstr); offs = emitGetInsDsp(id); emitDispClsVar(id->idAddr()->iiaFieldHnd, offs, ID_INFO_DSP_RELOC); emitGetInsDcmCns(id, &cnsVal); val = cnsVal.cnsVal; printf(", "); if (cnsVal.cnsReloc) { emitDispReloc(val); } else { goto PRINT_CONSTANT; } break; } case IF_RWR_RRD_MRD_RRD: { printf("%s, ", emitRegName(id->idReg1(), attr)); printf("%s, ", emitRegName(id->idReg2(), attr)); offs = emitGetInsDsp(id); emitDispClsVar(id->idAddr()->iiaFieldHnd, offs, ID_INFO_DSP_RELOC); emitGetInsDcmCns(id, &cnsVal); val = (cnsVal.cnsVal >> 4) + XMMBASE; printf(", %s", emitRegName((regNumber)val, attr)); break; } case IF_RWR_MRD_OFF: printf("%s, %s", emitRegName(id->idReg1(), attr), "offset"); offs = emitGetInsDsp(id); emitDispClsVar(id->idAddr()->iiaFieldHnd, offs, ID_INFO_DSP_RELOC); break; case IF_MRD_RRD: case IF_MWR_RRD: case IF_MRW_RRD: printf("%s", sstr); offs = emitGetInsDsp(id); emitDispClsVar(id->idAddr()->iiaFieldHnd, offs, ID_INFO_DSP_RELOC); printf(", %s", emitRegName(id->idReg1(), attr)); break; case IF_MRD_CNS: case IF_MWR_CNS: case IF_MRW_CNS: case IF_MRW_SHF: printf("%s", sstr); offs = emitGetInsDsp(id); emitDispClsVar(id->idAddr()->iiaFieldHnd, offs, ID_INFO_DSP_RELOC); emitGetInsDcmCns(id, &cnsVal); val = cnsVal.cnsVal; #ifdef TARGET_AMD64 // no 8-byte immediates allowed here! 
assert((val >= (ssize_t)0xFFFFFFFF80000000LL) && (val <= 0x000000007FFFFFFFLL)); #endif if (cnsVal.cnsReloc) { emitDispReloc(val); } else if (id->idInsFmt() == IF_MRW_SHF) { emitDispShift(ins, (BYTE)val); } else { printf(", "); goto PRINT_CONSTANT; } break; case IF_MRD: case IF_MWR: case IF_MRW: printf("%s", sstr); offs = emitGetInsDsp(id); emitDispClsVar(id->idAddr()->iiaFieldHnd, offs, ID_INFO_DSP_RELOC); emitDispShift(ins); break; case IF_MRD_OFF: printf("offset "); offs = emitGetInsDsp(id); emitDispClsVar(id->idAddr()->iiaFieldHnd, offs, ID_INFO_DSP_RELOC); break; case IF_RRD_CNS: case IF_RWR_CNS: case IF_RRW_CNS: printf("%s, ", emitRegName(id->idReg1(), attr)); val = emitGetInsSC(id); if (id->idIsCnsReloc()) { emitDispReloc(val); } else { goto PRINT_CONSTANT; } break; case IF_LABEL: case IF_RWR_LABEL: case IF_SWR_LABEL: if (ins == INS_lea) { printf("%s, ", emitRegName(id->idReg1(), attr)); } else if (ins == INS_mov) { /* mov dword ptr [frame.callSiteReturnAddress], label */ assert(id->idInsFmt() == IF_SWR_LABEL); instrDescLbl* idlbl = (instrDescLbl*)id; emitDispFrameRef(idlbl->dstLclVar.lvaVarNum(), idlbl->dstLclVar.lvaOffset(), 0, asmfm); printf(", "); } if (((instrDescJmp*)id)->idjShort) { printf("SHORT "); } if (id->idIsBound()) { if (id->idAddr()->iiaHasInstrCount()) { printf("%3d instr", id->idAddr()->iiaGetInstrCount()); } else { emitPrintLabel(id->idAddr()->iiaIGlabel); } } else { printf("L_M%03u_" FMT_BB, emitComp->compMethodID, id->idAddr()->iiaBBlabel->bbNum); } break; case IF_METHOD: case IF_METHPTR: methodName = emitComp->eeGetMethodFullName((CORINFO_METHOD_HANDLE)id->idDebugOnlyInfo()->idMemCookie); if (id->idInsFmt() == IF_METHPTR) { printf("["); } printf("%s", methodName); if (id->idInsFmt() == IF_METHPTR) { printf("]"); } break; case IF_NONE: #if FEATURE_LOOP_ALIGN if (ins == INS_align) { instrDescAlign* alignInstrId = (instrDescAlign*)id; printf("[%d bytes", alignInstrId->idCodeSize()); // targetIG is only set for 1st of the series of align 
            // idaLoopHeadPredIG / loopHeadIG() are only set for the first in a series of align instructions
            if ((alignInstrId->idaLoopHeadPredIG != nullptr) && (alignInstrId->loopHeadIG() != nullptr))
            {
                printf(" for IG%02u", alignInstrId->loopHeadIG()->igNum);
            }
            printf("]");
        }
#endif
            break;

        default:
            printf("unexpected format %s", emitIfName(id->idInsFmt()));
            assert(!"unexpectedFormat");
            break;
    }

    if (sz != 0 && sz != id->idCodeSize() && (!asmfm || emitComp->verbose))
    {
        // Code size in the instrDesc is different from the actual code size we've been given!
        printf(" (ECS:%d, ACS:%d)", id->idCodeSize(), sz);
    }

    printf("\n");
}

/*****************************************************************************/
#endif

/*****************************************************************************
 *
 *  Output nBytes bytes of NOP instructions to the (writeable) buffer at dstRW.
 *
 *  Arguments:
 *      dstRW  - writeable destination buffer to emit into
 *      nBytes - number of padding bytes to emit; must be <= 15
 *
 *  Returns the advanced buffer pointer (dstRW + nBytes).
 */

static BYTE* emitOutputNOP(BYTE* dstRW, size_t nBytes)
{
    assert(nBytes <= 15);

#ifndef TARGET_AMD64
    // TODO-X86-CQ: when VIA C3 CPU's are out of circulation, switch to the
    // more efficient real NOP: 0x0F 0x1F +modR/M
    // Also can't use AMD recommended, multiple size prefixes (i.e. 0x66 0x66 0x90 for 3 byte NOP)
    // because debugger and msdis don't like it, so maybe VIA doesn't either
    // So instead just stick to repeating single byte nops
    //
    // The cases deliberately fall through: starting at 'nBytes' and falling to
    // 'case 1' emits exactly nBytes copies of the single-byte NOP (0x90).

    switch (nBytes)
    {
        case 15:
            *dstRW++ = 0x90;
            FALLTHROUGH;
        case 14:
            *dstRW++ = 0x90;
            FALLTHROUGH;
        case 13:
            *dstRW++ = 0x90;
            FALLTHROUGH;
        case 12:
            *dstRW++ = 0x90;
            FALLTHROUGH;
        case 11:
            *dstRW++ = 0x90;
            FALLTHROUGH;
        case 10:
            *dstRW++ = 0x90;
            FALLTHROUGH;
        case 9:
            *dstRW++ = 0x90;
            FALLTHROUGH;
        case 8:
            *dstRW++ = 0x90;
            FALLTHROUGH;
        case 7:
            *dstRW++ = 0x90;
            FALLTHROUGH;
        case 6:
            *dstRW++ = 0x90;
            FALLTHROUGH;
        case 5:
            *dstRW++ = 0x90;
            FALLTHROUGH;
        case 4:
            *dstRW++ = 0x90;
            FALLTHROUGH;
        case 3:
            *dstRW++ = 0x90;
            FALLTHROUGH;
        case 2:
            *dstRW++ = 0x90;
            FALLTHROUGH;
        case 1:
            *dstRW++ = 0x90;
            break;
        case 0:
            break;
    }
#else  // TARGET_AMD64
    // On AMD64 use the multi-byte NOP forms (0x0F 0x1F /0 with varying ModRM/SIB/disp),
    // optionally widened with 0x66 operand-size prefixes; larger paddings are built by
    // recursively emitting two NOPs (see the 12-15 byte cases below).
    switch (nBytes)
    {
        case 2:
            *dstRW++ = 0x66;
            FALLTHROUGH;
        case 1:
            *dstRW++ = 0x90;
            break;
        case 0:
            break;
        case 3:
            *dstRW++ = 0x0F;
            *dstRW++ = 0x1F;
            *dstRW++ = 0x00;
            break;
        case 4:
            *dstRW++ = 0x0F;
            *dstRW++ = 0x1F;
            *dstRW++ = 0x40;
            *dstRW++ = 0x00;
            break;
        case 6:
            *dstRW++ = 0x66;
            FALLTHROUGH;
        case 5:
            *dstRW++ = 0x0F;
            *dstRW++ = 0x1F;
            *dstRW++ = 0x44;
            *dstRW++ = 0x00;
            *dstRW++ = 0x00;
            break;
        case 7:
            *dstRW++ = 0x0F;
            *dstRW++ = 0x1F;
            *dstRW++ = 0x80;
            *dstRW++ = 0x00;
            *dstRW++ = 0x00;
            *dstRW++ = 0x00;
            *dstRW++ = 0x00;
            break;
        case 15:
            // More than 3 prefixes is slower than just 2 NOPs
            dstRW = emitOutputNOP(emitOutputNOP(dstRW, 7), 8);
            break;
        case 14:
            // More than 3 prefixes is slower than just 2 NOPs
            dstRW = emitOutputNOP(emitOutputNOP(dstRW, 7), 7);
            break;
        case 13:
            // More than 3 prefixes is slower than just 2 NOPs
            dstRW = emitOutputNOP(emitOutputNOP(dstRW, 5), 8);
            break;
        case 12:
            // More than 3 prefixes is slower than just 2 NOPs
            dstRW = emitOutputNOP(emitOutputNOP(dstRW, 4), 8);
            break;
        case 11:
            *dstRW++ = 0x66;
            FALLTHROUGH;
        case 10:
            *dstRW++ = 0x66;
            FALLTHROUGH;
        case 9:
            *dstRW++ = 0x66;
            FALLTHROUGH;
        case 8:
            *dstRW++ = 0x0F;
            *dstRW++ = 0x1F;
            *dstRW++ = 0x84;
            *dstRW++ = 0x00;
            *dstRW++ = 0x00;
            *dstRW++ = 0x00;
            *dstRW++ = 0x00;
            *dstRW++ = 0x00;
            break;
    }
#endif // TARGET_AMD64

    return dstRW;
}

//--------------------------------------------------------------------
// emitOutputAlign: Outputs NOP to align the loop
//
// Arguments:
//    ig  - Current instruction group
//    id  - align instruction that holds amount of padding (NOPs) to add
//    dst - Destination buffer
//
// Return Value:
//    Updated (non-writeable) destination buffer pointer.
//
// Notes:
//    Amount of padding needed to align the loop is already calculated. This
//    method extracts that information and inserts suitable NOP instructions.
//
BYTE* emitter::emitOutputAlign(insGroup* ig, instrDesc* id, BYTE* dst)
{
    instrDescAlign* alignInstr = (instrDescAlign*)id;

#ifdef DEBUG
    // For cases where 'align' was placed behind a 'jmp' in an IG that does not
    // immediately precede the loop IG, we do not know in advance the offset of
    // IG having loop. For such cases, skip the padding calculation validation.
    bool validatePadding = !alignInstr->isPlacedAfterJmp;
#endif

    // Candidate for loop alignment
    assert(codeGen->ShouldAlignLoops());
    assert(ig->endsWithAlignInstr());

    unsigned paddingToAdd = id->idCodeSize();

    // Either things are already aligned or align them here.
    assert(!validatePadding || (paddingToAdd == 0) ||
           (((size_t)dst & (emitComp->opts.compJitAlignLoopBoundary - 1)) != 0));

    // Padding amount should not exceed the alignment boundary
    assert(0 <= paddingToAdd && paddingToAdd < emitComp->opts.compJitAlignLoopBoundary);

#ifdef DEBUG
    if (validatePadding)
    {
        unsigned paddingNeeded =
            emitCalculatePaddingForLoopAlignment(((instrDescAlign*)id)->idaIG->igNext, (size_t)dst, true);

        // For non-adaptive, padding size is spread in multiple instructions, so don't bother checking
        if (emitComp->opts.compJitAlignLoopAdaptive)
        {
            assert(paddingToAdd == paddingNeeded);
        }
    }

    emitComp->loopsAligned++;
#endif

    // All byte output below goes through the writeable mapping of the code buffer.
    BYTE* dstRW = dst + writeableOffset;

#ifdef DEBUG
    // Under STRESS_EMITTER, if this is the 'align' before the 'jmp' instruction,
    // then add "int3" instruction.
    // Since int3 takes 1 byte, we would only add
    // it if paddingToAdd >= 1 byte.
    if (emitComp->compStressCompile(Compiler::STRESS_EMITTER, 50) && alignInstr->isPlacedAfterJmp &&
        paddingToAdd >= 1)
    {
        size_t int3Code = insCodeMR(INS_BREAKPOINT);
        // There is no good way to squeeze in "int3" as well as display it
        // in the disassembly because there is no corresponding instrDesc for
        // it. As such, leave it as is, the "0xCC" bytecode will be seen next
        // to the nop instruction in disasm.
        // e.g. CC  align  [1 bytes for IG29]
        //
        // if (emitComp->opts.disAsm)
        //{
        //    emitDispInsAddr(dstRW);
        //    emitDispInsOffs(0, false);
        //    printf("                      %-9s  ; stress-mode injected interrupt\n", "int3");
        //}
        dstRW += emitOutputByte(dstRW, int3Code);
        paddingToAdd -= 1;
    }
#endif

    // Emit the (possibly int3-reduced) padding as NOPs and convert back from the
    // writeable mapping to the caller's view of the destination buffer.
    dstRW = emitOutputNOP(dstRW, paddingToAdd);

    return dstRW - writeableOffset;
}

/*****************************************************************************
 *
 *  Output an instruction involving an address mode.
 *
 *  Arguments:
 *      dst  - destination buffer to emit the encoded instruction into
 *      id   - instruction descriptor carrying the opcode, registers and
 *             address-mode (base/index/scale/displacement) information
 *      code - (partially assembled) opcode bits for the instruction
 *      addc - optional immediate constant operand, or nullptr if none
 *
 *  Returns the advanced destination buffer pointer.
 */

BYTE* emitter::emitOutputAM(BYTE* dst, instrDesc* id, code_t code, CnsVal* addc)
{
    regNumber reg;
    regNumber rgx;
    ssize_t   dsp;
    bool      dspInByte;
    bool      dspIsZero;
    bool      isMoffset = false;

    instruction ins  = id->idIns();
    emitAttr    size = id->idOpSize();
    size_t      opsz = EA_SIZE_IN_BYTES(size);

    // Get the base/index registers
    reg = id->idAddr()->iiaAddrMode.amBaseReg;
    rgx = id->idAddr()->iiaAddrMode.amIndxReg;

    // For INS_call the instruction size is actually the return value size
    if ((ins == INS_call) || (ins == INS_tail_i_jmp))
    {
        if (ins == INS_tail_i_jmp)
        {
            // tail call with addressing mode (or through register) needs rex.w
            // prefix to be recognized by unwinder as part of epilog.
code = AddRexWPrefix(ins, code); } // Special case: call via a register if (id->idIsCallRegPtr()) { code = insEncodeMRreg(ins, reg, EA_PTRSIZE, code); dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code); dst += emitOutputWord(dst, code); goto DONE; } // The displacement field is in an unusual place for calls dsp = emitGetInsCIdisp(id); #ifdef TARGET_AMD64 // Compute the REX prefix if it exists if (IsExtendedReg(reg, EA_PTRSIZE)) { insEncodeReg012(ins, reg, EA_PTRSIZE, &code); // TODO-Cleanup: stop casting RegEncoding() back to a regNumber. reg = (regNumber)RegEncoding(reg); } if (IsExtendedReg(rgx, EA_PTRSIZE)) { insEncodeRegSIB(ins, rgx, &code); // TODO-Cleanup: stop casting RegEncoding() back to a regNumber. rgx = (regNumber)RegEncoding(rgx); } // And emit the REX prefix dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code); #endif // TARGET_AMD64 goto GOT_DSP; } // `addc` is used for two kinds if instructions // 1. ins like ADD that can have reg/mem and const versions both and const version needs to modify the opcode for // large constant operand (e.g., imm32) // 2. certain SSE/AVX ins have const operand as control bits that is always 1-Byte (imm8) even if `size` > 1-Byte if (addc && (size > EA_1BYTE)) { ssize_t cval = addc->cnsVal; // Does the constant fit in a byte? // SSE/AVX do not need to modify opcode if ((signed char)cval == cval && addc->cnsReloc == false && ins != INS_mov && ins != INS_test) { if (id->idInsFmt() != IF_ARW_SHF && !IsSSEOrAVXInstruction(ins)) { code |= 2; } opsz = 1; } } #ifdef TARGET_X86 else { // Special case: "mov eax, [addr]" and "mov [addr], eax" // Amd64: this is one case where addr can be 64-bit in size. This is // currently unused or not enabled on amd64 as it always uses RIP // relative addressing which results in smaller instruction size. 
if ((ins == INS_mov) && (id->idReg1() == REG_EAX) && (reg == REG_NA) && (rgx == REG_NA)) { switch (id->idInsFmt()) { case IF_RWR_ARD: assert(code == (insCodeRM(ins) | (insEncodeReg345(ins, REG_EAX, EA_PTRSIZE, NULL) << 8))); code &= ~((code_t)0xFFFFFFFF); code |= 0xA0; isMoffset = true; break; case IF_AWR_RRD: assert(code == (insCodeMR(ins) | (insEncodeReg345(ins, REG_EAX, EA_PTRSIZE, NULL) << 8))); code &= ~((code_t)0xFFFFFFFF); code |= 0xA2; isMoffset = true; break; default: break; } } } #endif // TARGET_X86 // Emit VEX prefix if required // There are some callers who already add VEX prefix and call this routine. // Therefore, add VEX prefix is one is not already present. code = AddVexPrefixIfNeededAndNotPresent(ins, code, size); // For this format, moves do not support a third operand, so we only need to handle the binary ops. if (TakesVexPrefix(ins)) { if (IsDstDstSrcAVXInstruction(ins)) { regNumber src1 = REG_NA; switch (id->idInsFmt()) { case IF_RWR_RRD_ARD: case IF_RWR_ARD_RRD: case IF_RWR_RRD_ARD_CNS: case IF_RWR_RRD_ARD_RRD: { src1 = id->idReg2(); break; } default: { src1 = id->idReg1(); break; } } // encode source operand reg in 'vvvv' bits in 1's complement form code = insEncodeReg3456(ins, src1, size, code); } else if (IsDstSrcSrcAVXInstruction(ins)) { code = insEncodeReg3456(ins, id->idReg2(), size, code); } } // Emit the REX prefix if required if (TakesRexWPrefix(ins, size)) { code = AddRexWPrefix(ins, code); } if (IsExtendedReg(reg, EA_PTRSIZE)) { insEncodeReg012(ins, reg, EA_PTRSIZE, &code); // TODO-Cleanup: stop casting RegEncoding() back to a regNumber. reg = (regNumber)RegEncoding(reg); } if (IsExtendedReg(rgx, EA_PTRSIZE)) { insEncodeRegSIB(ins, rgx, &code); // TODO-Cleanup: stop casting RegEncoding() back to a regNumber. 
rgx = (regNumber)RegEncoding(rgx); } // Special case emitting AVX instructions if (EncodedBySSE38orSSE3A(ins) || (ins == INS_crc32)) { if ((ins == INS_crc32) && (size > EA_1BYTE)) { code |= 0x0100; if (size == EA_2BYTE) { dst += emitOutputByte(dst, 0x66); } } regNumber reg345 = REG_NA; if (IsBMIInstruction(ins)) { reg345 = getBmiRegNumber(ins); } if (reg345 == REG_NA) { switch (id->idInsFmt()) { case IF_AWR_RRD_RRD: { reg345 = id->idReg2(); break; } default: { reg345 = id->idReg1(); break; } } } unsigned regcode = insEncodeReg345(ins, reg345, size, &code); dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code); if (UseVEXEncoding() && (ins != INS_crc32)) { // Emit last opcode byte // TODO-XArch-CQ: Right now support 4-byte opcode instructions only assert((code & 0xFF) == 0); dst += emitOutputByte(dst, (code >> 8) & 0xFF); } else { dst += emitOutputWord(dst, code >> 16); dst += emitOutputWord(dst, code & 0xFFFF); } code = regcode; } // Is this a 'big' opcode? else if (code & 0xFF000000) { // Output the REX prefix dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code); // Output the highest word of the opcode // We need to check again as in case of AVX instructions leading opcode bytes are stripped off // and encoded as part of VEX prefix. if (code & 0xFF000000) { dst += emitOutputWord(dst, code >> 16); code &= 0x0000FFFF; } } else if (code & 0x00FF0000) { // BT supports 16 bit operands and this code doesn't handle the necessary 66 prefix. assert(ins != INS_bt); // Output the REX prefix dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code); // Output the highest byte of the opcode if (code & 0x00FF0000) { dst += emitOutputByte(dst, code >> 16); code &= 0x0000FFFF; } // Use the large version if this is not a byte. This trick will not // work in case of SSE2 and AVX instructions. 
if ((size != EA_1BYTE) && HasRegularWideForm(ins)) { code |= 0x1; } } else if (CodeGen::instIsFP(ins)) { assert(size == EA_4BYTE || size == EA_8BYTE); if (size == EA_8BYTE) { code += 4; } } else if (!IsSSEInstruction(ins) && !IsAVXInstruction(ins)) { /* Is the operand size larger than a byte? */ switch (size) { case EA_1BYTE: break; case EA_2BYTE: /* Output a size prefix for a 16-bit operand */ dst += emitOutputByte(dst, 0x66); FALLTHROUGH; case EA_4BYTE: #ifdef TARGET_AMD64 case EA_8BYTE: #endif /* Set the 'w' bit to get the large version */ code |= 0x1; break; #ifdef TARGET_X86 case EA_8BYTE: /* Double operand - set the appropriate bit */ code |= 0x04; break; #endif // TARGET_X86 default: NO_WAY("unexpected size"); break; } } // Output the REX prefix dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code); // Get the displacement value dsp = emitGetInsAmdAny(id); GOT_DSP: dspInByte = ((signed char)dsp == (ssize_t)dsp); dspIsZero = (dsp == 0); if (id->idIsDspReloc()) { dspInByte = false; // relocs can't be placed in a byte } if (isMoffset) { #ifdef TARGET_AMD64 // This code path should never be hit on amd64 since it always uses RIP relative addressing. // In future if ever there is a need to enable this special case, also enable the logic // that sets isMoffset to true on amd64. unreached(); #else // TARGET_X86 dst += emitOutputByte(dst, code); dst += emitOutputSizeT(dst, dsp); if (id->idIsDspReloc()) { emitRecordRelocation((void*)(dst - TARGET_POINTER_SIZE), (void*)dsp, IMAGE_REL_BASED_MOFFSET); } #endif // TARGET_X86 } // Is there a [scaled] index component? 
else if (rgx == REG_NA) { // The address is of the form "[reg+disp]" switch (reg) { case REG_NA: { if (id->idIsDspReloc()) { INT32 addlDelta = 0; // The address is of the form "[disp]" // On x86 - disp is relative to zero // On Amd64 - disp is relative to RIP if (EncodedBySSE38orSSE3A(ins) || (ins == INS_crc32)) { dst += emitOutputByte(dst, code | 0x05); } else { dst += emitOutputWord(dst, code | 0x0500); } if (addc) { // It is of the form "ins [disp], imm" or "ins reg, [disp], imm" // For emitting relocation, we also need to take into account of the // additional bytes of code emitted for immed val. ssize_t cval = addc->cnsVal; #ifdef TARGET_AMD64 // all these opcodes only take a sign-extended 4-byte immediate noway_assert(opsz < 8 || ((int)cval == cval && !addc->cnsReloc)); #else // TARGET_X86 noway_assert(opsz <= 4); #endif // TARGET_X86 switch (opsz) { case 0: case 4: case 8: addlDelta = -4; break; case 2: addlDelta = -2; break; case 1: addlDelta = -1; break; default: assert(!"unexpected operand size"); unreached(); } } #ifdef TARGET_AMD64 // We emit zero on Amd64, to avoid the assert in emitOutputLong() dst += emitOutputLong(dst, 0); #else dst += emitOutputLong(dst, dsp); #endif emitRecordRelocation((void*)(dst - sizeof(INT32)), (void*)dsp, IMAGE_REL_BASED_DISP32, 0, addlDelta); } else { #ifdef TARGET_X86 if (EncodedBySSE38orSSE3A(ins) || (ins == INS_crc32)) { dst += emitOutputByte(dst, code | 0x05); } else { dst += emitOutputWord(dst, code | 0x0500); } #else // TARGET_AMD64 // Amd64: addr fits within 32-bits and can be encoded as a displacement relative to zero. // This addr mode should never be used while generating relocatable ngen code nor if // the addr can be encoded as pc-relative address. noway_assert(!emitComp->opts.compReloc); noway_assert(codeGen->genAddrRelocTypeHint((size_t)dsp) != IMAGE_REL_BASED_REL32); noway_assert((int)dsp == dsp); // This requires, specifying a SIB byte after ModRM byte. 
if (EncodedBySSE38orSSE3A(ins) || (ins == INS_crc32)) { dst += emitOutputByte(dst, code | 0x04); } else { dst += emitOutputWord(dst, code | 0x0400); } dst += emitOutputByte(dst, 0x25); #endif // TARGET_AMD64 dst += emitOutputLong(dst, dsp); } break; } case REG_EBP: { if (EncodedBySSE38orSSE3A(ins) || (ins == INS_crc32)) { // Does the offset fit in a byte? if (dspInByte) { dst += emitOutputByte(dst, code | 0x45); dst += emitOutputByte(dst, dsp); } else { dst += emitOutputByte(dst, code | 0x85); dst += emitOutputLong(dst, dsp); if (id->idIsDspReloc()) { emitRecordRelocation((void*)(dst - sizeof(INT32)), (void*)dsp, IMAGE_REL_BASED_HIGHLOW); } } } else { // Does the offset fit in a byte? if (dspInByte) { dst += emitOutputWord(dst, code | 0x4500); dst += emitOutputByte(dst, dsp); } else { dst += emitOutputWord(dst, code | 0x8500); dst += emitOutputLong(dst, dsp); if (id->idIsDspReloc()) { emitRecordRelocation((void*)(dst - sizeof(INT32)), (void*)dsp, IMAGE_REL_BASED_HIGHLOW); } } } break; } case REG_ESP: { if (EncodedBySSE38orSSE3A(ins) || (ins == INS_crc32)) { // Is the offset 0 or does it at least fit in a byte? if (dspIsZero) { dst += emitOutputByte(dst, code | 0x04); dst += emitOutputByte(dst, 0x24); } else if (dspInByte) { dst += emitOutputByte(dst, code | 0x44); dst += emitOutputByte(dst, 0x24); dst += emitOutputByte(dst, dsp); } else { dst += emitOutputByte(dst, code | 0x84); dst += emitOutputByte(dst, 0x24); dst += emitOutputLong(dst, dsp); if (id->idIsDspReloc()) { emitRecordRelocation((void*)(dst - sizeof(INT32)), (void*)dsp, IMAGE_REL_BASED_HIGHLOW); } } } else { // Is the offset 0 or does it at least fit in a byte? 
if (dspIsZero) { dst += emitOutputWord(dst, code | 0x0400); dst += emitOutputByte(dst, 0x24); } else if (dspInByte) { dst += emitOutputWord(dst, code | 0x4400); dst += emitOutputByte(dst, 0x24); dst += emitOutputByte(dst, dsp); } else { dst += emitOutputWord(dst, code | 0x8400); dst += emitOutputByte(dst, 0x24); dst += emitOutputLong(dst, dsp); if (id->idIsDspReloc()) { emitRecordRelocation((void*)(dst - sizeof(INT32)), (void*)dsp, IMAGE_REL_BASED_HIGHLOW); } } } break; } default: { if (EncodedBySSE38orSSE3A(ins) || (ins == INS_crc32)) { // Put the register in the opcode code |= insEncodeReg012(ins, reg, EA_PTRSIZE, nullptr); // Is there a displacement? if (dspIsZero) { // This is simply "[reg]" dst += emitOutputByte(dst, code); } else { // This is [reg + dsp]" -- does the offset fit in a byte? if (dspInByte) { dst += emitOutputByte(dst, code | 0x40); dst += emitOutputByte(dst, dsp); } else { dst += emitOutputByte(dst, code | 0x80); dst += emitOutputLong(dst, dsp); if (id->idIsDspReloc()) { emitRecordRelocation((void*)(dst - sizeof(INT32)), (void*)dsp, IMAGE_REL_BASED_HIGHLOW); } } } } else { // Put the register in the opcode code |= insEncodeReg012(ins, reg, EA_PTRSIZE, nullptr) << 8; // Is there a displacement? if (dspIsZero) { // This is simply "[reg]" dst += emitOutputWord(dst, code); } else { // This is [reg + dsp]" -- does the offset fit in a byte? if (dspInByte) { dst += emitOutputWord(dst, code | 0x4000); dst += emitOutputByte(dst, dsp); } else { dst += emitOutputWord(dst, code | 0x8000); dst += emitOutputLong(dst, dsp); if (id->idIsDspReloc()) { emitRecordRelocation((void*)(dst - sizeof(INT32)), (void*)dsp, IMAGE_REL_BASED_HIGHLOW); } } } } break; } } } else { unsigned regByte; // We have a scaled index operand unsigned mul = emitDecodeScale(id->idAddr()->iiaAddrMode.amScale); // Is the index operand scaled? if (mul > 1) { // Is there a base register? 
if (reg != REG_NA) { // The address is "[reg + {2/4/8} * rgx + icon]" regByte = insEncodeReg012(ins, reg, EA_PTRSIZE, nullptr) | insEncodeReg345(ins, rgx, EA_PTRSIZE, nullptr) | insSSval(mul); if (EncodedBySSE38orSSE3A(ins) || (ins == INS_crc32)) { // Emit [ebp + {2/4/8} * rgz] as [ebp + {2/4/8} * rgx + 0] if (dspIsZero && reg != REG_EBP) { // The address is "[reg + {2/4/8} * rgx]" dst += emitOutputByte(dst, code | 0x04); dst += emitOutputByte(dst, regByte); } else { // The address is "[reg + {2/4/8} * rgx + disp]" if (dspInByte) { dst += emitOutputByte(dst, code | 0x44); dst += emitOutputByte(dst, regByte); dst += emitOutputByte(dst, dsp); } else { dst += emitOutputByte(dst, code | 0x84); dst += emitOutputByte(dst, regByte); dst += emitOutputLong(dst, dsp); if (id->idIsDspReloc()) { emitRecordRelocation((void*)(dst - sizeof(INT32)), (void*)dsp, IMAGE_REL_BASED_HIGHLOW); } } } } else { // Emit [ebp + {2/4/8} * rgz] as [ebp + {2/4/8} * rgx + 0] if (dspIsZero && reg != REG_EBP) { // The address is "[reg + {2/4/8} * rgx]" dst += emitOutputWord(dst, code | 0x0400); dst += emitOutputByte(dst, regByte); } else { // The address is "[reg + {2/4/8} * rgx + disp]" if (dspInByte) { dst += emitOutputWord(dst, code | 0x4400); dst += emitOutputByte(dst, regByte); dst += emitOutputByte(dst, dsp); } else { dst += emitOutputWord(dst, code | 0x8400); dst += emitOutputByte(dst, regByte); dst += emitOutputLong(dst, dsp); if (id->idIsDspReloc()) { emitRecordRelocation((void*)(dst - sizeof(INT32)), (void*)dsp, IMAGE_REL_BASED_HIGHLOW); } } } } } else { // The address is "[{2/4/8} * rgx + icon]" regByte = insEncodeReg012(ins, REG_EBP, EA_PTRSIZE, nullptr) | insEncodeReg345(ins, rgx, EA_PTRSIZE, nullptr) | insSSval(mul); if (EncodedBySSE38orSSE3A(ins) || (ins == INS_crc32)) { dst += emitOutputByte(dst, code | 0x04); } else { dst += emitOutputWord(dst, code | 0x0400); } dst += emitOutputByte(dst, regByte); // Special case: jump through a jump table if (ins == INS_i_jmp) { dsp += 
(size_t)emitConsBlock; } dst += emitOutputLong(dst, dsp); if (id->idIsDspReloc()) { emitRecordRelocation((void*)(dst - sizeof(INT32)), (void*)dsp, IMAGE_REL_BASED_HIGHLOW); } } } else { // The address is "[reg+rgx+dsp]" regByte = insEncodeReg012(ins, reg, EA_PTRSIZE, nullptr) | insEncodeReg345(ins, rgx, EA_PTRSIZE, nullptr); if (EncodedBySSE38orSSE3A(ins) || (ins == INS_crc32)) { if (dspIsZero && reg != REG_EBP) { // This is [reg+rgx]" dst += emitOutputByte(dst, code | 0x04); dst += emitOutputByte(dst, regByte); } else { // This is [reg+rgx+dsp]" -- does the offset fit in a byte? if (dspInByte) { dst += emitOutputByte(dst, code | 0x44); dst += emitOutputByte(dst, regByte); dst += emitOutputByte(dst, dsp); } else { dst += emitOutputByte(dst, code | 0x84); dst += emitOutputByte(dst, regByte); dst += emitOutputLong(dst, dsp); if (id->idIsDspReloc()) { emitRecordRelocation((void*)(dst - sizeof(INT32)), (void*)dsp, IMAGE_REL_BASED_HIGHLOW); } } } } else { if (dspIsZero && reg != REG_EBP) { // This is [reg+rgx]" dst += emitOutputWord(dst, code | 0x0400); dst += emitOutputByte(dst, regByte); } else { // This is [reg+rgx+dsp]" -- does the offset fit in a byte? 
if (dspInByte) { dst += emitOutputWord(dst, code | 0x4400); dst += emitOutputByte(dst, regByte); dst += emitOutputByte(dst, dsp); } else { dst += emitOutputWord(dst, code | 0x8400); dst += emitOutputByte(dst, regByte); dst += emitOutputLong(dst, dsp); if (id->idIsDspReloc()) { emitRecordRelocation((void*)(dst - sizeof(INT32)), (void*)dsp, IMAGE_REL_BASED_HIGHLOW); } } } } } } // Now generate the constant value, if present if (addc) { ssize_t cval = addc->cnsVal; #ifdef TARGET_AMD64 // all these opcodes only take a sign-extended 4-byte immediate noway_assert(opsz < 8 || ((int)cval == cval && !addc->cnsReloc)); #endif switch (opsz) { case 0: case 4: case 8: dst += emitOutputLong(dst, cval); break; case 2: dst += emitOutputWord(dst, cval); break; case 1: dst += emitOutputByte(dst, cval); break; default: assert(!"unexpected operand size"); } if (addc->cnsReloc) { emitRecordRelocation((void*)(dst - sizeof(INT32)), (void*)(size_t)cval, IMAGE_REL_BASED_HIGHLOW); assert(opsz == 4); } } DONE: // Does this instruction operate on a GC ref value? 
    if (id->idGCref())
    {
        switch (id->idInsFmt())
        {
            case IF_ARD:
            case IF_AWR:
            case IF_ARW:
                break;

            case IF_RRD_ARD:
                break;

            case IF_RWR_ARD:
                emitGCregLiveUpd(id->idGCref(), id->idReg1(), dst);
                break;

            case IF_RRW_ARD:
                // Mark the destination register as holding a GCT_BYREF
                assert(id->idGCref() == GCT_BYREF && (ins == INS_add || ins == INS_sub));
                emitGCregLiveUpd(GCT_BYREF, id->idReg1(), dst);
                break;

            case IF_ARD_RRD:
            case IF_AWR_RRD:
                break;

            case IF_AWR_RRD_RRD:
                break;

            case IF_ARD_CNS:
            case IF_AWR_CNS:
                break;

            case IF_ARW_RRD:
            case IF_ARW_CNS:
                assert(id->idGCref() == GCT_BYREF && (ins == INS_add || ins == INS_sub));
                break;

            default:
#ifdef DEBUG
                emitDispIns(id, false, false, false);
#endif
                assert(!"unexpected GC ref instruction format");
        }

        // mul can never produce a GC ref
        assert(!instrIs3opImul(ins));
        assert(ins != INS_mulEAX && ins != INS_imulEAX);
    }
    else
    {
        if (!emitInsCanOnlyWriteSSE2OrAVXReg(id))
        {
            switch (id->idInsFmt())
            {
                case IF_RWR_ARD:
                case IF_RRW_ARD:
                case IF_RWR_RRD_ARD:
                    emitGCregDeadUpd(id->idReg1(), dst);
                    break;

                default:
                    break;
            }

            if (ins == INS_mulEAX || ins == INS_imulEAX)
            {
                emitGCregDeadUpd(REG_EAX, dst);
                emitGCregDeadUpd(REG_EDX, dst);
            }

            // For the three operand imul instruction the target register
            // is encoded in the opcode

            if (instrIs3opImul(ins))
            {
                regNumber tgtReg = inst3opImulReg(ins);
                emitGCregDeadUpd(tgtReg, dst);
            }
        }
    }

    return dst;
}

/*****************************************************************************
 *
 *  Output an instruction involving a stack frame value ([ebp+disp] or
 *  [esp+disp]), optionally followed by an immediate constant, and update
 *  GC liveness for any register/stack slot the instruction writes.
 */

BYTE* emitter::emitOutputSV(BYTE* dst, instrDesc* id, code_t code, CnsVal* addc)
{
    int  adr;
    int  dsp;
    bool EBPbased;
    bool dspInByte;
    bool dspIsZero;

    instruction ins  = id->idIns();
    emitAttr    size = id->idOpSize();
    size_t      opsz = EA_SIZE_IN_BYTES(size);

    assert(ins != INS_imul || id->idReg1() == REG_EAX || size == EA_4BYTE || size == EA_8BYTE);

    // `addc` is used for two kinds of instructions
    // 1. ins like ADD that can have reg/mem and const versions both and const version needs to modify the opcode for
    //    large constant operand (e.g., imm32)
    // 2. certain SSE/AVX ins have const operand as control bits that is always 1-Byte (imm8) even if `size` > 1-Byte
    if (addc && (size > EA_1BYTE))
    {
        ssize_t cval = addc->cnsVal;

        // Does the constant fit in a byte?
        // SSE/AVX do not need to modify opcode
        if ((signed char)cval == cval && addc->cnsReloc == false && ins != INS_mov && ins != INS_test)
        {
            if ((id->idInsFmt() != IF_SRW_SHF) && (id->idInsFmt() != IF_RRW_SRD_CNS) &&
                (id->idInsFmt() != IF_RWR_RRD_SRD_CNS) && !IsSSEOrAVXInstruction(ins))
            {
                code |= 2;
            }

            opsz = 1;
        }
    }

    // Add VEX prefix if required.
    // There are some callers who already add VEX prefix and call this routine.
    // Therefore, add VEX prefix if one is not already present.
    code = AddVexPrefixIfNeededAndNotPresent(ins, code, size);

    // Compute the REX prefix
    if (TakesRexWPrefix(ins, size))
    {
        code = AddRexWPrefix(ins, code);
    }

    // Special case emitting AVX instructions
    if (EncodedBySSE38orSSE3A(ins) || (ins == INS_crc32))
    {
        if ((ins == INS_crc32) && (size > EA_1BYTE))
        {
            code |= 0x0100;

            if (size == EA_2BYTE)
            {
                dst += emitOutputByte(dst, 0x66);
            }
        }

        regNumber reg345 = REG_NA;
        if (IsBMIInstruction(ins))
        {
            reg345 = getBmiRegNumber(ins);
        }
        if (reg345 == REG_NA)
        {
            reg345 = id->idReg1();
        }
        else
        {
            code = insEncodeReg3456(ins, id->idReg1(), size, code);
        }
        unsigned regcode = insEncodeReg345(ins, reg345, size, &code);

        dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code);

        if (UseVEXEncoding() && (ins != INS_crc32))
        {
            // Emit last opcode byte
            // TODO-XArch-CQ: Right now support 4-byte opcode instructions only
            assert((code & 0xFF) == 0);
            dst += emitOutputByte(dst, (code >> 8) & 0xFF);
        }
        else
        {
            dst += emitOutputWord(dst, code >> 16);
            dst += emitOutputWord(dst, code & 0xFFFF);
        }

        code = regcode;
    }
    // Is this a 'big' opcode?
    else if (code & 0xFF000000)
    {
        // Output the REX prefix
        dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code);

        // Output the highest word of the opcode
        // We need to check again because in case of AVX instructions the leading
        // escape byte(s) (e.g. 0x0F) will be encoded as part of VEX prefix.
        if (code & 0xFF000000)
        {
            dst += emitOutputWord(dst, code >> 16);
            code &= 0x0000FFFF;
        }
    }
    else if (code & 0x00FF0000)
    {
        // BT supports 16 bit operands and this code doesn't add the necessary 66 prefix.
        assert(ins != INS_bt);

        // Output the REX prefix
        dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code);

        // Output the highest byte of the opcode.
        // We need to check again because in case of AVX instructions the leading
        // escape byte(s) (e.g. 0x0F) will be encoded as part of VEX prefix.
        if (code & 0x00FF0000)
        {
            dst += emitOutputByte(dst, code >> 16);
            code &= 0x0000FFFF;
        }

        // Use the large version if this is not a byte
        if ((size != EA_1BYTE) && HasRegularWideForm(ins))
        {
            code |= 0x1;
        }
    }
    else if (CodeGen::instIsFP(ins))
    {
        assert(size == EA_4BYTE || size == EA_8BYTE);

        if (size == EA_8BYTE)
        {
            code += 4;
        }
    }
    else if (!IsSSEInstruction(ins) && !IsAVXInstruction(ins))
    {
        // Is the operand size larger than a byte?
        switch (size)
        {
            case EA_1BYTE:
                break;

            case EA_2BYTE:
                // Output a size prefix for a 16-bit operand
                dst += emitOutputByte(dst, 0x66);
                FALLTHROUGH;

            case EA_4BYTE:
#ifdef TARGET_AMD64
            case EA_8BYTE:
#endif // TARGET_AMD64

                /* Set the 'w' size bit to indicate 32-bit operation
                 * Note that incrementing "code" for INS_call (0xFF) would
                 * overflow, whereas setting the lower bit to 1 just works out */

                code |= 0x01;
                break;

#ifdef TARGET_X86
            case EA_8BYTE:

                // Double operand - set the appropriate bit.
                // I don't know what a legitimate reason to end up in this case would be
                // considering that FP is taken care of above...
                // what is an instruction that takes a double which is not covered by the
                // above instIsFP? Of the list in instrsxarch, only INS_fprem

                code |= 0x04;
                NO_WAY("bad 8 byte op");
                break;
#endif // TARGET_X86

            default:
                NO_WAY("unexpected size");
                break;
        }
    }

    // Output the REX prefix
    dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code);

    // Figure out the variable's frame position
    int varNum = id->idAddr()->iiaLclVar.lvaVarNum();

    adr = emitComp->lvaFrameAddress(varNum, &EBPbased);
    dsp = adr + id->idAddr()->iiaLclVar.lvaOffset();

    dspInByte = ((signed char)dsp == (int)dsp);
    dspIsZero = (dsp == 0);

    // for stack variables the dsp should never be a reloc
    assert(id->idIsDspReloc() == 0);

    // Emit the address mode bytes: EBP-based vars use [ebp+disp] (mod=01/10,
    // rm=EBP); SP-based vars are addressed off the stack pointer, which
    // requires a SIB byte (0x24).
    if (EBPbased)
    {
        // EBP-based variable: does the offset fit in a byte?
        if (EncodedBySSE38orSSE3A(ins) || (ins == INS_crc32))
        {
            if (dspInByte)
            {
                dst += emitOutputByte(dst, code | 0x45);
                dst += emitOutputByte(dst, dsp);
            }
            else
            {
                dst += emitOutputByte(dst, code | 0x85);
                dst += emitOutputLong(dst, dsp);
            }
        }
        else
        {
            if (dspInByte)
            {
                dst += emitOutputWord(dst, code | 0x4500);
                dst += emitOutputByte(dst, dsp);
            }
            else
            {
                dst += emitOutputWord(dst, code | 0x8500);
                dst += emitOutputLong(dst, dsp);
            }
        }
    }
    else
    {
#if !FEATURE_FIXED_OUT_ARGS
        // Adjust the offset by the amount currently pushed on the CPU stack
        dsp += emitCurStackLvl;
#endif

        dspInByte = ((signed char)dsp == (int)dsp);
        dspIsZero = (dsp == 0);

        // Does the offset fit in a byte?
        if (EncodedBySSE38orSSE3A(ins) || (ins == INS_crc32))
        {
            if (dspInByte)
            {
                if (dspIsZero)
                {
                    dst += emitOutputByte(dst, code | 0x04);
                    dst += emitOutputByte(dst, 0x24);
                }
                else
                {
                    dst += emitOutputByte(dst, code | 0x44);
                    dst += emitOutputByte(dst, 0x24);
                    dst += emitOutputByte(dst, dsp);
                }
            }
            else
            {
                dst += emitOutputByte(dst, code | 0x84);
                dst += emitOutputByte(dst, 0x24);
                dst += emitOutputLong(dst, dsp);
            }
        }
        else
        {
            if (dspInByte)
            {
                if (dspIsZero)
                {
                    dst += emitOutputWord(dst, code | 0x0400);
                    dst += emitOutputByte(dst, 0x24);
                }
                else
                {
                    dst += emitOutputWord(dst, code | 0x4400);
                    dst += emitOutputByte(dst, 0x24);
                    dst += emitOutputByte(dst, dsp);
                }
            }
            else
            {
                dst += emitOutputWord(dst, code | 0x8400);
                dst += emitOutputByte(dst, 0x24);
                dst += emitOutputLong(dst, dsp);
            }
        }
    }

    // Now generate the constant value, if present
    if (addc)
    {
        ssize_t cval = addc->cnsVal;

#ifdef TARGET_AMD64
        // all these opcodes only take a sign-extended 4-byte immediate
        noway_assert(opsz < 8 || ((int)cval == cval && !addc->cnsReloc));
#endif

        switch (opsz)
        {
            case 0:
            case 4:
            case 8:
                dst += emitOutputLong(dst, cval);
                break;
            case 2:
                dst += emitOutputWord(dst, cval);
                break;
            case 1:
                dst += emitOutputByte(dst, cval);
                break;

            default:
                assert(!"unexpected operand size");
        }

        if (addc->cnsReloc)
        {
            emitRecordRelocation((void*)(dst - sizeof(INT32)), (void*)(size_t)cval, IMAGE_REL_BASED_HIGHLOW);
            assert(opsz == 4);
        }
    }

    // Does this instruction operate on a GC ref value?
    if (id->idGCref())
    {
        // Factor in the sub-variable offset
        adr += AlignDown(id->idAddr()->iiaLclVar.lvaOffset(), TARGET_POINTER_SIZE);

        switch (id->idInsFmt())
        {
            case IF_SRD:
                // Read  stack                    -- no change
                break;

            case IF_SWR: // Stack Write (So we need to update GC live for stack var)
                // Write stack                    -- GC var may be born
                emitGCvarLiveUpd(adr, varNum, id->idGCref(), dst DEBUG_ARG(varNum));
                break;

            case IF_SRD_CNS:
                // Read  stack                    -- no change
                break;

            case IF_SWR_CNS:
                // Write stack                    -- no change
                break;

            case IF_SRD_RRD:
            case IF_RRD_SRD:
                // Read  stack   , read  register -- no change
                break;

            case IF_RWR_SRD: // Register Write, Stack Read (So we need to update GC live for register)
                // Read  stack   , write register -- GC reg may be born
                emitGCregLiveUpd(id->idGCref(), id->idReg1(), dst);
                break;

            case IF_SWR_RRD: // Stack Write, Register Read (So we need to update GC live for stack var)
                // Read  register, write stack    -- GC var may be born
                emitGCvarLiveUpd(adr, varNum, id->idGCref(), dst DEBUG_ARG(varNum));
                break;

            case IF_RRW_SRD: // Register Read/Write, Stack Read (So we need to update GC live for register)
                // reg could have been a GCREF as GCREF + int=BYREF
                //                             or BYREF+/-int=BYREF
                assert(id->idGCref() == GCT_BYREF && (ins == INS_add || ins == INS_sub));
                emitGCregLiveUpd(id->idGCref(), id->idReg1(), dst);
                break;

            case IF_SRW_CNS:
            case IF_SRW_RRD:
            // += -= of a byref, no change
            case IF_SRW:
                break;

            default:
#ifdef DEBUG
                emitDispIns(id, false, false, false);
#endif
                assert(!"unexpected GC ref instruction format");
        }
    }
    else
    {
        if (!emitInsCanOnlyWriteSSE2OrAVXReg(id))
        {
            switch (id->idInsFmt())
            {
                case IF_RWR_SRD: // Register Write, Stack Read
                case IF_RRW_SRD: // Register Read/Write, Stack Read
                case IF_RWR_RRD_SRD:
                    emitGCregDeadUpd(id->idReg1(), dst);
                    break;

                default:
                    break;
            }

            if (ins == INS_mulEAX || ins == INS_imulEAX)
            {
                emitGCregDeadUpd(REG_EAX, dst);
                emitGCregDeadUpd(REG_EDX, dst);
            }

            // For the three operand imul instruction the target register
            // is encoded in the opcode

            if (instrIs3opImul(ins))
            {
                regNumber tgtReg = inst3opImulReg(ins);
                emitGCregDeadUpd(tgtReg, dst);
            }
        }
    }

    return dst;
}

/*****************************************************************************
 *
 *  Output an instruction with a static data member (class variable) operand,
 *  i.e. an address of the form [ddd] (or fs:[ddd]), optionally followed by an
 *  immediate constant, and update GC liveness for any register the
 *  instruction writes.
 */

BYTE* emitter::emitOutputCV(BYTE* dst, instrDesc* id, code_t code, CnsVal* addc)
{
    BYTE*                addr;
    CORINFO_FIELD_HANDLE fldh;
    ssize_t              offs;
    int                  doff;

    emitAttr size = id->idOpSize();
    size_t   opsz = EA_SIZE_IN_BYTES(size);

    instruction ins       = id->idIns();
    bool        isMoffset = false;

    // Get hold of the field handle and offset
    fldh = id->idAddr()->iiaFieldHnd;
    offs = emitGetInsDsp(id);

    // Special case: mov reg, fs:[ddd]
    if (fldh == FLD_GLOBAL_FS)
    {
        dst += emitOutputByte(dst, 0x64);
    }

    // Compute VEX prefix
    // Some of its callers already add VEX prefix and then call this routine.
    // Therefore add VEX prefix if it is not already present.
    code = AddVexPrefixIfNeededAndNotPresent(ins, code, size);

    // Compute the REX prefix
    if (TakesRexWPrefix(ins, size))
    {
        code = AddRexWPrefix(ins, code);
    }

    // `addc` is used for two kinds of instructions
    // 1. ins like ADD that can have reg/mem and const versions both and const version needs to modify the opcode for
    //    large constant operand (e.g., imm32)
    // 2. certain SSE/AVX ins have const operand as control bits that is always 1-Byte (imm8) even if `size` > 1-Byte
    if (addc && (size > EA_1BYTE))
    {
        ssize_t cval = addc->cnsVal;
        // Does the constant fit in a byte?
        if ((signed char)cval == cval && addc->cnsReloc == false && ins != INS_mov && ins != INS_test)
        {
            // SSE/AVX do not need to modify opcode
            if (id->idInsFmt() != IF_MRW_SHF && !IsSSEOrAVXInstruction(ins))
            {
                code |= 2;
            }

            opsz = 1;
        }
    }
#ifdef TARGET_X86
    else
    {
        // Special case: "mov eax, [addr]" and "mov [addr], eax"
        // Amd64: this is one case where addr can be 64-bit in size. This is
        // currently unused or not enabled on amd64 as it always uses RIP
        // relative addressing which results in smaller instruction size.
        if (ins == INS_mov && id->idReg1() == REG_EAX)
        {
            switch (id->idInsFmt())
            {
                case IF_RWR_MRD:

                    assert(code == (insCodeRM(ins) | (insEncodeReg345(ins, REG_EAX, EA_PTRSIZE, NULL) << 8) | 0x0500));

                    code &= ~((code_t)0xFFFFFFFF);
                    code |= 0xA0;
                    isMoffset = true;
                    break;

                case IF_MWR_RRD:

                    assert(code == (insCodeMR(ins) | (insEncodeReg345(ins, REG_EAX, EA_PTRSIZE, NULL) << 8) | 0x0500));

                    code &= ~((code_t)0xFFFFFFFF);
                    code |= 0xA2;
                    isMoffset = true;
                    break;

                default:
                    break;
            }
        }
    }
#endif // TARGET_X86

    // Special case emitting AVX instructions
    if (EncodedBySSE38orSSE3A(ins) || (ins == INS_crc32))
    {
        if ((ins == INS_crc32) && (size > EA_1BYTE))
        {
            code |= 0x0100;

            if (size == EA_2BYTE)
            {
                dst += emitOutputByte(dst, 0x66);
            }
        }

        regNumber reg345 = REG_NA;
        if (IsBMIInstruction(ins))
        {
            reg345 = getBmiRegNumber(ins);
        }
        if (reg345 == REG_NA)
        {
            reg345 = id->idReg1();
        }
        else
        {
            code = insEncodeReg3456(ins, id->idReg1(), size, code);
        }
        unsigned regcode = insEncodeReg345(ins, reg345, size, &code);

        dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code);

        if (UseVEXEncoding() && (ins != INS_crc32))
        {
            // Emit last opcode byte
            // TODO-XArch-CQ: Right now support 4-byte opcode instructions only
            assert((code & 0xFF) == 0);
            dst += emitOutputByte(dst, (code >> 8) & 0xFF);
        }
        else
        {
            dst += emitOutputWord(dst, code >> 16);
            dst += emitOutputWord(dst, code & 0xFFFF);
        }

        // Emit Mod,R/M byte
        dst += emitOutputByte(dst, regcode | 0x05);
        code = 0;
    }
    // Is this a 'big' opcode?
    else if (code & 0xFF000000)
    {
        // Output the REX prefix
        dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code);

        // Output the highest word of the opcode.
        // Check again since AVX instructions encode leading opcode bytes as part of VEX prefix.
        if (code & 0xFF000000)
        {
            dst += emitOutputWord(dst, code >> 16);
        }
        code &= 0x0000FFFF;
    }
    else if (code & 0x00FF0000)
    {
        // Output the REX prefix
        dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code);

        // Check again as VEX prefix would have encoded leading opcode byte
        if (code & 0x00FF0000)
        {
            dst += emitOutputByte(dst, code >> 16);
            code &= 0x0000FFFF;
        }

        if (size != EA_1BYTE && HasRegularWideForm(ins))
        {
            code |= 0x1;
        }
    }
    else if (CodeGen::instIsFP(ins))
    {
        assert(size == EA_4BYTE || size == EA_8BYTE);

        if (size == EA_8BYTE)
        {
            code += 4;
        }
    }
    else
    {
        // Is the operand size larger than a byte?
        switch (size)
        {
            case EA_1BYTE:
                break;

            case EA_2BYTE:
                // Output a size prefix for a 16-bit operand
                dst += emitOutputByte(dst, 0x66);
                FALLTHROUGH;

            case EA_4BYTE:
#ifdef TARGET_AMD64
            case EA_8BYTE:
#endif
                // Set the 'w' bit to get the large version
                code |= 0x1;
                break;

#ifdef TARGET_X86
            case EA_8BYTE:

                // Double operand - set the appropriate bit
                code |= 0x04;
                break;
#endif // TARGET_X86

            default:
                assert(!"unexpected size");
        }
    }

    // Output the REX prefix
    dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code);

    if (code)
    {
        if (id->idInsFmt() == IF_MRD_OFF || id->idInsFmt() == IF_RWR_MRD_OFF || isMoffset)
        {
            dst += emitOutputByte(dst, code);
        }
        else
        {
            dst += emitOutputWord(dst, code);
        }
    }

    // Do we have a constant or a static data member?
    doff = Compiler::eeGetJitDataOffs(fldh);
    if (doff >= 0)
    {
        addr = emitConsBlock + doff;

#ifdef DEBUG
        int byteSize = EA_SIZE_IN_BYTES(emitGetMemOpSize(id));

        // Check that the offset is properly aligned (i.e. the ddd in [ddd])
        // When SMALL_CODE is set, we only expect 4-byte alignment, otherwise
        // we expect the same alignment as the size of the constant.
        assert((emitChkAlign == false) || (ins == INS_lea) ||
               ((emitComp->compCodeOpt() == Compiler::SMALL_CODE) && (((size_t)addr & 3) == 0)) ||
               (((size_t)addr & (byteSize - 1)) == 0));
#endif // DEBUG
    }
    else
    {
        // Special case: mov reg, fs:[ddd] or mov reg, [ddd]
        if (jitStaticFldIsGlobAddr(fldh))
        {
            addr = nullptr;
        }
        else
        {
            addr = (BYTE*)emitComp->info.compCompHnd->getFieldAddress(fldh, nullptr);
            if (addr == nullptr)
            {
                NO_WAY("could not obtain address of static field");
            }
        }
    }

    // Absolute address of the field / data-section constant being accessed.
    BYTE* target = (addr + offs);

    // Emit the address: either a 32-bit displacement, or (x86 moffset forms
    // A0/A2 only) a full pointer-sized address; record a relocation if needed.
    if (!isMoffset)
    {
        INT32 addlDelta = 0;

        if (addc)
        {
            // It is of the form "ins [disp], imm" or "ins reg, [disp], imm"
            // For emitting relocation, we also need to take into account of the
            // additional bytes of code emitted for immed val.
            ssize_t cval = addc->cnsVal;

#ifdef TARGET_AMD64
            // all these opcodes only take a sign-extended 4-byte immediate
            noway_assert(opsz < 8 || ((int)cval == cval && !addc->cnsReloc));
#else  // TARGET_X86
            noway_assert(opsz <= 4);
#endif // TARGET_X86

            switch (opsz)
            {
                case 0:
                case 4:
                case 8:
                    addlDelta = -4;
                    break;
                case 2:
                    addlDelta = -2;
                    break;
                case 1:
                    addlDelta = -1;
                    break;

                default:
                    assert(!"unexpected operand size");
                    unreached();
            }
        }

#ifdef TARGET_AMD64
        // All static field and data section constant accesses should be marked as relocatable
        noway_assert(id->idIsDspReloc());
        dst += emitOutputLong(dst, 0);
#else // TARGET_X86
        dst += emitOutputLong(dst, (int)(ssize_t)target);
#endif // TARGET_X86

        if (id->idIsDspReloc())
        {
            emitRecordRelocation((void*)(dst - sizeof(int)), target, IMAGE_REL_BASED_DISP32, 0, addlDelta);
        }
    }
    else
    {
#ifdef TARGET_AMD64
        // This code path should never be hit on amd64 since it always uses RIP relative addressing.
        // In future if ever there is a need to enable this special case, also enable the logic
        // that sets isMoffset to true on amd64.
        unreached();
#else // TARGET_X86

        dst += emitOutputSizeT(dst, (ssize_t)target);

        if (id->idIsDspReloc())
        {
            emitRecordRelocation((void*)(dst - TARGET_POINTER_SIZE), target, IMAGE_REL_BASED_MOFFSET);
        }

#endif // TARGET_X86
    }

    // Now generate the constant value, if present
    if (addc)
    {
        ssize_t cval = addc->cnsVal;

#ifdef TARGET_AMD64
        // all these opcodes only take a sign-extended 4-byte immediate
        noway_assert(opsz < 8 || ((int)cval == cval && !addc->cnsReloc));
#endif

        switch (opsz)
        {
            case 0:
            case 4:
            case 8:
                dst += emitOutputLong(dst, cval);
                break;
            case 2:
                dst += emitOutputWord(dst, cval);
                break;
            case 1:
                dst += emitOutputByte(dst, cval);
                break;

            default:
                assert(!"unexpected operand size");
        }

        if (addc->cnsReloc)
        {
            emitRecordRelocation((void*)(dst - sizeof(INT32)), (void*)(size_t)cval, IMAGE_REL_BASED_HIGHLOW);
            assert(opsz == 4);
        }
    }

    // Does this instruction operate on a GC ref value?
    if (id->idGCref())
    {
        switch (id->idInsFmt())
        {
            case IF_MRD:
            case IF_MRW:
            case IF_MWR:
                break;

            case IF_RRD_MRD:
                break;

            case IF_RWR_MRD:
                emitGCregLiveUpd(id->idGCref(), id->idReg1(), dst);
                break;

            case IF_MRD_RRD:
            case IF_MWR_RRD:
            case IF_MRW_RRD:
                break;

            case IF_MRD_CNS:
            case IF_MWR_CNS:
            case IF_MRW_CNS:
                break;

            case IF_RRW_MRD:

                assert(id->idGCref() == GCT_BYREF);
                assert(ins == INS_add || ins == INS_sub);

                // Mark it as holding a GCT_BYREF
                emitGCregLiveUpd(GCT_BYREF, id->idReg1(), dst);
                break;

            default:
#ifdef DEBUG
                emitDispIns(id, false, false, false);
#endif
                assert(!"unexpected GC ref instruction format");
        }
    }
    else
    {
        if (!emitInsCanOnlyWriteSSE2OrAVXReg(id))
        {
            switch (id->idInsFmt())
            {
                case IF_RWR_MRD:
                case IF_RRW_MRD:
                case IF_RWR_RRD_MRD:
                    emitGCregDeadUpd(id->idReg1(), dst);
                    break;

                default:
                    break;
            }

            if (ins == INS_mulEAX || ins == INS_imulEAX)
            {
                emitGCregDeadUpd(REG_EAX, dst);
                emitGCregDeadUpd(REG_EDX, dst);
            }

            // For the three operand imul instruction the target register
            // is encoded in the opcode
            if (instrIs3opImul(ins))
            {
                regNumber tgtReg = inst3opImulReg(ins);
                emitGCregDeadUpd(tgtReg, dst);
            }
        }
    }

    return dst;
}

/*****************************************************************************
 *
 *  Output an instruction with one register operand and return the updated
 *  code pointer, updating GC register liveness for register writes.
 */

BYTE* emitter::emitOutputR(BYTE* dst, instrDesc* id)
{
    code_t code;

    instruction ins  = id->idIns();
    regNumber   reg  = id->idReg1();
    emitAttr    size = id->idOpSize();

    // We would like to update GC info correctly
    assert(!IsSSEInstruction(ins));
    assert(!IsAVXInstruction(ins));

    // Get the 'base' opcode
    switch (ins)
    {
        case INS_inc:
        case INS_dec:

#ifdef TARGET_AMD64
            if (true)
#else
            if (size == EA_1BYTE)
#endif
            {
                assert(INS_inc_l == INS_inc + 1);
                assert(INS_dec_l == INS_dec + 1);

                // Can't use the compact form, use the long form
                ins = (instruction)(ins + 1);
                if (size == EA_2BYTE)
                {
                    // Output a size prefix for a 16-bit operand
                    dst += emitOutputByte(dst, 0x66);
                }

                code = insCodeRR(ins);
                if (size != EA_1BYTE)
                {
                    // Set the 'w' bit to get the large version
                    code |= 0x1;
                }

                if (TakesRexWPrefix(ins, size))
                {
                    code = AddRexWPrefix(ins, code);
                }

                // Register...
                unsigned regcode = insEncodeReg012(ins, reg, size, &code);

                // Output the REX prefix
                dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code);

                dst += emitOutputWord(dst, code | (regcode << 8));
            }
            else
            {
                if (size == EA_2BYTE)
                {
                    // Output a size prefix for a 16-bit operand
                    dst += emitOutputByte(dst, 0x66);
                }
                dst += emitOutputByte(dst, insCodeRR(ins) | insEncodeReg012(ins, reg, size, nullptr));
            }
            break;

        case INS_pop:
        case INS_pop_hide:
        case INS_push:
        case INS_push_hide:

            assert(size == EA_PTRSIZE);
            code = insEncodeOpreg(ins, reg, size);

            assert(!TakesVexPrefix(ins));
            assert(!TakesRexWPrefix(ins, size));

            // Output the REX prefix
            dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code);

            dst += emitOutputByte(dst, code);
            break;

        case INS_bswap:
        {
            assert(size >= EA_4BYTE && size <= EA_PTRSIZE); // 16-bit BSWAP is undefined

            // The Intel instruction set reference for BSWAP states that extended registers
            // should be enabled via REX.R, but per Vol. 2A, Sec. 2.2.1.2 (see also Figure 2-7),
            // REX.B should instead be used if the register is encoded in the opcode byte itself.
            // Therefore the default logic of insEncodeReg012 is correct for this case.

            code = insCodeRR(ins);

            if (TakesRexWPrefix(ins, size))
            {
                code = AddRexWPrefix(ins, code);
            }

            // Register...
            unsigned regcode = insEncodeReg012(ins, reg, size, &code);

            // Output the REX prefix
            dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code);

            dst += emitOutputWord(dst, code | (regcode << 8));
            break;
        }

        case INS_seto:
        case INS_setno:
        case INS_setb:
        case INS_setae:
        case INS_sete:
        case INS_setne:
        case INS_setbe:
        case INS_seta:
        case INS_sets:
        case INS_setns:
        case INS_setp:
        case INS_setnp:
        case INS_setl:
        case INS_setge:
        case INS_setle:
        case INS_setg:

            assert(id->idGCref() == GCT_NONE);
            assert(size == EA_1BYTE);

            code = insEncodeMRreg(ins, reg, EA_1BYTE, insCodeMR(ins));

            // Output the REX prefix
            dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code);

            // We expect this to always be a 'big' opcode
            assert(code & 0x00FF0000);

            dst += emitOutputByte(dst, code >> 16);
            dst += emitOutputWord(dst, code & 0x0000FFFF);

            break;

        case INS_mulEAX:
        case INS_imulEAX:

            // Kill off any GC refs in EAX or EDX
            emitGCregDeadUpd(REG_EAX, dst);
            emitGCregDeadUpd(REG_EDX, dst);

            FALLTHROUGH;

        default:

            assert(id->idGCref() == GCT_NONE);

            code = insEncodeMRreg(ins, reg, size, insCodeMR(ins));

            if (size != EA_1BYTE)
            {
                // Set the 'w' bit to get the large version
                code |= 0x1;

                if (size == EA_2BYTE)
                {
                    // Output a size prefix for a 16-bit operand
                    dst += emitOutputByte(dst, 0x66);
                }
            }

            code = AddVexPrefixIfNeeded(ins, code, size);

            if (TakesRexWPrefix(ins, size))
            {
                code = AddRexWPrefix(ins, code);
            }

            // Output the REX prefix
            dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code);

            dst += emitOutputWord(dst, code);
            break;
    }

    // Are we writing the register? if so then update the GC information
    switch (id->idInsFmt())
    {
        case IF_RRD:
            break;

        case IF_RWR:
            if (id->idGCref())
            {
                emitGCregLiveUpd(id->idGCref(), id->idReg1(), dst);
            }
            else
            {
                emitGCregDeadUpd(id->idReg1(), dst);
            }
            break;

        case IF_RRW:
        {
#ifdef DEBUG
            regMaskTP regMask = genRegMask(reg);
#endif
            if (id->idGCref())
            {
                assert(ins == INS_inc || ins == INS_dec || ins == INS_inc_l || ins == INS_dec_l);

                // We would like to assert that the reg must currently be holding either a gcref or a byref.
                // However, we can see cases where a LCLHEAP generates a non-gcref value into a register,
                // and the first instruction we generate after the LCLHEAP is an `inc` that is typed as
                // byref. We'll properly create the byref gcinfo when this happens.
                //     assert((emitThisGCrefRegs | emitThisByrefRegs) & regMask);

                assert(id->idGCref() == GCT_BYREF);

                // Mark it as holding a GCT_BYREF
                emitGCregLiveUpd(GCT_BYREF, id->idReg1(), dst);
            }
            else
            {
                // Can't use RRW to trash a GC ref. It's OK for unverifiable code
                // to trash Byrefs.
                assert((emitThisGCrefRegs & regMask) == 0);
            }
        }
        break;

        default:
#ifdef DEBUG
            emitDispIns(id, false, false, false);
#endif
            assert(!"unexpected instruction format");
            break;
    }

    return dst;
}

/*****************************************************************************
 *
 *  Output an instruction with two register operands.
 */

//------------------------------------------------------------------------
// emitOutputRR: Encode and emit an instruction with two register operands
// ("ins reg1, reg2") into the code buffer, then update the emitter's GC
// liveness tracking for any GC ref / byref registers the instruction affects.
//
// Arguments:
//    dst - current write position in the output code buffer
//    id  - the instruction descriptor to encode
//
// Return Value:
//    The updated write position (dst advanced past the emitted bytes).
//
BYTE* emitter::emitOutputRR(BYTE* dst, instrDesc* id)
{
    code_t code;

    instruction ins  = id->idIns();
    regNumber   reg1 = id->idReg1();
    regNumber   reg2 = id->idReg2();
    emitAttr    size = id->idOpSize();

    // First, build the opcode (with any VEX/REX.W adjustments) based on the
    // instruction family; the ModRM/register bits are folded in afterwards.
    if (IsSSEOrAVXInstruction(ins))
    {
        // movd must move between a GPR and an XMM register; all other
        // SSE/AVX reg,reg forms use the RM encoding unconditionally.
        assert((ins != INS_movd) || (isFloatReg(reg1) != isFloatReg(reg2)));

        if ((ins != INS_movd) || isFloatReg(reg1))
        {
            code = insCodeRM(ins);
        }
        else
        {
            // movd with a GPR destination uses the MR (store) form
            code = insCodeMR(ins);
        }
        code = AddVexPrefixIfNeeded(ins, code, size);
        code = insEncodeRMreg(ins, code);

        if (TakesRexWPrefix(ins, size))
        {
            code = AddRexWPrefix(ins, code);
        }
    }
    else if ((ins == INS_movsx) || (ins == INS_movzx) || (insIsCMOV(ins)))
    {
        assert(hasCodeRM(ins) && !hasCodeMI(ins) && !hasCodeMR(ins));
        code = insCodeRM(ins);
        code = AddVexPrefixIfNeeded(ins, code, size);
        // Low bit selects the 16-bit source variant for movsx/movzx
        code = insEncodeRMreg(ins, code) | (int)(size == EA_2BYTE);
#ifdef TARGET_AMD64

        assert((size < EA_4BYTE) || (insIsCMOV(ins)));
        if ((size == EA_8BYTE) || (ins == INS_movsx))
        {
            code = AddRexWPrefix(ins, code);
        }
    }
    else if (ins == INS_movsxd)
    {
        assert(hasCodeRM(ins) && !hasCodeMI(ins) && !hasCodeMR(ins));
        code = insCodeRM(ins);
        code = AddVexPrefixIfNeeded(ins, code, size);
        code = insEncodeRMreg(ins, code);

#endif // TARGET_AMD64
    }
#ifdef FEATURE_HW_INTRINSICS
    else if ((ins == INS_bsf) || (ins == INS_bsr) || (ins == INS_crc32) || (ins == INS_lzcnt) || (ins == INS_popcnt) ||
             (ins == INS_tzcnt))
    {
        assert(hasCodeRM(ins) && !hasCodeMI(ins) && !hasCodeMR(ins));
        code = insCodeRM(ins);
        code = AddVexPrefixIfNeeded(ins, code, size);
        code = insEncodeRMreg(ins, code);
        if ((ins == INS_crc32) && (size > EA_1BYTE))
        {
            // crc32 uses a distinct opcode byte for non-byte sources
            code |= 0x0100;
        }

        if (size == EA_2BYTE)
        {
            assert(ins == INS_crc32);
            // Operand-size override prefix for the 16-bit form
            dst += emitOutputByte(dst, 0x66);
        }
        else if (size == EA_8BYTE)
        {
            code = AddRexWPrefix(ins, code);
        }
    }
#endif // FEATURE_HW_INTRINSICS
    else
    {
        // General-purpose (non-SSE) reg,reg instruction
        assert(!TakesVexPrefix(ins));
        code = insCodeMR(ins);
        code = insEncodeMRreg(ins, code);

        if (ins != INS_test)
        {
            // Set the direction bit so reg1 is the destination
            // ('test' has no direction bit)
            code |= 2;
        }

        switch (size)
        {
            case EA_1BYTE:
                // Byte-sized operands require byte-addressable registers
                noway_assert(RBM_BYTE_REGS & genRegMask(reg1));
                noway_assert(RBM_BYTE_REGS & genRegMask(reg2));
                break;

            case EA_2BYTE:
                // Output a size prefix for a 16-bit operand
                dst += emitOutputByte(dst, 0x66);
                FALLTHROUGH;

            case EA_4BYTE:
                // Set the 'w' bit to get the large version
                code |= 0x1;
                break;

#ifdef TARGET_AMD64
            case EA_8BYTE:
                // TODO-AMD64-CQ: Better way to not emit REX.W when we don't need it
                // Don't need to zero out the high bits explicitly
                if ((ins != INS_xor) || (reg1 != reg2))
                {
                    code = AddRexWPrefix(ins, code);
                }
                else
                {
                    // "xor reg, reg" zeroes the register; the 32-bit form
                    // suffices (it implicitly clears the upper 32 bits)
                    id->idOpSize(EA_4BYTE);
                }

                // Set the 'w' bit to get the large version
                code |= 0x1;
                break;

#endif // TARGET_AMD64

            default:
                assert(!"unexpected size");
        }
    }

    // Decide which register goes in the ModRM 'r/m' (bits 0-2) field and
    // which goes in the 'reg' (bits 3-5) field.
    regNumber regFor012Bits = reg2;
    regNumber regFor345Bits = REG_NA;

    if (IsBMIInstruction(ins))
    {
        // Some BMI instructions encode an opcode extension in the reg field
        regFor345Bits = getBmiRegNumber(ins);
    }
    if (regFor345Bits == REG_NA)
    {
        regFor345Bits = reg1;
    }
    if (ins == INS_movd)
    {
        assert(isFloatReg(reg1) != isFloatReg(reg2));
        if (isFloatReg(reg2))
        {
            // Ensure the XMM register lands in the 'reg' (345) field
            std::swap(regFor012Bits, regFor345Bits);
        }
    }

    unsigned regCode = insEncodeReg345(ins, regFor345Bits, size, &code);
    regCode |= insEncodeReg012(ins, regFor012Bits, size, &code);

    if (TakesVexPrefix(ins))
    {
        // In case of AVX instructions that take 3 operands, we generally want to encode reg1
        // as first source.  In this case, reg1 is both a source and a destination.
        // The exception is the "merge" 3-operand case, where we have a move instruction, such
        // as movss, and we want to merge the source with itself.
        //
        // TODO-XArch-CQ: Eventually we need to support 3 operand instruction formats. For
        // now we use the single source as source1 and source2.
        if (IsDstDstSrcAVXInstruction(ins))
        {
            // encode source/dest operand reg in 'vvvv' bits in 1's complement form
            code = insEncodeReg3456(ins, reg1, size, code);
        }
        else if (IsDstSrcSrcAVXInstruction(ins))
        {
            // encode source operand reg in 'vvvv' bits in 1's complement form
            code = insEncodeReg3456(ins, reg2, size, code);
        }
    }

    // Output the REX prefix
    dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code);

    if (code & 0xFF000000)
    {
        // Output the highest word of the opcode
        dst += emitOutputWord(dst, code >> 16);
        code &= 0x0000FFFF;

        if (Is4ByteSSEInstruction(ins))
        {
            // Output 3rd byte of the opcode
            dst += emitOutputByte(dst, code);
            code &= 0xFF00;
        }
    }
    else if (code & 0x00FF0000)
    {
        dst += emitOutputByte(dst, code >> 16);
        code &= 0x0000FFFF;
    }

    // TODO-XArch-CQ: Right now support 4-byte opcode instructions only
    if ((code & 0xFF00) == 0xC000)
    {
        dst += emitOutputWord(dst, code | (regCode << 8));
    }
    else if ((code & 0xFF) == 0x00)
    {
        // This case happens for some SSE/AVX instructions only
        assert(IsAVXInstruction(ins) || Is4ByteSSEInstruction(ins));

        dst += emitOutputByte(dst, (code >> 8) & 0xFF);
        dst += emitOutputByte(dst, (0xC0 | regCode));
    }
    else
    {
        dst += emitOutputWord(dst, code);
        dst += emitOutputByte(dst, (0xC0 | regCode));
    }

    // Does this instruction operate on a GC ref value?
    if (id->idGCref())
    {
        switch (id->idInsFmt())
        {
            case IF_RRD_RRD:
                break;

            case IF_RWR_RRD:

                if (emitSyncThisObjReg != REG_NA && emitIGisInProlog(emitCurIG) && reg2 == (int)REG_ARG_0)
                {
                    // We're relocating "this" in the prolog
                    assert(emitComp->lvaIsOriginalThisArg(0));
                    assert(emitComp->lvaTable[0].lvRegister);
                    assert(emitComp->lvaTable[0].GetRegNum() == reg1);

                    if (emitFullGCinfo)
                    {
                        emitGCregLiveSet(id->idGCref(), genRegMask(reg1), dst, true);
                        break;
                    }
                    else
                    {
                        /* If emitFullGCinfo==false, then we don't use any
                           regPtrDsc's and so explicitly note the location
                           of "this" in GCEncode.cpp
                         */
                    }
                }

                emitGCregLiveUpd(id->idGCref(), reg1, dst);
                break;

            case IF_RRW_RRD:

                switch (id->idIns())
                {
                    /*
                        This must be one of the following cases:

                        xor reg, reg        to assign NULL

                        and r1 , r2         if (ptr1 && ptr2) ...
                        or  r1 , r2         if (ptr1 || ptr2) ...

                        add r1 , r2         to compute a normal byref
                        sub r1 , r2         to compute a strange byref (VC only)
                    */
                    case INS_xor:
                        assert(reg1 == reg2);
                        emitGCregLiveUpd(id->idGCref(), reg1, dst);
                        break;

                    case INS_or:
                    case INS_and:
                        // Result of and/or of pointers is not itself a pointer
                        emitGCregDeadUpd(reg1, dst);
                        break;

                    case INS_add:
                    case INS_sub:
                        assert(id->idGCref() == GCT_BYREF);

#if 0
#ifdef DEBUG
                        // Due to elided register moves, we can't have the following assert.
                        // For example, consider:
                        //    t85 = LCL_VAR byref V01 arg1 rdx (last use) REG rdx
                        //        /--*  t85    byref
                        //        *  STORE_LCL_VAR byref  V40 tmp31 rdx REG rdx
                        // Here, V01 is type `long` on entry, then is stored as a byref. But because
                        // the register allocator assigned the same register, no instruction was
                        // generated, and we only (currently) make gcref/byref changes in emitter GC info
                        // when an instruction is generated. We still generate correct GC info, as this
                        // instruction, if writing a GC ref even through reading a long, will go live here.
                        // These situations typically occur due to unsafe casting, such as with Span<T>.

                        regMaskTP regMask;
                        regMask = genRegMask(reg1) | genRegMask(reg2);

                        // r1/r2 could have been a GCREF as GCREF + int=BYREF
                        //                            or BYREF+/-int=BYREF
                        assert(((regMask & emitThisGCrefRegs) && (ins == INS_add)) ||
                               ((regMask & emitThisByrefRegs) && (ins == INS_add || ins == INS_sub)));
#endif // DEBUG
#endif // 0

                        // Mark r1 as holding a byref
                        emitGCregLiveUpd(GCT_BYREF, reg1, dst);
                        break;

                    default:
#ifdef DEBUG
                        emitDispIns(id, false, false, false);
#endif
                        assert(!"unexpected GC reg update instruction");
                }

                break;

            case IF_RRW_RRW:
                // This must be "xchg reg1, reg2"
                assert(id->idIns() == INS_xchg);

                // If we got here, the GC-ness of the registers doesn't match, so we have to "swap" them in the GC
                // register pointer mask.

                GCtype gc1, gc2;

                gc1 = emitRegGCtype(reg1);
                gc2 = emitRegGCtype(reg2);

                if (gc1 != gc2)
                {
                    // Kill the GC-info about the GC registers

                    if (needsGC(gc1))
                    {
                        emitGCregDeadUpd(reg1, dst);
                    }

                    if (needsGC(gc2))
                    {
                        emitGCregDeadUpd(reg2, dst);
                    }

                    // Now, swap the info

                    if (needsGC(gc1))
                    {
                        emitGCregLiveUpd(gc1, reg2, dst);
                    }

                    if (needsGC(gc2))
                    {
                        emitGCregLiveUpd(gc2, reg1, dst);
                    }
                }
                break;

            default:
#ifdef DEBUG
                emitDispIns(id, false, false, false);
#endif
                assert(!"unexpected GC ref instruction format");
        }
    }
    else
    {
        // Not a GC ref: if the instruction writes a GPR, that register can
        // no longer hold a live GC pointer.
        if (!emitInsCanOnlyWriteSSE2OrAVXReg(id))
        {
            switch (id->idInsFmt())
            {
                case IF_RRD_CNS:
                    // INS_mulEAX can not be used with any of these formats
                    assert(ins != INS_mulEAX && ins != INS_imulEAX);

                    // For the three operand imul instruction the target
                    // register is encoded in the opcode

                    if (instrIs3opImul(ins))
                    {
                        regNumber tgtReg = inst3opImulReg(ins);
                        emitGCregDeadUpd(tgtReg, dst);
                    }
                    break;

                case IF_RWR_RRD:
                case IF_RRW_RRD:
                case IF_RWR_RRD_RRD:
                    emitGCregDeadUpd(reg1, dst);
                    break;

                default:
                    break;
            }
        }
    }

    return dst;
}

//------------------------------------------------------------------------
// emitOutputRRR: Encode and emit a three-register-operand AVX instruction
// ("ins targetReg, src1, src2") into the code buffer. The first source is
// encoded in the VEX 'vvvv' bits; targetReg/src2 go in ModRM.
//
// Arguments:
//    dst - current write position in the output code buffer
//    id  - the instruction descriptor to encode
//
// Return Value:
//    The updated write position (dst advanced past the emitted bytes).
//
BYTE* emitter::emitOutputRRR(BYTE* dst, instrDesc* id)
{
    code_t code;

    instruction ins = id->idIns();
    assert(IsAVXInstruction(ins));
    assert(IsThreeOperandAVXInstruction(ins) || isAvxBlendv(ins));
    regNumber targetReg = id->idReg1();
    regNumber src1      = id->idReg2();
    regNumber src2      = id->idReg3();
    emitAttr  size      = id->idOpSize();

    code = insCodeRM(ins);
    code = AddVexPrefixIfNeeded(ins, code, size);
    code = insEncodeRMreg(ins, code);

    if (TakesRexWPrefix(ins, size))
    {
        code = AddRexWPrefix(ins, code);
    }

    unsigned regCode = insEncodeReg345(ins, targetReg, size, &code);
    regCode |= insEncodeReg012(ins, src2, size, &code);
    // encode source operand reg in 'vvvv' bits in 1's complement form
    code = insEncodeReg3456(ins, src1, size, code);

    // Output the REX prefix
    dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code);

    // Is this a 'big' opcode?
    if (code & 0xFF000000)
    {
        // Output the highest word of the opcode
        dst += emitOutputWord(dst, code >> 16);
        code &= 0x0000FFFF;
    }
    else if (code & 0x00FF0000)
    {
        dst += emitOutputByte(dst, code >> 16);
        code &= 0x0000FFFF;
    }

    // TODO-XArch-CQ: Right now support 4-byte opcode instructions only
    if ((code & 0xFF00) == 0xC000)
    {
        dst += emitOutputWord(dst, code | (regCode << 8));
    }
    else if ((code & 0xFF) == 0x00)
    {
        // This case happens for AVX instructions only
        assert(IsAVXInstruction(ins));

        dst += emitOutputByte(dst, (code >> 8) & 0xFF);
        dst += emitOutputByte(dst, (0xC0 | regCode));
    }
    else
    {
        dst += emitOutputWord(dst, code);
        dst += emitOutputByte(dst, (0xC0 | regCode));
    }

    // AVX 3-operand instructions never produce GC refs, but a GPR target
    // must still be killed in the GC tracking.
    noway_assert(!id->idGCref());

    if (!emitInsCanOnlyWriteSSE2OrAVXReg(id))
    {
        switch (id->idInsFmt())
        {
            case IF_RWR_RRD_RRD:
            case IF_RWR_RRD_RRD_CNS:
            case IF_RWR_RRD_RRD_RRD:
                emitGCregDeadUpd(id->idReg1(), dst);
                break;

            default:
                break;
        }
    }

    return dst;
}

/*****************************************************************************
 *
 *  Output an instruction with a register and constant operands.
 */

//------------------------------------------------------------------------
// emitOutputRI: Encode and emit an instruction with a register and an
// immediate operand ("ins reg, imm"), choosing the shortest valid encoding
// (sign-extended byte immediate, accumulator form, or full immediate),
// then update GC liveness tracking for the target register.
//
// Arguments:
//    dst - current write position in the output code buffer
//    id  - the instruction descriptor to encode
//
// Return Value:
//    The updated write position (dst advanced past the emitted bytes).
//
BYTE* emitter::emitOutputRI(BYTE* dst, instrDesc* id)
{
    code_t      code;
    emitAttr    size = id->idOpSize();
    instruction ins  = id->idIns();
    regNumber   reg  = id->idReg1();
    ssize_t     val  = emitGetInsSC(id);
    // mov and test never use the sign-extended byte-immediate form
    bool valInByte = ((signed char)val == (target_ssize_t)val) && (ins != INS_mov) && (ins != INS_test);

    // BT reg,imm might be useful but it requires special handling of the immediate value
    // (it is always encoded in a byte). Let's not complicate things until this is needed.
    assert(ins != INS_bt);

    if (id->idIsCnsReloc())
    {
        valInByte = false; // relocs can't be placed in a byte
    }

    noway_assert(emitVerifyEncodable(ins, size, reg));

    if (IsSSEOrAVXInstruction(ins))
    {
        // Handle SSE2 instructions of the form "opcode reg, immed8"

        assert(id->idGCref() == GCT_NONE);
        assert(valInByte);

        // The left and right shifts use the same encoding, and are distinguished by the Reg/Opcode field.
        regNumber regOpcode = getSseShiftRegNumber(ins);

        // Get the 'base' opcode.
        code = insCodeMI(ins);
        code = AddVexPrefixIfNeeded(ins, code, size);
        code = insEncodeMIreg(ins, reg, size, code);
        assert(code & 0x00FF0000);
        if (TakesVexPrefix(ins))
        {
            // The 'vvvv' bits encode the destination register, which for this case (RI)
            // is the same as the source.
            code = insEncodeReg3456(ins, reg, size, code);
        }

        unsigned regcode = (insEncodeReg345(ins, regOpcode, size, &code) | insEncodeReg012(ins, reg, size, &code)) << 8;

        // Output the REX prefix
        dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code);

        if (code & 0xFF000000)
        {
            dst += emitOutputWord(dst, code >> 16);
        }
        else if (code & 0xFF0000)
        {
            dst += emitOutputByte(dst, code >> 16);
        }

        dst += emitOutputWord(dst, code | regcode);
        dst += emitOutputByte(dst, val);

        return dst;
    }

    // The 'mov' opcode is special
    if (ins == INS_mov)
    {
        // mov reg, imm uses the "B8+rd" style encoding: the register is
        // folded into the opcode byte and the immediate follows directly.
        code = insCodeACC(ins);
        assert(code < 0x100);

        code |= 0x08; // Set the 'w' bit
        unsigned regcode = insEncodeReg012(ins, reg, size, &code);
        code |= regcode;

        // This is INS_mov and will not take VEX prefix
        assert(!TakesVexPrefix(ins));

        if (TakesRexWPrefix(ins, size))
        {
            code = AddRexWPrefix(ins, code);
        }

        dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code);

        dst += emitOutputByte(dst, code);
        if (size == EA_4BYTE)
        {
            dst += emitOutputLong(dst, val);
        }
#ifdef TARGET_AMD64
        else
        {
            assert(size == EA_PTRSIZE);
            dst += emitOutputSizeT(dst, val);
        }
#endif

        if (id->idIsCnsReloc())
        {
            emitRecordRelocation((void*)(dst - (unsigned)EA_SIZE(size)), (void*)(size_t)val, IMAGE_REL_BASED_MOFFSET);
        }

        // Skip the generic immediate emission below; only the GC update remains
        goto DONE;
    }

    // Decide which encoding is the shortest
    bool useSigned, useACC;

    if (reg == REG_EAX && !instrIs3opImul(ins))
    {
        if (size == EA_1BYTE || (ins == INS_test))
        {
            // For al, ACC encoding is always the smallest
            useSigned = false;
            useACC    = true;
        }
        else
        {
            /* For ax/eax, we avoid ACC encoding for small constants as we
             * can emit the small constant and have it sign-extended.
             * For big constants, the ACC encoding is better as we can use
             * the 1 byte opcode
             */

            if (valInByte)
            {
                // avoid using ACC encoding
                useSigned = true;
                useACC    = false;
            }
            else
            {
                useSigned = false;
                useACC    = true;
            }
        }
    }
    else
    {
        useACC = false;

        if (valInByte)
        {
            useSigned = true;
        }
        else
        {
            useSigned = false;
        }
    }

    // "test" has no 's' bit
    if (!HasRegularWideImmediateForm(ins))
    {
        useSigned = false;
    }

    // Get the 'base' opcode
    if (useACC)
    {
        assert(!useSigned);
        code = insCodeACC(ins);
    }
    else
    {
        assert(!useSigned || valInByte);

        // Some instructions (at least 'imul') do not have a
        // r/m, immed form, but do have a dstReg,srcReg,imm8 form.
        if (valInByte && useSigned && insNeedsRRIb(ins))
        {
            code = insEncodeRRIb(ins, reg, size);
        }
        else
        {
            code = insCodeMI(ins);
            code = AddVexPrefixIfNeeded(ins, code, size);
            code = insEncodeMIreg(ins, reg, size, code);
        }
    }

    switch (size)
    {
        case EA_1BYTE:
            break;

        case EA_2BYTE:
            // Output a size prefix for a 16-bit operand
            dst += emitOutputByte(dst, 0x66);
            FALLTHROUGH;

        case EA_4BYTE:
            // Set the 'w' bit to get the large version
            code |= 0x1;
            break;

#ifdef TARGET_AMD64
        case EA_8BYTE:
            /* Set the 'w' bit to get the large version */
            /* and the REX.W bit to get the really large version */

            code = AddRexWPrefix(ins, code);
            code |= 0x1;
            break;
#endif

        default:
            assert(!"unexpected size");
    }

    // Output the REX prefix
    dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code);

    // Does the value fit in a sign-extended byte?
    // Important!  Only set the 's' bit when we have a size larger than EA_1BYTE.
    // Note: A sign-extending immediate when (size == EA_1BYTE) is invalid in 64-bit mode.

    if (useSigned && (size > EA_1BYTE))
    {
        // We can just set the 's' bit, and issue an immediate byte

        code |= 0x2; // Set the 's' bit to use a sign-extended immediate byte.
        dst += emitOutputWord(dst, code);
        dst += emitOutputByte(dst, val);
    }
    else
    {
        // Can we use an accumulator (EAX) encoding?
        if (useACC)
        {
            dst += emitOutputByte(dst, code);
        }
        else
        {
            dst += emitOutputWord(dst, code);
        }

        // Emit the full-width immediate. Note that on AMD64 an 8-byte
        // operand still takes a 4-byte (sign-extended) immediate here.
        switch (size)
        {
            case EA_1BYTE:
                dst += emitOutputByte(dst, val);
                break;

            case EA_2BYTE:
                dst += emitOutputWord(dst, val);
                break;

            case EA_4BYTE:
                dst += emitOutputLong(dst, val);
                break;

#ifdef TARGET_AMD64
            case EA_8BYTE:
                dst += emitOutputLong(dst, val);
                break;
#endif // TARGET_AMD64

            default:
                break;
        }

        if (id->idIsCnsReloc())
        {
            emitRecordRelocation((void*)(dst - sizeof(INT32)), (void*)(size_t)val, IMAGE_REL_BASED_HIGHLOW);
            assert(size == EA_4BYTE);
        }
    }

DONE:

    // Does this instruction operate on a GC ref value?
    if (id->idGCref())
    {
        switch (id->idInsFmt())
        {
            case IF_RRD_CNS:
                break;

            case IF_RWR_CNS:
                emitGCregLiveUpd(id->idGCref(), id->idReg1(), dst);
                break;

            case IF_RRW_CNS:
                assert(id->idGCref() == GCT_BYREF);

#ifdef DEBUG
                regMaskTP regMask;
                regMask = genRegMask(reg);
                // FIXNOW review the other places and relax the assert there too

                // The reg must currently be holding either a gcref or a byref
                // GCT_GCREF+int = GCT_BYREF, and GCT_BYREF+/-int = GCT_BYREF
                if (emitThisGCrefRegs & regMask)
                {
                    assert(ins == INS_add);
                }
                if (emitThisByrefRegs & regMask)
                {
                    assert(ins == INS_add || ins == INS_sub);
                }
#endif
                // Mark it as holding a GCT_BYREF
                emitGCregLiveUpd(GCT_BYREF, id->idReg1(), dst);
                break;

            default:
#ifdef DEBUG
                emitDispIns(id, false, false, false);
#endif
                assert(!"unexpected GC ref instruction format");
        }

        // mul can never produce a GC ref
        assert(!instrIs3opImul(ins));
        assert(ins != INS_mulEAX && ins != INS_imulEAX);
    }
    else
    {
        switch (id->idInsFmt())
        {
            case IF_RRD_CNS:
                // INS_mulEAX can not be used with any of these formats
                assert(ins != INS_mulEAX && ins != INS_imulEAX);

                // For the three operand imul instruction the target
                // register is encoded in the opcode

                if (instrIs3opImul(ins))
                {
                    regNumber tgtReg = inst3opImulReg(ins);
                    emitGCregDeadUpd(tgtReg, dst);
                }
                break;

            case IF_RRW_CNS:
            case IF_RWR_CNS:
                assert(!instrIs3opImul(ins));
                emitGCregDeadUpd(id->idReg1(), dst);
                break;

            default:
#ifdef DEBUG
                emitDispIns(id, false, false, false);
#endif
                assert(!"unexpected GC ref instruction format");
        }
    }

    return dst;
}

/*****************************************************************************
 *
 *  Output an instruction with a constant operand.
 */

//------------------------------------------------------------------------
// emitOutputIV: Encode and emit an instruction whose only operand is an
// immediate constant (short jumps/loops, "ret imm16", and push of a
// constant). Only push/push_hide may carry a relocated constant.
//
// Arguments:
//    dst - current write position in the output code buffer
//    id  - the instruction descriptor to encode
//
// Return Value:
//    The updated write position (dst advanced past the emitted bytes).
//
BYTE* emitter::emitOutputIV(BYTE* dst, instrDesc* id)
{
    code_t      code;
    instruction ins       = id->idIns();
    emitAttr    size      = id->idOpSize();
    ssize_t     val       = emitGetInsSC(id);
    bool        valInByte = ((signed char)val == (target_ssize_t)val);

    // We would want to update GC info correctly
    assert(!IsSSEInstruction(ins));
    assert(!IsAVXInstruction(ins));

#ifdef TARGET_AMD64
    // all these opcodes take a sign-extended 4-byte immediate, max
    noway_assert(size < EA_8BYTE || ((int)val == val && !id->idIsCnsReloc()));
#endif

    if (id->idIsCnsReloc())
    {
        valInByte = false; // relocs can't be placed in a byte

        // Of these instructions only the push instruction can have reloc
        assert(ins == INS_push || ins == INS_push_hide);
    }

    switch (ins)
    {
        case INS_jge:
            // Short conditional jump: rel8 displacement only
            assert((val >= -128) && (val <= 127));
            dst += emitOutputByte(dst, insCode(ins));
            dst += emitOutputByte(dst, val);
            break;

        case INS_loop:
            assert((val >= -128) && (val <= 127));
            dst += emitOutputByte(dst, insCodeMI(ins));
            dst += emitOutputByte(dst, val);
            break;

        case INS_ret:
            // "ret imm16"; a zero immediate would use the plain ret form
            assert(val);
            dst += emitOutputByte(dst, insCodeMI(ins));
            dst += emitOutputWord(dst, val);
            break;

        case INS_push_hide:
        case INS_push:
            code = insCodeMI(ins);

            // Does the operand fit in a byte?
            if (valInByte)
            {
                // Bit 1 selects the sign-extended imm8 form of push
                dst += emitOutputByte(dst, code | 2);
                dst += emitOutputByte(dst, val);
            }
            else
            {
                if (TakesRexWPrefix(ins, size))
                {
                    code = AddRexWPrefix(ins, code);
                    dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code);
                }

                dst += emitOutputByte(dst, code);
                dst += emitOutputLong(dst, val);
                if (id->idIsCnsReloc())
                {
                    emitRecordRelocation((void*)(dst - sizeof(INT32)), (void*)(size_t)val, IMAGE_REL_BASED_HIGHLOW);
                }
            }

            // Did we push a GC ref value?
            if (id->idGCref())
            {
#ifdef DEBUG
                printf("UNDONE: record GCref push [cns]\n");
#endif
            }

            break;

        default:
            assert(!"unexpected instruction");
    }

    return dst;
}

/*****************************************************************************
 *
 *  Output a local jump instruction.
 *  This function also handles non-jumps that have jump-like characteristics, like RIP-relative LEA of a label that
 *  needs to get bound to an actual address and processed by branch shortening.
 */
BYTE* emitter::emitOutputLJ(insGroup* ig, BYTE* dst, instrDesc* i)
{
    unsigned srcOffs;
    unsigned dstOffs;
    BYTE*    srcAddr;
    BYTE*    dstAddr;
    ssize_t  distVal;

    instrDescJmp* id  = (instrDescJmp*)i;
    instruction   ins = id->idIns();
    bool          jmp;
    bool          relAddr = true; // does the instruction use relative-addressing?

    // SSE/AVX doesn't make any sense here
    assert(!IsSSEInstruction(ins));
    assert(!IsAVXInstruction(ins));

    // Small (short-form) and large (long-form) encoding sizes for this
    // instruction, used by the branch-shortening logic below.
    size_t ssz;
    size_t lsz;

    switch (ins)
    {
        default:
            // Any conditional jump
            ssz = JCC_SIZE_SMALL;
            lsz = JCC_SIZE_LARGE;
            jmp = true;
            break;

        case INS_jmp:
            ssz = JMP_SIZE_SMALL;
            lsz = JMP_SIZE_LARGE;
            jmp = true;
            break;

        case INS_call:
            ssz = lsz = CALL_INST_SIZE;
            jmp       = false;
            break;

        case INS_push_hide:
        case INS_push:
            // push of a label address: opcode byte + 4-byte absolute address
            ssz = lsz = 5;
            jmp       = false;
            relAddr   = false;
            break;

        case INS_mov:
        case INS_lea:
            ssz = lsz = id->idCodeSize();
            jmp       = false;
            relAddr   = false;
            break;
    }

    // Figure out the distance to the target
    srcOffs = emitCurCodeOffs(dst);
    srcAddr = emitOffsetToPtr(srcOffs);

    if (id->idAddr()->iiaHasInstrCount())
    {
        // Target is expressed as a relative instruction count within 'ig'
        assert(ig != nullptr);
        int      instrCount = id->idAddr()->iiaGetInstrCount();
        unsigned insNum     = emitFindInsNum(ig, id);

        if (instrCount < 0)
        {
            // Backward branches using instruction count must be within the same instruction group.
            assert(insNum + 1 >= (unsigned)(-instrCount));
        }

        dstOffs = ig->igOffs + emitFindOffset(ig, (insNum + 1 + instrCount));
        dstAddr = emitOffsetToPtr(dstOffs);
    }
    else
    {
        // Target is a bound label (instruction group)
        dstOffs = id->idAddr()->iiaIGlabel->igOffs;
        dstAddr = emitOffsetToPtr(dstOffs);
        if (!relAddr)
        {
            // Absolute addressing: distance is computed from address zero
            srcAddr = nullptr;
        }
    }

    distVal = (ssize_t)(dstAddr - srcAddr);

    if (dstOffs <= srcOffs)
    {
        // This is a backward jump - distance is known at this point
        CLANG_FORMAT_COMMENT_ANCHOR;

#if DEBUG_EMIT
        if (id->idDebugOnlyInfo()->idNum == (unsigned)INTERESTING_JUMP_NUM || INTERESTING_JUMP_NUM == 0)
        {
            size_t blkOffs = id->idjIG->igOffs;

            if (INTERESTING_JUMP_NUM == 0)
            {
                printf("[3] Jump %u:\n", id->idDebugOnlyInfo()->idNum);
            }
            printf("[3] Jump block is at %08X - %02X = %08X\n", blkOffs, emitOffsAdj, blkOffs - emitOffsAdj);
            printf("[3] Jump is at %08X - %02X = %08X\n", srcOffs, emitOffsAdj, srcOffs - emitOffsAdj);
            printf("[3] Label block is at %08X - %02X = %08X\n", dstOffs, emitOffsAdj, dstOffs - emitOffsAdj);
        }
#endif

        // Can we use a short jump?
        if (jmp && distVal - ssz >= (size_t)JMP_DIST_SMALL_MAX_NEG)
        {
            emitSetShortJump(id);
        }
    }
    else
    {
        // This is a forward jump - distance will be an upper limit
        emitFwdJumps = true;

        // The target offset will be closer by at least 'emitOffsAdj', but only if this
        // jump doesn't cross the hot-cold boundary.
        if (!emitJumpCrossHotColdBoundary(srcOffs, dstOffs))
        {
            dstOffs -= emitOffsAdj;
            distVal -= emitOffsAdj;
        }

        // Record the location of the jump for later patching
        id->idjOffs = dstOffs;

        // Are we overflowing the id->idjOffs bitfield?
        if (id->idjOffs != dstOffs)
        {
            IMPL_LIMITATION("Method is too large");
        }

#if DEBUG_EMIT
        if (id->idDebugOnlyInfo()->idNum == (unsigned)INTERESTING_JUMP_NUM || INTERESTING_JUMP_NUM == 0)
        {
            size_t blkOffs = id->idjIG->igOffs;

            if (INTERESTING_JUMP_NUM == 0)
            {
                printf("[4] Jump %u:\n", id->idDebugOnlyInfo()->idNum);
            }
            printf("[4] Jump block is at %08X\n", blkOffs);
            printf("[4] Jump is at %08X\n", srcOffs);
            printf("[4] Label block is at %08X - %02X = %08X\n", dstOffs + emitOffsAdj, emitOffsAdj, dstOffs);
        }
#endif

        // Can we use a short jump?
        if (jmp && distVal - ssz <= (size_t)JMP_DIST_SMALL_MAX_POS)
        {
            emitSetShortJump(id);
        }
    }

    // Adjust the offset to emit relative to the end of the instruction
    if (relAddr)
    {
        distVal -= id->idjShort ? ssz : lsz;
    }

#ifdef DEBUG
    if (0 && emitComp->verbose)
    {
        size_t sz          = id->idjShort ? ssz : lsz;
        int    distValSize = id->idjShort ? 4 : 8;
        printf("; %s jump [%08X/%03u] from %0*X to %0*X: dist = %08XH\n", (dstOffs <= srcOffs) ? "Fwd" : "Bwd",
               emitComp->dspPtr(id), id->idDebugOnlyInfo()->idNum, distValSize, srcOffs + sz, distValSize, dstOffs,
               distVal);
    }
#endif

    // What size jump should we use?
    if (id->idjShort)
    {
        // Short jump
        assert(!id->idjKeepLong);
        assert(emitJumpCrossHotColdBoundary(srcOffs, dstOffs) == false);

        assert(JMP_SIZE_SMALL == JCC_SIZE_SMALL);
        assert(JMP_SIZE_SMALL == 2);

        assert(jmp);

        if (id->idCodeSize() != JMP_SIZE_SMALL)
        {
#if DEBUG_EMIT || defined(DEBUG)
            int offsShrinkage = id->idCodeSize() - JMP_SIZE_SMALL;
            if (INDEBUG(emitComp->verbose ||)(id->idDebugOnlyInfo()->idNum == (unsigned)INTERESTING_JUMP_NUM ||
                                              INTERESTING_JUMP_NUM == 0))
            {
                printf("; NOTE: size of jump [%08p] mis-predicted by %d bytes\n", dspPtr(id), offsShrinkage);
            }
#endif
        }

        dst += emitOutputByte(dst, insCode(ins));

        // For forward jumps, record the address of the distance value
        id->idjTemp.idjAddr = (distVal > 0) ? dst : nullptr;

        dst += emitOutputByte(dst, distVal);
    }
    else
    {
        code_t code;

        // Long jump
        if (jmp)
        {
            // Verify that conditional-jump opcodes map to their long forms by a
            // fixed offset so a single addition below picks the right opcode.
            // clang-format off
            assert(INS_jmp + (INS_l_jmp - INS_jmp) == INS_l_jmp);
            assert(INS_jo + (INS_l_jmp - INS_jmp) == INS_l_jo);
            assert(INS_jb + (INS_l_jmp - INS_jmp) == INS_l_jb);
            assert(INS_jae + (INS_l_jmp - INS_jmp) == INS_l_jae);
            assert(INS_je + (INS_l_jmp - INS_jmp) == INS_l_je);
            assert(INS_jne + (INS_l_jmp - INS_jmp) == INS_l_jne);
            assert(INS_jbe + (INS_l_jmp - INS_jmp) == INS_l_jbe);
            assert(INS_ja + (INS_l_jmp - INS_jmp) == INS_l_ja);
            assert(INS_js + (INS_l_jmp - INS_jmp) == INS_l_js);
            assert(INS_jns + (INS_l_jmp - INS_jmp) == INS_l_jns);
            assert(INS_jp + (INS_l_jmp - INS_jmp) == INS_l_jp);
            assert(INS_jnp + (INS_l_jmp - INS_jmp) == INS_l_jnp);
            assert(INS_jl + (INS_l_jmp - INS_jmp) == INS_l_jl);
            assert(INS_jge + (INS_l_jmp - INS_jmp) == INS_l_jge);
            assert(INS_jle + (INS_l_jmp - INS_jmp) == INS_l_jle);
            assert(INS_jg + (INS_l_jmp - INS_jmp) == INS_l_jg);
            // clang-format on

            code = insCode((instruction)(ins + (INS_l_jmp - INS_jmp)));
        }
        else if (ins == INS_push || ins == INS_push_hide)
        {
            assert(insCodeMI(INS_push) == 0x68);
            code = 0x68;
        }
        else if (ins == INS_mov)
        {
            // Make it look like IF_SWR_CNS so that emitOutputSV emits the r/m32 for us
            insFormat tmpInsFmt   = id->idInsFmt();
            insGroup* tmpIGlabel  = id->idAddr()->iiaIGlabel;
            bool      tmpDspReloc = id->idIsDspReloc();

            id->idInsFmt(IF_SWR_CNS);
            id->idAddr()->iiaLclVar = ((instrDescLbl*)id)->dstLclVar;
            id->idSetIsDspReloc(false);

            dst = emitOutputSV(dst, id, insCodeMI(ins));

            // Restore id fields with original values
            id->idInsFmt(tmpInsFmt);
            id->idAddr()->iiaIGlabel = tmpIGlabel;
            id->idSetIsDspReloc(tmpDspReloc);

            // Placeholder; the "ins != INS_mov" check below skips emitting it
            code = 0xCC;
        }
        else if (ins == INS_lea)
        {
            // Make an instrDesc that looks like IF_RWR_ARD so that emitOutputAM emits the r/m32 for us.
            // We basically are doing what emitIns_R_AI does.
            // TODO-XArch-Cleanup: revisit this.
            instrDescAmd  idAmdStackLocal;
            instrDescAmd* idAmd = &idAmdStackLocal;
            *(instrDesc*)idAmd  = *(instrDesc*)id; // copy all the "core" fields
            memset((BYTE*)idAmd + sizeof(instrDesc), 0,
                   sizeof(instrDescAmd) - sizeof(instrDesc)); // zero out the tail that wasn't copied

            // No base/index register => RIP-relative (or absolute) addressing
            idAmd->idInsFmt(IF_RWR_ARD);
            idAmd->idAddr()->iiaAddrMode.amBaseReg = REG_NA;
            idAmd->idAddr()->iiaAddrMode.amIndxReg = REG_NA;
            emitSetAmdDisp(idAmd, distVal); // set the displacement
            idAmd->idSetIsDspReloc(id->idIsDspReloc());
            assert(emitGetInsAmdAny(idAmd) == distVal); // make sure "disp" is stored properly

            UNATIVE_OFFSET sz = emitInsSizeAM(idAmd, insCodeRM(ins));
            idAmd->idCodeSize(sz);

            code = insCodeRM(ins);
            code |= (insEncodeReg345(ins, id->idReg1(), EA_PTRSIZE, &code) << 8);

            dst = emitOutputAM(dst, idAmd, code, nullptr);

            code = 0xCC;

            // For forward jumps, record the address of the distance value
            // Hard-coded 4 here because we already output the displacement, as the last thing.
            id->idjTemp.idjAddr = (dstOffs > srcOffs) ? (dst - 4) : nullptr;

            // We're done
            return dst;
        }
        else
        {
            // INS_call: direct near call opcode
            code = 0xE8;
        }

        if (ins != INS_mov)
        {
            dst += emitOutputByte(dst, code);

            if (code & 0xFF00)
            {
                dst += emitOutputByte(dst, code >> 8);
            }
        }

        // For forward jumps, record the address of the distance value
        id->idjTemp.idjAddr = (dstOffs > srcOffs) ? dst : nullptr;

        dst += emitOutputLong(dst, distVal);

// On AMD64 this block runs unconditionally: all REL32 must be recorded.
#ifndef TARGET_AMD64 // all REL32 on AMD have to go through recordRelocation
        if (emitComp->opts.compReloc)
#endif
        {
            if (!relAddr)
            {
                emitRecordRelocation((void*)(dst - sizeof(INT32)), (void*)distVal, IMAGE_REL_BASED_HIGHLOW);
            }
            else if (emitJumpCrossHotColdBoundary(srcOffs, dstOffs))
            {
                assert(id->idjKeepLong);
                emitRecordRelocation((void*)(dst - sizeof(INT32)), dst + distVal, IMAGE_REL_BASED_REL32);
            }
        }
    }

    // Local calls kill all registers
    if (ins == INS_call && (emitThisGCrefRegs | emitThisByrefRegs))
    {
        emitGCregDeadUpdMask(emitThisGCrefRegs | emitThisByrefRegs, dst);
    }

    return dst;
}

/*****************************************************************************
 *
 *  Append the machine code corresponding to the given instruction descriptor
 *  to the code block at '*dp'; the base of the code block is 'bp', and 'ig'
 *  is the instruction group that contains the instruction. Updates '*dp' to
 *  point past the generated code, and returns the size of the instruction
 *  descriptor in bytes.
 */

#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp)
{
    assert(emitIssuing);

    BYTE*         dst           = *dp;
    size_t        sz            = sizeof(instrDesc);
    instruction   ins           = id->idIns();
    unsigned char callInstrSize = 0;

#ifdef DEBUG
    bool dspOffs = emitComp->opts.dspGCtbls;
#endif // DEBUG

    emitAttr size = id->idOpSize();

    assert(REG_NA == (int)REG_NA);

    assert(ins != INS_imul || size >= EA_4BYTE);                  // Has no 'w' bit
    assert(instrIs3opImul(id->idIns()) == 0 || size >= EA_4BYTE); // Has no 'w' bit

    VARSET_TP GCvars(VarSetOps::UninitVal());

    // What instruction format have we got?
switch (id->idInsFmt()) { code_t code; unsigned regcode; int args; CnsVal cnsVal; BYTE* addr; bool recCall; regMaskTP gcrefRegs; regMaskTP byrefRegs; /********************************************************************/ /* No operands */ /********************************************************************/ case IF_NONE: // the loop alignment pseudo instruction if (ins == INS_align) { sz = sizeof(instrDescAlign); // IG can be marked as not needing alignment after emitting align instruction // In such case, skip outputting alignment. if (ig->endsWithAlignInstr()) { dst = emitOutputAlign(ig, id, dst); } #ifdef DEBUG else { // If the IG is not marked as need alignment, then the code size // should be zero i.e. no padding needed. assert(id->idCodeSize() == 0); } #endif break; } if (ins == INS_nop) { BYTE* dstRW = dst + writeableOffset; dstRW = emitOutputNOP(dstRW, id->idCodeSize()); dst = dstRW - writeableOffset; break; } // the cdq instruction kills the EDX register implicitly if (ins == INS_cdq) { emitGCregDeadUpd(REG_EDX, dst); } assert(id->idGCref() == GCT_NONE); code = insCodeMR(ins); #ifdef TARGET_AMD64 // Support only scalar AVX instructions and hence size is hard coded to 4-byte. code = AddVexPrefixIfNeeded(ins, code, EA_4BYTE); if (((ins == INS_cdq) || (ins == INS_cwde)) && TakesRexWPrefix(ins, id->idOpSize())) { code = AddRexWPrefix(ins, code); } dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code); #endif // Is this a 'big' opcode? 
if (code & 0xFF000000) { // The high word and then the low word dst += emitOutputWord(dst, code >> 16); code &= 0x0000FFFF; dst += emitOutputWord(dst, code); } else if (code & 0x00FF0000) { // The high byte and then the low word dst += emitOutputByte(dst, code >> 16); code &= 0x0000FFFF; dst += emitOutputWord(dst, code); } else if (code & 0xFF00) { // The 2 byte opcode dst += emitOutputWord(dst, code); } else { // The 1 byte opcode dst += emitOutputByte(dst, code); } break; /********************************************************************/ /* Simple constant, local label, method */ /********************************************************************/ case IF_CNS: dst = emitOutputIV(dst, id); sz = emitSizeOfInsDsc(id); break; case IF_LABEL: case IF_RWR_LABEL: case IF_SWR_LABEL: assert(id->idGCref() == GCT_NONE); assert(id->idIsBound()); // TODO-XArch-Cleanup: handle IF_RWR_LABEL in emitOutputLJ() or change it to emitOutputAM()? dst = emitOutputLJ(ig, dst, id); sz = (id->idInsFmt() == IF_SWR_LABEL ? sizeof(instrDescLbl) : sizeof(instrDescJmp)); break; case IF_METHOD: case IF_METHPTR: // Assume we'll be recording this call recCall = true; // Get hold of the argument count and field Handle args = emitGetInsCDinfo(id); // Is this a "fat" call descriptor? if (id->idIsLargeCall()) { instrDescCGCA* idCall = (instrDescCGCA*)id; gcrefRegs = idCall->idcGcrefRegs; byrefRegs = idCall->idcByrefRegs; VarSetOps::Assign(emitComp, GCvars, idCall->idcGCvars); sz = sizeof(instrDescCGCA); } else { assert(!id->idIsLargeDsp()); assert(!id->idIsLargeCns()); gcrefRegs = emitDecodeCallGCregs(id); byrefRegs = 0; VarSetOps::AssignNoCopy(emitComp, GCvars, VarSetOps::MakeEmpty(emitComp)); sz = sizeof(instrDesc); } addr = (BYTE*)id->idAddr()->iiaAddr; assert(addr != nullptr); // Some helpers don't get recorded in GC tables if (id->idIsNoGC()) { recCall = false; } // What kind of a call do we have here? 
if (id->idInsFmt() == IF_METHPTR) { // This is call indirect via a method pointer assert((ins == INS_call) || (ins == INS_tail_i_jmp)); code = insCodeMR(ins); if (id->idIsDspReloc()) { dst += emitOutputWord(dst, code | 0x0500); #ifdef TARGET_AMD64 dst += emitOutputLong(dst, 0); #else dst += emitOutputLong(dst, (int)(ssize_t)addr); #endif emitRecordRelocation((void*)(dst - sizeof(int)), addr, IMAGE_REL_BASED_DISP32); } else { #ifdef TARGET_X86 dst += emitOutputWord(dst, code | 0x0500); #else // TARGET_AMD64 // Amd64: addr fits within 32-bits and can be encoded as a displacement relative to zero. // This addr mode should never be used while generating relocatable ngen code nor if // the addr can be encoded as pc-relative address. noway_assert(!emitComp->opts.compReloc); noway_assert(codeGen->genAddrRelocTypeHint((size_t)addr) != IMAGE_REL_BASED_REL32); noway_assert(static_cast<int>(reinterpret_cast<intptr_t>(addr)) == (ssize_t)addr); // This requires, specifying a SIB byte after ModRM byte. dst += emitOutputWord(dst, code | 0x0400); dst += emitOutputByte(dst, 0x25); #endif // TARGET_AMD64 dst += emitOutputLong(dst, static_cast<int>(reinterpret_cast<intptr_t>(addr))); } goto DONE_CALL; } // Else // This is call direct where we know the target, thus we can // use a direct call; the target to jump to is in iiaAddr. assert(id->idInsFmt() == IF_METHOD); // Output the call opcode followed by the target distance dst += (ins == INS_l_jmp) ? emitOutputByte(dst, insCode(ins)) : emitOutputByte(dst, insCodeMI(ins)); ssize_t offset; #ifdef TARGET_AMD64 // All REL32 on Amd64 go through recordRelocation. Here we will output zero to advance dst. offset = 0; assert(id->idIsDspReloc()); #else // Calculate PC relative displacement. 
// Although you think we should be using sizeof(void*), the x86 and x64 instruction set // only allow a 32-bit offset, so we correctly use sizeof(INT32) offset = addr - (dst + sizeof(INT32)); #endif dst += emitOutputLong(dst, offset); if (id->idIsDspReloc()) { emitRecordRelocation((void*)(dst - sizeof(INT32)), addr, IMAGE_REL_BASED_REL32); } DONE_CALL: /* We update the variable (not register) GC info before the call as the variables cannot be used by the call. Killing variables before the call helps with boundary conditions if the call is CORINFO_HELP_THROW - see bug 50029. If we ever track aliased variables (which could be used by the call), we would have to keep them alive past the call. */ assert(FitsIn<unsigned char>(dst - *dp)); callInstrSize = static_cast<unsigned char>(dst - *dp); // Note the use of address `*dp`, the call instruction address, instead of `dst`, the post-call-instruction // address. emitUpdateLiveGCvars(GCvars, *dp); #ifdef DEBUG // Output any delta in GC variable info, corresponding to the before-call GC var updates done above. if (EMIT_GC_VERBOSE || emitComp->opts.disasmWithGC) { emitDispGCVarDelta(); } #endif // DEBUG // If the method returns a GC ref, mark EAX appropriately if (id->idGCref() == GCT_GCREF) { gcrefRegs |= RBM_EAX; } else if (id->idGCref() == GCT_BYREF) { byrefRegs |= RBM_EAX; } #ifdef UNIX_AMD64_ABI // If is a multi-register return method is called, mark RDX appropriately (for System V AMD64). 
if (id->idIsLargeCall()) { instrDescCGCA* idCall = (instrDescCGCA*)id; if (idCall->idSecondGCref() == GCT_GCREF) { gcrefRegs |= RBM_RDX; } else if (idCall->idSecondGCref() == GCT_BYREF) { byrefRegs |= RBM_RDX; } } #endif // UNIX_AMD64_ABI // If the GC register set has changed, report the new set if (gcrefRegs != emitThisGCrefRegs) { emitUpdateLiveGCregs(GCT_GCREF, gcrefRegs, dst); } if (byrefRegs != emitThisByrefRegs) { emitUpdateLiveGCregs(GCT_BYREF, byrefRegs, dst); } if (recCall || args) { // For callee-pop, all arguments will be popped after the call. // For caller-pop, any GC arguments will go dead after the call. assert(callInstrSize != 0); if (args >= 0) { emitStackPop(dst, /*isCall*/ true, callInstrSize, args); } else { emitStackKillArgs(dst, -args, callInstrSize); } } // Do we need to record a call location for GC purposes? if (!emitFullGCinfo && recCall) { assert(callInstrSize != 0); emitRecordGCcall(dst, callInstrSize); } #ifdef DEBUG if (ins == INS_call) { emitRecordCallSite(emitCurCodeOffs(*dp), id->idDebugOnlyInfo()->idCallSig, (CORINFO_METHOD_HANDLE)id->idDebugOnlyInfo()->idMemCookie); } #endif // DEBUG break; /********************************************************************/ /* One register operand */ /********************************************************************/ case IF_RRD: case IF_RWR: case IF_RRW: dst = emitOutputR(dst, id); sz = SMALL_IDSC_SIZE; break; /********************************************************************/ /* Register and register/constant */ /********************************************************************/ case IF_RRW_SHF: code = insCodeMR(ins); // Emit the VEX prefix if it exists code = AddVexPrefixIfNeeded(ins, code, size); code = insEncodeMRreg(ins, id->idReg1(), size, code); // set the W bit if (size != EA_1BYTE) { code |= 1; } // Emit the REX prefix if it exists if (TakesRexWPrefix(ins, size)) { code = AddRexWPrefix(ins, code); } // Output a size prefix for a 16-bit operand if (size == EA_2BYTE) { dst += 
emitOutputByte(dst, 0x66); } dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code); dst += emitOutputWord(dst, code); dst += emitOutputByte(dst, emitGetInsSC(id)); sz = emitSizeOfInsDsc(id); // Update GC info. assert(!id->idGCref()); emitGCregDeadUpd(id->idReg1(), dst); break; case IF_RRD_RRD: case IF_RWR_RRD: case IF_RRW_RRD: case IF_RRW_RRW: dst = emitOutputRR(dst, id); sz = SMALL_IDSC_SIZE; break; case IF_RRD_CNS: case IF_RWR_CNS: case IF_RRW_CNS: dst = emitOutputRI(dst, id); sz = emitSizeOfInsDsc(id); break; case IF_RWR_RRD_RRD: dst = emitOutputRRR(dst, id); sz = emitSizeOfInsDsc(id); break; case IF_RWR_RRD_RRD_CNS: case IF_RWR_RRD_RRD_RRD: dst = emitOutputRRR(dst, id); sz = emitSizeOfInsDsc(id); dst += emitOutputByte(dst, emitGetInsSC(id)); break; case IF_RRW_RRW_CNS: assert(id->idGCref() == GCT_NONE); // Get the 'base' opcode (it's a big one) // Also, determine which operand goes where in the ModRM byte. regNumber mReg; regNumber rReg; if (hasCodeMR(ins)) { code = insCodeMR(ins); // Emit the VEX prefix if it exists code = AddVexPrefixIfNeeded(ins, code, size); code = insEncodeMRreg(ins, code); mReg = id->idReg1(); rReg = id->idReg2(); } else if (hasCodeMI(ins)) { code = insCodeMI(ins); // Emit the VEX prefix if it exists code = AddVexPrefixIfNeeded(ins, code, size); assert((code & 0xC000) == 0); code |= 0xC000; mReg = id->idReg2(); // The left and right shifts use the same encoding, and are distinguished by the Reg/Opcode field. rReg = getSseShiftRegNumber(ins); } else { code = insCodeRM(ins); // Emit the VEX prefix if it exists code = AddVexPrefixIfNeeded(ins, code, size); code = insEncodeRMreg(ins, code); mReg = id->idReg2(); rReg = id->idReg1(); } assert(code & 0x00FF0000); if (TakesRexWPrefix(ins, size)) { code = AddRexWPrefix(ins, code); } if (TakesVexPrefix(ins)) { if (IsDstDstSrcAVXInstruction(ins)) { // Encode source/dest operand reg in 'vvvv' bits in 1's complement form // This code will have to change when we support 3 operands. 
// For now, we always overload this source with the destination (always reg1). // (Though we will need to handle the few ops that can have the 'vvvv' bits as destination, // e.g. pslldq, when/if we support those instructions with 2 registers.) // (see x64 manual Table 2-9. Instructions with a VEX.vvvv destination) code = insEncodeReg3456(ins, id->idReg1(), size, code); } else if (IsDstSrcSrcAVXInstruction(ins)) { // This is a "merge" move instruction. // Encode source operand reg in 'vvvv' bits in 1's complement form code = insEncodeReg3456(ins, id->idReg2(), size, code); } } regcode = (insEncodeReg345(ins, rReg, size, &code) | insEncodeReg012(ins, mReg, size, &code)); // Output the REX prefix dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code); if (code & 0xFF000000) { // Output the highest word of the opcode dst += emitOutputWord(dst, code >> 16); code &= 0x0000FFFF; if (Is4ByteSSEInstruction(ins)) { // Output 3rd byte of the opcode dst += emitOutputByte(dst, code); code &= 0xFF00; } } else if (code & 0x00FF0000) { dst += emitOutputByte(dst, code >> 16); code &= 0x0000FFFF; } // TODO-XArch-CQ: Right now support 4-byte opcode instructions only if ((code & 0xFF00) == 0xC000) { dst += emitOutputWord(dst, code | (regcode << 8)); } else if ((code & 0xFF) == 0x00) { // This case happens for some SSE/AVX instructions only assert(IsAVXInstruction(ins) || Is4ByteSSEInstruction(ins)); dst += emitOutputByte(dst, (code >> 8) & 0xFF); dst += emitOutputByte(dst, (0xC0 | regcode)); } else { dst += emitOutputWord(dst, code); dst += emitOutputByte(dst, (0xC0 | regcode)); } dst += emitOutputByte(dst, emitGetInsSC(id)); sz = emitSizeOfInsDsc(id); // Kill any GC ref in the destination register if necessary. 
if (!emitInsCanOnlyWriteSSE2OrAVXReg(id)) { emitGCregDeadUpd(id->idReg1(), dst); } break; /********************************************************************/ /* Address mode operand */ /********************************************************************/ case IF_ARD: case IF_AWR: case IF_ARW: dst = emitCodeWithInstructionSize(dst, emitOutputAM(dst, id, insCodeMR(ins)), &callInstrSize); switch (ins) { case INS_call: IND_CALL: // Get hold of the argument count and method handle args = emitGetInsCIargs(id); // Is this a "fat" call descriptor? if (id->idIsLargeCall()) { instrDescCGCA* idCall = (instrDescCGCA*)id; gcrefRegs = idCall->idcGcrefRegs; byrefRegs = idCall->idcByrefRegs; VarSetOps::Assign(emitComp, GCvars, idCall->idcGCvars); sz = sizeof(instrDescCGCA); } else { assert(!id->idIsLargeDsp()); assert(!id->idIsLargeCns()); gcrefRegs = emitDecodeCallGCregs(id); byrefRegs = 0; VarSetOps::AssignNoCopy(emitComp, GCvars, VarSetOps::MakeEmpty(emitComp)); sz = sizeof(instrDesc); } recCall = true; goto DONE_CALL; default: sz = emitSizeOfInsDsc(id); break; } break; case IF_RRW_ARD_CNS: case IF_RWR_ARD_CNS: assert(IsSSEOrAVXInstruction(ins)); emitGetInsAmdCns(id, &cnsVal); code = insCodeRM(ins); // Special case 4-byte AVX instructions if (EncodedBySSE38orSSE3A(ins)) { dst = emitOutputAM(dst, id, code, &cnsVal); } else { code = AddVexPrefixIfNeeded(ins, code, size); regcode = (insEncodeReg345(ins, id->idReg1(), size, &code) << 8); dst = emitOutputAM(dst, id, code | regcode, &cnsVal); } sz = emitSizeOfInsDsc(id); break; case IF_AWR_RRD_CNS: assert(ins == INS_vextracti128 || ins == INS_vextractf128); assert(UseVEXEncoding()); emitGetInsAmdCns(id, &cnsVal); code = insCodeMR(ins); dst = emitOutputAM(dst, id, code, &cnsVal); sz = emitSizeOfInsDsc(id); break; case IF_RRD_ARD: case IF_RWR_ARD: case IF_RRW_ARD: case IF_RWR_RRD_ARD: { code = insCodeRM(ins); if (EncodedBySSE38orSSE3A(ins) || (ins == INS_crc32)) { dst = emitOutputAM(dst, id, code); } else { code = 
AddVexPrefixIfNeeded(ins, code, size); regcode = (insEncodeReg345(ins, id->idReg1(), size, &code) << 8); dst = emitOutputAM(dst, id, code | regcode); } sz = emitSizeOfInsDsc(id); break; } case IF_RWR_ARD_RRD: { assert(IsAVX2GatherInstruction(ins)); code = insCodeRM(ins); dst = emitOutputAM(dst, id, code); sz = emitSizeOfInsDsc(id); break; } case IF_RWR_RRD_ARD_CNS: case IF_RWR_RRD_ARD_RRD: { assert(IsSSEOrAVXInstruction(ins)); emitGetInsAmdCns(id, &cnsVal); code = insCodeRM(ins); if (EncodedBySSE38orSSE3A(ins)) { dst = emitOutputAM(dst, id, code, &cnsVal); } else { code = AddVexPrefixIfNeeded(ins, code, size); regcode = (insEncodeReg345(ins, id->idReg1(), size, &code) << 8); dst = emitOutputAM(dst, id, code | regcode, &cnsVal); } sz = emitSizeOfInsDsc(id); break; } case IF_ARD_RRD: case IF_AWR_RRD: case IF_ARW_RRD: code = insCodeMR(ins); code = AddVexPrefixIfNeeded(ins, code, size); regcode = (insEncodeReg345(ins, id->idReg1(), size, &code) << 8); dst = emitOutputAM(dst, id, code | regcode); sz = emitSizeOfInsDsc(id); break; case IF_AWR_RRD_RRD: { code = insCodeMR(ins); code = AddVexPrefixIfNeeded(ins, code, size); dst = emitOutputAM(dst, id, code); sz = emitSizeOfInsDsc(id); break; } case IF_ARD_CNS: case IF_AWR_CNS: case IF_ARW_CNS: emitGetInsAmdCns(id, &cnsVal); dst = emitOutputAM(dst, id, insCodeMI(ins), &cnsVal); sz = emitSizeOfInsDsc(id); break; case IF_ARW_SHF: emitGetInsAmdCns(id, &cnsVal); dst = emitOutputAM(dst, id, insCodeMR(ins), &cnsVal); sz = emitSizeOfInsDsc(id); break; /********************************************************************/ /* Stack-based operand */ /********************************************************************/ case IF_SRD: case IF_SWR: case IF_SRW: assert(ins != INS_pop_hide); if (ins == INS_pop) { // The offset in "pop [ESP+xxx]" is relative to the new ESP value CLANG_FORMAT_COMMENT_ANCHOR; #if !FEATURE_FIXED_OUT_ARGS emitCurStackLvl -= sizeof(int); #endif dst = emitOutputSV(dst, id, insCodeMR(ins)); #if 
!FEATURE_FIXED_OUT_ARGS emitCurStackLvl += sizeof(int); #endif break; } dst = emitCodeWithInstructionSize(dst, emitOutputSV(dst, id, insCodeMR(ins)), &callInstrSize); if (ins == INS_call) { goto IND_CALL; } break; case IF_SRD_CNS: case IF_SWR_CNS: case IF_SRW_CNS: emitGetInsCns(id, &cnsVal); dst = emitOutputSV(dst, id, insCodeMI(ins), &cnsVal); sz = emitSizeOfInsDsc(id); break; case IF_SRW_SHF: emitGetInsCns(id, &cnsVal); dst = emitOutputSV(dst, id, insCodeMR(ins), &cnsVal); sz = emitSizeOfInsDsc(id); break; case IF_SWR_RRD_CNS: assert(ins == INS_vextracti128 || ins == INS_vextractf128); assert(UseVEXEncoding()); emitGetInsAmdCns(id, &cnsVal); code = insCodeMR(ins); dst = emitOutputSV(dst, id, insCodeMR(ins), &cnsVal); sz = emitSizeOfInsDsc(id); break; case IF_RRW_SRD_CNS: case IF_RWR_SRD_CNS: assert(IsSSEOrAVXInstruction(ins)); emitGetInsCns(id, &cnsVal); code = insCodeRM(ins); // Special case 4-byte AVX instructions if (EncodedBySSE38orSSE3A(ins)) { dst = emitOutputSV(dst, id, code, &cnsVal); } else { code = AddVexPrefixIfNeeded(ins, code, size); // In case of AVX instructions that take 3 operands, encode reg1 as first source. // Note that reg1 is both a source and a destination. // // TODO-XArch-CQ: Eventually we need to support 3 operand instruction formats. For // now we use the single source as source1 and source2. // For this format, moves do not support a third operand, so we only need to handle the binary ops. if (IsDstDstSrcAVXInstruction(ins)) { // encode source operand reg in 'vvvv' bits in 1's complement form code = insEncodeReg3456(ins, id->idReg1(), size, code); } regcode = (insEncodeReg345(ins, id->idReg1(), size, &code) << 8); dst = emitOutputSV(dst, id, code | regcode, &cnsVal); } sz = emitSizeOfInsDsc(id); break; case IF_RRD_SRD: case IF_RWR_SRD: case IF_RRW_SRD: { code = insCodeRM(ins); // 4-byte AVX instructions are special cased inside emitOutputSV // since they do not have space to encode ModRM byte. 
if (EncodedBySSE38orSSE3A(ins) || (ins == INS_crc32)) { dst = emitOutputSV(dst, id, code); } else { code = AddVexPrefixIfNeeded(ins, code, size); if (IsDstDstSrcAVXInstruction(ins)) { // encode source operand reg in 'vvvv' bits in 1's complement form code = insEncodeReg3456(ins, id->idReg1(), size, code); } regcode = (insEncodeReg345(ins, id->idReg1(), size, &code) << 8); dst = emitOutputSV(dst, id, code | regcode); } sz = emitSizeOfInsDsc(id); break; } case IF_RWR_RRD_SRD: { // This should only be called on AVX instructions assert(IsAVXInstruction(ins)); code = insCodeRM(ins); code = AddVexPrefixIfNeeded(ins, code, size); code = insEncodeReg3456(ins, id->idReg2(), size, code); // encode source operand reg in 'vvvv' bits in 1's complement form // 4-byte AVX instructions are special cased inside emitOutputSV // since they do not have space to encode ModRM byte. if (EncodedBySSE38orSSE3A(ins)) { dst = emitOutputSV(dst, id, code); } else { regcode = (insEncodeReg345(ins, id->idReg1(), size, &code) << 8); dst = emitOutputSV(dst, id, code | regcode); } break; } case IF_RWR_RRD_SRD_CNS: case IF_RWR_RRD_SRD_RRD: { // This should only be called on AVX instructions assert(IsAVXInstruction(ins)); emitGetInsCns(id, &cnsVal); code = insCodeRM(ins); code = AddVexPrefixIfNeeded(ins, code, size); code = insEncodeReg3456(ins, id->idReg2(), size, code); // encode source operand reg in 'vvvv' bits in 1's complement form // 4-byte AVX instructions are special cased inside emitOutputSV // since they do not have space to encode ModRM byte. if (EncodedBySSE38orSSE3A(ins)) { dst = emitOutputSV(dst, id, code, &cnsVal); } else { regcode = (insEncodeReg345(ins, id->idReg1(), size, &code) << 8); dst = emitOutputSV(dst, id, code | regcode, &cnsVal); } sz = emitSizeOfInsDsc(id); break; } case IF_SRD_RRD: case IF_SWR_RRD: case IF_SRW_RRD: code = insCodeMR(ins); code = AddVexPrefixIfNeeded(ins, code, size); // In case of AVX instructions that take 3 operands, encode reg1 as first source. 
// Note that reg1 is both a source and a destination. // // TODO-XArch-CQ: Eventually we need to support 3 operand instruction formats. For // now we use the single source as source1 and source2. // For this format, moves do not support a third operand, so we only need to handle the binary ops. if (IsDstDstSrcAVXInstruction(ins)) { // encode source operand reg in 'vvvv' bits in 1's complement form code = insEncodeReg3456(ins, id->idReg1(), size, code); } regcode = (insEncodeReg345(ins, id->idReg1(), size, &code) << 8); dst = emitOutputSV(dst, id, code | regcode); break; /********************************************************************/ /* Direct memory address */ /********************************************************************/ case IF_MRD: case IF_MRW: case IF_MWR: noway_assert(ins != INS_call); dst = emitOutputCV(dst, id, insCodeMR(ins) | 0x0500); sz = emitSizeOfInsDsc(id); break; case IF_MRD_OFF: dst = emitOutputCV(dst, id, insCodeMI(ins)); break; case IF_RRW_MRD_CNS: case IF_RWR_MRD_CNS: assert(IsSSEOrAVXInstruction(ins)); emitGetInsDcmCns(id, &cnsVal); code = insCodeRM(ins); // Special case 4-byte AVX instructions if (EncodedBySSE38orSSE3A(ins)) { dst = emitOutputCV(dst, id, code, &cnsVal); } else { code = AddVexPrefixIfNeeded(ins, code, size); // In case of AVX instructions that take 3 operands, encode reg1 as first source. // Note that reg1 is both a source and a destination. // // TODO-XArch-CQ: Eventually we need to support 3 operand instruction formats. For // now we use the single source as source1 and source2. // For this format, moves do not support a third operand, so we only need to handle the binary ops. 
if (IsDstDstSrcAVXInstruction(ins)) { // encode source operand reg in 'vvvv' bits in 1's complement form code = insEncodeReg3456(ins, id->idReg1(), size, code); } regcode = (insEncodeReg345(ins, id->idReg1(), size, &code) << 8); dst = emitOutputCV(dst, id, code | regcode | 0x0500, &cnsVal); } sz = emitSizeOfInsDsc(id); break; case IF_MWR_RRD_CNS: assert(ins == INS_vextracti128 || ins == INS_vextractf128); assert(UseVEXEncoding()); emitGetInsDcmCns(id, &cnsVal); code = insCodeMR(ins); // only AVX2 vextracti128 and AVX vextractf128 can reach this path, // they do not need VEX.vvvv to encode the register operand dst = emitOutputCV(dst, id, code, &cnsVal); sz = emitSizeOfInsDsc(id); break; case IF_RRD_MRD: case IF_RWR_MRD: case IF_RRW_MRD: { code = insCodeRM(ins); // Special case 4-byte AVX instructions if (EncodedBySSE38orSSE3A(ins) || (ins == INS_crc32)) { dst = emitOutputCV(dst, id, code); } else { code = AddVexPrefixIfNeeded(ins, code, size); if (IsDstDstSrcAVXInstruction(ins)) { // encode source operand reg in 'vvvv' bits in 1's complement form code = insEncodeReg3456(ins, id->idReg1(), size, code); } regcode = (insEncodeReg345(ins, id->idReg1(), size, &code) << 8); dst = emitOutputCV(dst, id, code | regcode | 0x0500); } sz = emitSizeOfInsDsc(id); break; } case IF_RWR_RRD_MRD: { // This should only be called on AVX instructions assert(IsAVXInstruction(ins)); code = insCodeRM(ins); code = AddVexPrefixIfNeeded(ins, code, size); code = insEncodeReg3456(ins, id->idReg2(), size, code); // encode source operand reg in 'vvvv' bits in 1's complement form // Special case 4-byte AVX instructions if (EncodedBySSE38orSSE3A(ins)) { dst = emitOutputCV(dst, id, code); } else { regcode = (insEncodeReg345(ins, id->idReg1(), size, &code) << 8); dst = emitOutputCV(dst, id, code | regcode | 0x0500); } sz = emitSizeOfInsDsc(id); break; } case IF_RWR_RRD_MRD_CNS: case IF_RWR_RRD_MRD_RRD: { // This should only be called on AVX instructions assert(IsAVXInstruction(ins)); 
emitGetInsCns(id, &cnsVal); code = insCodeRM(ins); code = AddVexPrefixIfNeeded(ins, code, size); code = insEncodeReg3456(ins, id->idReg2(), size, code); // encode source operand reg in 'vvvv' bits in 1's complement form // Special case 4-byte AVX instructions if (EncodedBySSE38orSSE3A(ins)) { dst = emitOutputCV(dst, id, code, &cnsVal); } else { regcode = (insEncodeReg345(ins, id->idReg1(), size, &code) << 8); dst = emitOutputCV(dst, id, code | regcode | 0x0500, &cnsVal); } sz = emitSizeOfInsDsc(id); break; } case IF_RWR_MRD_OFF: code = insCode(ins); code = AddVexPrefixIfNeeded(ins, code, size); // In case of AVX instructions that take 3 operands, encode reg1 as first source. // Note that reg1 is both a source and a destination. // // TODO-XArch-CQ: Eventually we need to support 3 operand instruction formats. For // now we use the single source as source1 and source2. // For this format, moves do not support a third operand, so we only need to handle the binary ops. if (IsDstDstSrcAVXInstruction(ins)) { // encode source operand reg in 'vvvv' bits in 1's complement form code = insEncodeReg3456(ins, id->idReg1(), size, code); } regcode = insEncodeReg012(id->idIns(), id->idReg1(), size, &code); dst = emitOutputCV(dst, id, code | 0x30 | regcode); sz = emitSizeOfInsDsc(id); break; case IF_MRD_RRD: case IF_MWR_RRD: case IF_MRW_RRD: code = insCodeMR(ins); code = AddVexPrefixIfNeeded(ins, code, size); // In case of AVX instructions that take 3 operands, encode reg1 as first source. // Note that reg1 is both a source and a destination. // // TODO-XArch-CQ: Eventually we need to support 3 operand instruction formats. For // now we use the single source as source1 and source2. // For this format, moves do not support a third operand, so we only need to handle the binary ops. 
if (IsDstDstSrcAVXInstruction(ins)) { // encode source operand reg in 'vvvv' bits in 1's complement form code = insEncodeReg3456(ins, id->idReg1(), size, code); } regcode = (insEncodeReg345(ins, id->idReg1(), size, &code) << 8); dst = emitOutputCV(dst, id, code | regcode | 0x0500); sz = emitSizeOfInsDsc(id); break; case IF_MRD_CNS: case IF_MWR_CNS: case IF_MRW_CNS: emitGetInsDcmCns(id, &cnsVal); dst = emitOutputCV(dst, id, insCodeMI(ins) | 0x0500, &cnsVal); sz = emitSizeOfInsDsc(id); break; case IF_MRW_SHF: emitGetInsDcmCns(id, &cnsVal); dst = emitOutputCV(dst, id, insCodeMR(ins) | 0x0500, &cnsVal); sz = emitSizeOfInsDsc(id); break; /********************************************************************/ /* oops */ /********************************************************************/ default: #ifdef DEBUG printf("unexpected format %s\n", emitIfName(id->idInsFmt())); assert(!"don't know how to encode this instruction"); #endif break; } // Make sure we set the instruction descriptor size correctly assert(sz == emitSizeOfInsDsc(id)); #if !FEATURE_FIXED_OUT_ARGS bool updateStackLevel = !emitIGisInProlog(ig) && !emitIGisInEpilog(ig); #if defined(FEATURE_EH_FUNCLETS) updateStackLevel = updateStackLevel && !emitIGisInFuncletProlog(ig) && !emitIGisInFuncletEpilog(ig); #endif // FEATURE_EH_FUNCLETS // Make sure we keep the current stack level up to date if (updateStackLevel) { switch (ins) { case INS_push: // Please note: {INS_push_hide,IF_LABEL} is used to push the address of the // finally block for calling it locally for an op_leave. 
emitStackPush(dst, id->idGCref()); break; case INS_pop: emitStackPop(dst, false, /*callInstrSize*/ 0, 1); break; case INS_sub: // Check for "sub ESP, icon" if (ins == INS_sub && id->idInsFmt() == IF_RRW_CNS && id->idReg1() == REG_ESP) { assert((size_t)emitGetInsSC(id) < 0x00000000FFFFFFFFLL); emitStackPushN(dst, (unsigned)(emitGetInsSC(id) / TARGET_POINTER_SIZE)); } break; case INS_add: // Check for "add ESP, icon" if (ins == INS_add && id->idInsFmt() == IF_RRW_CNS && id->idReg1() == REG_ESP) { assert((size_t)emitGetInsSC(id) < 0x00000000FFFFFFFFLL); emitStackPop(dst, /*isCall*/ false, /*callInstrSize*/ 0, (unsigned)(emitGetInsSC(id) / TARGET_POINTER_SIZE)); } break; default: break; } } #endif // !FEATURE_FIXED_OUT_ARGS assert((int)emitCurStackLvl >= 0); // Only epilog "instructions" and some pseudo-instrs // are allowed not to generate any code assert(*dp != dst || emitInstHasNoCode(ins)); #ifdef DEBUG if (emitComp->opts.disAsm || emitComp->verbose) { emitDispIns(id, false, dspOffs, true, emitCurCodeOffs(*dp), *dp, (dst - *dp)); } #endif #if FEATURE_LOOP_ALIGN // Only compensate over-estimated instructions if emitCurIG is before // the last IG that needs alignment. if (emitCurIG->igNum <= emitLastAlignedIgNum) { int diff = id->idCodeSize() - ((UNATIVE_OFFSET)(dst - *dp)); assert(diff >= 0); if (diff != 0) { #ifdef DEBUG // should never over-estimate align instruction assert(id->idIns() != INS_align); JITDUMP("Added over-estimation compensation: %d\n", diff); if (emitComp->opts.disAsm) { emitDispInsAddr(dst); printf("\t\t ;; NOP compensation instructions of %d bytes.\n", diff); } #endif BYTE* dstRW = dst + writeableOffset; dstRW = emitOutputNOP(dstRW, diff); dst = dstRW - writeableOffset; } assert((id->idCodeSize() - ((UNATIVE_OFFSET)(dst - *dp))) == 0); } #endif #ifdef DEBUG if (emitComp->compDebugBreak) { // set JitEmitPrintRefRegs=1 will print out emitThisGCrefRegs and emitThisByrefRegs // at the beginning of this method. 
if (JitConfig.JitEmitPrintRefRegs() != 0) { printf("Before emitOutputInstr for id->idDebugOnlyInfo()->idNum=0x%02x\n", id->idDebugOnlyInfo()->idNum); printf(" emitThisGCrefRegs(0x%p)=", emitComp->dspPtr(&emitThisGCrefRegs)); printRegMaskInt(emitThisGCrefRegs); emitDispRegSet(emitThisGCrefRegs); printf("\n"); printf(" emitThisByrefRegs(0x%p)=", emitComp->dspPtr(&emitThisByrefRegs)); printRegMaskInt(emitThisByrefRegs); emitDispRegSet(emitThisByrefRegs); printf("\n"); } // For example, set JitBreakEmitOutputInstr=a6 will break when this method is called for // emitting instruction a6, (i.e. IN00a6 in jitdump). if ((unsigned)JitConfig.JitBreakEmitOutputInstr() == id->idDebugOnlyInfo()->idNum) { assert(!"JitBreakEmitOutputInstr reached"); } } #endif *dp = dst; #ifdef DEBUG if (ins == INS_mulEAX || ins == INS_imulEAX) { // INS_mulEAX has implicit target of Edx:Eax. Make sure // that we detected this cleared its GC-status. assert(((RBM_EAX | RBM_EDX) & (emitThisGCrefRegs | emitThisByrefRegs)) == 0); } if (instrIs3opImul(ins)) { // The target of the 3-operand imul is implicitly encoded. Make sure // that we detected the implicit register and cleared its GC-status. regMaskTP regMask = genRegMask(inst3opImulReg(ins)); assert((regMask & (emitThisGCrefRegs | emitThisByrefRegs)) == 0); } // Output any delta in GC info. 
if (EMIT_GC_VERBOSE || emitComp->opts.disasmWithGC) { emitDispGCInfoDelta(); } #endif return sz; } #ifdef _PREFAST_ #pragma warning(pop) #endif emitter::insFormat emitter::getMemoryOperation(instrDesc* id) { insFormat result = IF_NONE; instruction ins = id->idIns(); insFormat insFmt = id->idInsFmt(); if (ins == INS_lea) { // an INS_lea instruction doesn't actually read memory insFmt = IF_NONE; } switch (insFmt) { case IF_NONE: case IF_LABEL: case IF_RWR_LABEL: case IF_METHOD: case IF_CNS: case IF_RRD: case IF_RWR: case IF_RRW: case IF_RRD_CNS: case IF_RWR_CNS: case IF_RRW_CNS: case IF_RRW_SHF: case IF_RRD_RRD: case IF_RWR_RRD: case IF_RRW_RRD: case IF_RRW_RRW: case IF_RRW_RRW_CNS: case IF_RWR_RRD_RRD: case IF_RWR_RRD_RRD_CNS: case IF_RWR_RRD_RRD_RRD: // none, or register only result = IF_NONE; break; case IF_ARD: case IF_RRD_ARD: case IF_RWR_ARD: case IF_RRW_ARD: case IF_RWR_ARD_CNS: case IF_RWR_RRD_ARD: case IF_RRW_ARD_CNS: case IF_RWR_ARD_RRD: case IF_RWR_RRD_ARD_CNS: case IF_RWR_RRD_ARD_RRD: case IF_ARD_CNS: case IF_ARD_RRD: // Address [reg+reg*scale+cns] - read result = IF_ARD; break; case IF_AWR: case IF_AWR_RRD: case IF_AWR_CNS: case IF_AWR_RRD_CNS: case IF_AWR_RRD_RRD: // Address [reg+reg*scale+cns] - write result = IF_AWR; break; case IF_ARW: case IF_ARW_RRD: case IF_ARW_CNS: case IF_ARW_SHF: // Address [reg+reg*scale+cns] - read and write result = IF_ARW; break; case IF_MRD: case IF_MRD_CNS: case IF_MRD_OFF: case IF_MRD_RRD: case IF_RRD_MRD: case IF_RRW_MRD: case IF_RWR_MRD: case IF_RWR_MRD_CNS: case IF_RWR_MRD_OFF: case IF_RWR_RRD_MRD: case IF_RRW_MRD_CNS: case IF_RWR_RRD_MRD_CNS: case IF_RWR_RRD_MRD_RRD: case IF_METHPTR: // Address [cns] - read result = IF_MRD; break; case IF_MWR: case IF_MWR_CNS: case IF_MWR_RRD: case IF_MWR_RRD_CNS: // Address [cns] - write result = IF_MWR; break; case IF_MRW: case IF_MRW_CNS: case IF_MRW_RRD: case IF_MRW_SHF: // Address [cns] - read and write result = IF_MWR; break; case IF_SRD: case IF_SRD_CNS: case IF_SRD_RRD: case 
IF_RRD_SRD: case IF_RRW_SRD: case IF_RWR_SRD: case IF_RWR_SRD_CNS: case IF_RWR_RRD_SRD: case IF_RRW_SRD_CNS: case IF_RWR_RRD_SRD_CNS: case IF_RWR_RRD_SRD_RRD: // Stack [RSP] - read result = IF_SRD; break; case IF_SWR: case IF_SWR_CNS: case IF_SWR_RRD: case IF_SWR_RRD_CNS: case IF_SWR_LABEL: // Stack [RSP] - write result = IF_SWR; break; case IF_SRW: case IF_SRW_CNS: case IF_SRW_RRD: case IF_SRW_SHF: // Stack [RSP] - read and write result = IF_SWR; break; default: result = IF_NONE; break; } return result; } #if defined(DEBUG) || defined(LATE_DISASM) //---------------------------------------------------------------------------------------- // getInsExecutionCharacteristics: // Returns the current instruction execution characteristics // // Arguments: // id - The current instruction descriptor to be evaluated // // Return Value: // A struct containing the current instruction execution characteristics // // Notes: // The instruction latencies and throughput values returned by this function // are for the Intel Skylake-X processor and are from either: // 1. Agner.org - https://www.agner.org/optimize/instruction_tables.pdf // 2. 
//    uops.info - https://uops.info/table.html
//
// getInsExecutionCharacteristics:
//    Model the execution cost (PerfScore) of a single emitted instruction.
//
// Arguments:
//    id  - the instruction descriptor to model
//
// Return Value:
//    An insExecutionCharacteristics holding the estimated throughput, latency and
//    memory-access kind for the instruction.
//
// Notes:
//    The first switch (on the memory format) seeds a baseline memory latency and
//    records the access kind; the second switch (on the instruction) then sets the
//    throughput and adds (or overrides) the execution latency per instruction.
//    Cases that use '+=' add execution latency on top of the modeled memory latency;
//    cases that use '=' deliberately replace it.
//
emitter::insExecutionCharacteristics emitter::getInsExecutionCharacteristics(instrDesc* id)
{
    insExecutionCharacteristics result;
    instruction                 ins    = id->idIns();
    insFormat                   insFmt = id->idInsFmt();
    emitAttr                    opSize = id->idOpSize();
    insFormat                   memFmt = getMemoryOperation(id);
    unsigned                    memAccessKind;

    result.insThroughput = PERFSCORE_THROUGHPUT_ILLEGAL;
    result.insLatency    = PERFSCORE_LATENCY_ILLEGAL;

    // Model the memory latency
    switch (memFmt)
    {
        // Model a read from stack location, possible def to use latency from L0 cache
        case IF_SRD:
            result.insLatency = PERFSCORE_LATENCY_RD_STACK;
            memAccessKind     = PERFSCORE_MEMORY_READ;
            break;

        case IF_SWR:
            result.insLatency = PERFSCORE_LATENCY_WR_STACK;
            memAccessKind     = PERFSCORE_MEMORY_WRITE;
            break;

        case IF_SRW:
            result.insLatency = PERFSCORE_LATENCY_RD_WR_STACK;
            memAccessKind     = PERFSCORE_MEMORY_READ_WRITE;
            break;

        // Model a read from a constant location, possible def to use latency from L0 cache
        case IF_MRD:
            result.insLatency = PERFSCORE_LATENCY_RD_CONST_ADDR;
            memAccessKind     = PERFSCORE_MEMORY_READ;
            break;

        case IF_MWR:
            result.insLatency = PERFSCORE_LATENCY_WR_CONST_ADDR;
            memAccessKind     = PERFSCORE_MEMORY_WRITE;
            break;

        case IF_MRW:
            result.insLatency = PERFSCORE_LATENCY_RD_WR_CONST_ADDR;
            memAccessKind     = PERFSCORE_MEMORY_READ_WRITE;
            break;

        // Model a read from memory location, possible def to use latency from L0 or L1 cache
        case IF_ARD:
            result.insLatency = PERFSCORE_LATENCY_RD_GENERAL;
            memAccessKind     = PERFSCORE_MEMORY_READ;
            break;

        case IF_AWR:
            result.insLatency = PERFSCORE_LATENCY_WR_GENERAL;
            memAccessKind     = PERFSCORE_MEMORY_WRITE;
            break;

        case IF_ARW:
            result.insLatency = PERFSCORE_LATENCY_RD_WR_GENERAL;
            memAccessKind     = PERFSCORE_MEMORY_READ_WRITE;
            break;

        case IF_NONE:
            result.insLatency = PERFSCORE_LATENCY_ZERO;
            memAccessKind     = PERFSCORE_MEMORY_NONE;
            break;

        default:
            assert(!"Unhandled insFmt for switch (memFmt)");
            result.insLatency = PERFSCORE_LATENCY_ZERO;
            memAccessKind     = PERFSCORE_MEMORY_NONE;
            break;
    }
    result.insMemoryAccessKind = memAccessKind;

    // Model the execution characteristics per instruction.
    switch (ins)
    {
        case INS_align:
#if FEATURE_LOOP_ALIGN
            if ((id->idCodeSize() == 0) || ((instrDescAlign*)id)->isPlacedAfterJmp)
            {
                // Either we're not going to generate 'align' instruction, or the 'align'
                // instruction is placed immediately after unconditional jmp.
                // In both cases, don't count for PerfScore.

                result.insThroughput = PERFSCORE_THROUGHPUT_ZERO;
                result.insLatency    = PERFSCORE_LATENCY_ZERO;
                break;
            }
#endif
            FALLTHROUGH;

        case INS_nop:
        case INS_int3:
            assert(memFmt == IF_NONE);
            result.insThroughput = PERFSCORE_THROUGHPUT_4X;
            result.insLatency    = PERFSCORE_LATENCY_ZERO;
            break;

        case INS_push:
        case INS_push_hide:
            result.insThroughput = PERFSCORE_THROUGHPUT_1C;
            if (insFmt == IF_RRD) // push  reg
            {
                // For pushes (stack writes) we assume that the full latency will be covered
                result.insLatency = PERFSCORE_LATENCY_ZERO;
            }
            break;

        case INS_pop:
        case INS_pop_hide:
            if (insFmt == IF_RWR) // pop   reg
            {
                result.insThroughput = PERFSCORE_THROUGHPUT_2X;
                // For pops (stack reads) we assume that the full latency will be covered
                result.insLatency = PERFSCORE_LATENCY_ZERO;
            }
            else
            {
                result.insThroughput = PERFSCORE_THROUGHPUT_1C;
            }
            break;

        case INS_inc:
        case INS_dec:
        case INS_neg:
        case INS_not:
            if (memFmt == IF_NONE)
            {
                // ins   reg
                result.insThroughput = PERFSCORE_THROUGHPUT_4X;
                result.insLatency    = PERFSCORE_LATENCY_1C;
            }
            else
            {
                // ins   mem
                result.insThroughput = PERFSCORE_THROUGHPUT_1C;
                // no additional R/W latency
            }
            break;

#ifdef TARGET_AMD64
        case INS_movsxd:
#endif
        case INS_mov:
        case INS_movsx:
        case INS_movzx:
        case INS_cwde:
        case INS_cmp:
        case INS_test:
            if (memFmt == IF_NONE)
            {
                result.insThroughput = PERFSCORE_THROUGHPUT_4X;
            }
            else if (memAccessKind == PERFSCORE_MEMORY_READ)
            {
                result.insThroughput = PERFSCORE_THROUGHPUT_2X;
                if (ins == INS_cmp || ins == INS_test)
                {
                    result.insLatency += PERFSCORE_LATENCY_1C;
                }
                else if (ins == INS_movsx
#ifdef TARGET_AMD64
                         || ins == INS_movsxd
#endif
                         )
                {
                    result.insLatency += PERFSCORE_LATENCY_2C;
                }
            }
            else // writes
            {
                assert(memAccessKind == PERFSCORE_MEMORY_WRITE);
                assert(ins == INS_mov);
                result.insThroughput = PERFSCORE_THROUGHPUT_1C;
            }
            break;

        case INS_adc:
        case INS_sbb:
            if (memAccessKind != PERFSCORE_MEMORY_READ_WRITE)
            {
                result.insThroughput = PERFSCORE_THROUGHPUT_2X;
                result.insLatency += PERFSCORE_LATENCY_1C;
            }
            else
            {
                result.insThroughput = PERFSCORE_THROUGHPUT_1C;
                // no additional R/W latency
            }
            break;

        case INS_add:
        case INS_sub:
        case INS_and:
        case INS_or:
        case INS_xor:
            if (memFmt == IF_NONE)
            {
                result.insThroughput = PERFSCORE_THROUGHPUT_4X;
                result.insLatency    = PERFSCORE_LATENCY_1C;
            }
            else if (memAccessKind == PERFSCORE_MEMORY_READ_WRITE)
            {
                result.insThroughput = PERFSCORE_THROUGHPUT_1C;
                // no additional R/W latency
            }
            else
            {
                result.insThroughput = PERFSCORE_THROUGHPUT_2X;
                result.insLatency += PERFSCORE_LATENCY_1C;
            }
            break;

        case INS_lea:
            // uops.info
            result.insThroughput = PERFSCORE_THROUGHPUT_2X; // one or two components
            result.insLatency    = PERFSCORE_LATENCY_1C;

            if (insFmt == IF_RWR_LABEL)
            {
                // RIP relative addressing
                //
                // - throughput is only 1 per cycle
                //
                result.insThroughput = PERFSCORE_THROUGHPUT_1C;
            }
            else if (insFmt != IF_RWR_SRD)
            {
                if (id->idAddr()->iiaAddrMode.amIndxReg != REG_NA)
                {
                    regNumber baseReg = id->idAddr()->iiaAddrMode.amBaseReg;
                    if (baseReg != REG_NA)
                    {
                        ssize_t dsp = emitGetInsAmdAny(id);
                        if ((dsp != 0) || baseRegisterRequiresDisplacement(baseReg))
                        {
                            // three components
                            //
                            // - throughput is only 1 per cycle
                            //
                            result.insThroughput = PERFSCORE_THROUGHPUT_1C;

                            if (baseRegisterRequiresDisplacement(baseReg) || id->idIsDspReloc())
                            {
                                // Increased Latency for these cases
                                //  - see https://reviews.llvm.org/D32277
                                //
                                result.insLatency = PERFSCORE_LATENCY_3C;
                            }
                        }
                    }
                }
            }
            break;

        case INS_imul_AX:
        case INS_imul_BX:
        case INS_imul_CX:
        case INS_imul_DX:
        case INS_imul_BP:
        case INS_imul_SI:
        case INS_imul_DI:
#ifdef TARGET_AMD64
        case INS_imul_08:
        case INS_imul_09:
        case INS_imul_10:
        case INS_imul_11:
        case INS_imul_12:
        case INS_imul_13:
        case INS_imul_14:
        case INS_imul_15:
#endif // TARGET_AMD64
        case INS_imul:
            result.insThroughput = PERFSCORE_THROUGHPUT_1C;
            result.insLatency += PERFSCORE_LATENCY_3C;
            break;

        case INS_mulEAX:
        case INS_imulEAX:
            // uops.info: mul/imul rdx:rax,reg latency is 3 only if the low half of the result is needed, but in that
            // case codegen uses imul reg,reg instruction form (except for unsigned overflow checks, which are rare)
            result.insThroughput = PERFSCORE_THROUGHPUT_1C;
            result.insLatency += PERFSCORE_LATENCY_4C;
            break;

        case INS_div:
            // The integer divide instructions have long latencies
            if (opSize == EA_8BYTE)
            {
                result.insThroughput = PERFSCORE_THROUGHPUT_52C;
                result.insLatency    = PERFSCORE_LATENCY_62C;
            }
            else
            {
                assert(opSize == EA_4BYTE);
                result.insThroughput = PERFSCORE_THROUGHPUT_6C;
                result.insLatency    = PERFSCORE_LATENCY_26C;
            }
            break;

        case INS_idiv:
            // The integer divide instructions have long latencies
            if (opSize == EA_8BYTE)
            {
                result.insThroughput = PERFSCORE_THROUGHPUT_57C;
                result.insLatency    = PERFSCORE_LATENCY_69C;
            }
            else
            {
                assert(opSize == EA_4BYTE);
                result.insThroughput = PERFSCORE_THROUGHPUT_6C;
                result.insLatency    = PERFSCORE_LATENCY_26C;
            }
            break;

        case INS_cdq:
            result.insThroughput = PERFSCORE_THROUGHPUT_2X;
            result.insLatency    = PERFSCORE_LATENCY_1C;
            break;

        case INS_shl:
        case INS_shr:
        case INS_sar:
        case INS_ror:
        case INS_rol:
            switch (insFmt)
            {
                case IF_RRW_CNS:
                    // ins   reg, cns
                    result.insThroughput = PERFSCORE_THROUGHPUT_2X;
                    result.insLatency    = PERFSCORE_LATENCY_1C;
                    break;

                case IF_MRW_CNS:
                case IF_SRW_CNS:
                case IF_ARW_CNS:
                    // ins   [mem], cns
                    result.insThroughput = PERFSCORE_THROUGHPUT_2C;
                    result.insLatency += PERFSCORE_LATENCY_1C;
                    break;

                case IF_RRW:
                    // ins   reg, cl
                    result.insThroughput = PERFSCORE_THROUGHPUT_2C;
                    result.insLatency    = PERFSCORE_LATENCY_2C;
                    break;

                case IF_MRW:
                case IF_SRW:
                case IF_ARW:
                    // ins   [mem], cl
                    result.insThroughput = PERFSCORE_THROUGHPUT_4C;
                    result.insLatency += PERFSCORE_LATENCY_2C;
                    break;

                default:
                    // unhandled instruction insFmt combination
                    perfScoreUnhandledInstruction(id, &result);
                    break;
            }
            break;

        case INS_shl_1:
        case INS_shr_1:
        case INS_sar_1:
            result.insLatency += PERFSCORE_LATENCY_1C;
            switch (insFmt)
            {
                case IF_RRW:
                    // ins   reg, 1
                    result.insThroughput = PERFSCORE_THROUGHPUT_2X;
                    break;

                case IF_MRW:
                case IF_SRW:
                case IF_ARW:
                    // ins   [mem], 1
                    result.insThroughput = PERFSCORE_THROUGHPUT_2C;
                    break;

                default:
                    // unhandled instruction insFmt combination
                    perfScoreUnhandledInstruction(id, &result);
                    break;
            }
            break;

        case INS_ror_1:
        case INS_rol_1:
            result.insThroughput = PERFSCORE_THROUGHPUT_1C;
            result.insLatency += PERFSCORE_LATENCY_1C;
            break;

        case INS_shl_N:
        case INS_shr_N:
        case INS_sar_N:
        case INS_ror_N:
        case INS_rol_N:
            result.insLatency += PERFSCORE_LATENCY_1C;
            switch (insFmt)
            {
                case IF_RRW_SHF:
                    // ins   reg, cns
                    result.insThroughput = PERFSCORE_THROUGHPUT_2X;
                    break;

                case IF_MRW_SHF:
                case IF_SRW_SHF:
                case IF_ARW_SHF:
                    // ins   [mem], cns
                    result.insThroughput = PERFSCORE_THROUGHPUT_2C;
                    break;

                default:
                    // unhandled instruction insFmt combination
                    perfScoreUnhandledInstruction(id, &result);
                    break;
            }
            break;

        case INS_rcr:
        case INS_rcl:
            result.insThroughput = PERFSCORE_THROUGHPUT_6C;
            result.insLatency += PERFSCORE_LATENCY_6C;
            break;

        case INS_rcr_1:
        case INS_rcl_1:
            // uops.info
            result.insThroughput = PERFSCORE_THROUGHPUT_1C;
            result.insLatency += PERFSCORE_LATENCY_2C;
            break;

        case INS_shld:
        case INS_shrd:
            result.insLatency += PERFSCORE_LATENCY_3C;
            if (insFmt == IF_RRW_RRW_CNS)
            {
                // ins   reg, reg, cns
                result.insThroughput = PERFSCORE_THROUGHPUT_1C;
            }
            else
            {
                assert(memAccessKind == PERFSCORE_MEMORY_WRITE); // _SHF form never emitted
                result.insThroughput = PERFSCORE_THROUGHPUT_2C;
            }
            break;

        case INS_bt:
            result.insLatency += PERFSCORE_LATENCY_1C;
            if ((insFmt == IF_RRD_RRD) || (insFmt == IF_RRD_CNS))
            {
                result.insThroughput = PERFSCORE_THROUGHPUT_2X;
            }
            else
            {
                result.insThroughput = PERFSCORE_THROUGHPUT_1C;
            }
            break;

        case INS_seto:
        case INS_setno:
        case INS_setb:
        case INS_setae:
        case INS_sete:
        case INS_setne:
        case INS_setbe:
        case INS_seta:
        case INS_sets:
        case INS_setns:
        case INS_setp:
        case INS_setnp:
        case INS_setl:
        case INS_setge:
        case INS_setle:
        case INS_setg:
            result.insLatency += PERFSCORE_LATENCY_1C;
            if (insFmt == IF_RRD)
            {
                result.insThroughput = PERFSCORE_THROUGHPUT_2X;
            }
            else
            {
                result.insThroughput = PERFSCORE_THROUGHPUT_1C;
            }
            break;

        case INS_jo:
        case INS_jno:
        case INS_jb:
        case INS_jae:
        case INS_je:
        case INS_jne:
        case INS_jbe:
        case INS_ja:
        case INS_js:
        case INS_jns:
        case INS_jp:
        case INS_jnp:
        case INS_jl:
        case INS_jge:
        case INS_jle:
        case INS_jg:
            // conditional branch
            result.insThroughput = PERFSCORE_THROUGHPUT_2X;
            result.insLatency    = PERFSCORE_LATENCY_BRANCH_COND;
            break;

        case INS_jmp:
        case INS_l_jmp:
            // branch to a constant address
            result.insThroughput = PERFSCORE_THROUGHPUT_2C;
            result.insLatency    = PERFSCORE_LATENCY_BRANCH_DIRECT;
            break;

        case INS_tail_i_jmp:
        case INS_i_jmp:
            // branch to register
            result.insThroughput = PERFSCORE_THROUGHPUT_2C;
            result.insLatency    = PERFSCORE_LATENCY_BRANCH_INDIRECT;
            break;

        case INS_call:
            // uops.info
            result.insLatency = PERFSCORE_LATENCY_ZERO;
            switch (insFmt)
            {
                case IF_LABEL:
                    result.insThroughput = PERFSCORE_THROUGHPUT_1C;
                    break;

                case IF_METHOD:
                    result.insThroughput = PERFSCORE_THROUGHPUT_1C;
                    break;

                case IF_METHPTR:
                    result.insThroughput = PERFSCORE_THROUGHPUT_3C;
                    break;

                case IF_SRD:
                    result.insThroughput = PERFSCORE_THROUGHPUT_3C;
                    break;

                case IF_ARD:
                    result.insThroughput = PERFSCORE_THROUGHPUT_3C;
                    break;

                default:
                    // unhandled instruction, insFmt combination
                    perfScoreUnhandledInstruction(id, &result);
                    break;
            }
            break;

        case INS_ret:
            if (insFmt == IF_CNS)
            {
                result.insThroughput = PERFSCORE_THROUGHPUT_2C;
            }
            else
            {
                assert(insFmt == IF_NONE);
                result.insThroughput = PERFSCORE_THROUGHPUT_1C;
            }
            break;

        case INS_lock:
            result.insThroughput = PERFSCORE_THROUGHPUT_13C;
            break;

        case INS_xadd:
            // uops.info
            result.insThroughput = PERFSCORE_THROUGHPUT_1C;
            result.insLatency += PERFSCORE_LATENCY_1C;
            break;

        case INS_cmpxchg:
            result.insThroughput = PERFSCORE_THROUGHPUT_5C;
            break;

        case INS_xchg:
            // uops.info
            result.insThroughput = PERFSCORE_THROUGHPUT_1C;
            if (memFmt == IF_NONE)
            {
                result.insLatency = PERFSCORE_LATENCY_1C;
            }
            else
            {
                result.insLatency = PERFSCORE_LATENCY_23C;
            }
            break;

#ifdef TARGET_X86
        case INS_fld:
        case INS_fstp:
            result.insThroughput = PERFSCORE_THROUGHPUT_2X;
            if (memAccessKind == PERFSCORE_MEMORY_NONE)
            {
                result.insLatency = PERFSCORE_LATENCY_1C;
            }
            break;
#endif // TARGET_X86

#ifdef TARGET_AMD64
        case INS_movsq:
        case INS_stosq:
#endif // TARGET_AMD64
        case INS_movsd:
        case INS_stosd:
            // uops.info
            result.insThroughput = PERFSCORE_THROUGHPUT_1C;
            break;

#ifdef TARGET_AMD64
        case INS_r_movsq:
        case INS_r_stosq:
#endif // TARGET_AMD64
        case INS_r_movsd:
        case INS_r_movsb:
        case INS_r_stosd:
        case INS_r_stosb:
            // Actually variable sized: rep stosd, used to zero frame slots
            // uops.info
            result.insThroughput = PERFSCORE_THROUGHPUT_25C;
            break;

        case INS_movd:
        case INS_movq: // only MOVQ xmm, xmm is different (emitted by Sse2.MoveScalar, should use MOVDQU instead)
            if (memAccessKind == PERFSCORE_MEMORY_NONE)
            {
                // movd   r32, xmm   or  xmm, r32
                result.insThroughput = PERFSCORE_THROUGHPUT_1C;
                result.insLatency    = PERFSCORE_LATENCY_3C;
            }
            else if (memAccessKind == PERFSCORE_MEMORY_READ)
            {
                // movd   xmm, m32
                result.insThroughput = PERFSCORE_THROUGHPUT_2X;
                result.insLatency += PERFSCORE_LATENCY_2C;
            }
            else
            {
                // movd   m32, xmm
                assert(memAccessKind == PERFSCORE_MEMORY_WRITE);
                result.insThroughput = PERFSCORE_THROUGHPUT_1C;
                result.insLatency += PERFSCORE_LATENCY_2C;
            }
            break;

        case INS_movdqa:
        case INS_movdqu:
        case INS_movaps:
        case INS_movups:
        case INS_movapd:
        case INS_movupd:
            if (memAccessKind == PERFSCORE_MEMORY_NONE)
            {
                // ins   reg, reg
                result.insThroughput = PERFSCORE_THROUGHPUT_4X;
                result.insLatency    = PERFSCORE_LATENCY_ZERO;
            }
            else if (memAccessKind == PERFSCORE_MEMORY_READ)
            {
                // ins   reg, mem
                result.insThroughput = PERFSCORE_THROUGHPUT_2X;
                result.insLatency += opSize == EA_32BYTE ? PERFSCORE_LATENCY_3C : PERFSCORE_LATENCY_2C;
            }
            else
            {
                // ins   mem, reg
                assert(memAccessKind == PERFSCORE_MEMORY_WRITE);
                result.insThroughput = PERFSCORE_THROUGHPUT_1C;
                result.insLatency += PERFSCORE_LATENCY_2C;
            }
            break;

        case INS_movhps:
        case INS_movhpd:
        case INS_movlps:
        case INS_movlpd:
            result.insThroughput = PERFSCORE_THROUGHPUT_1C;
            if (memAccessKind == PERFSCORE_MEMORY_READ)
            {
                result.insLatency += PERFSCORE_LATENCY_3C;
            }
            else
            {
                assert(memAccessKind == PERFSCORE_MEMORY_WRITE);
                result.insLatency += PERFSCORE_LATENCY_2C;
            }
            break;

        case INS_movhlps:
        case INS_movlhps:
            result.insThroughput = PERFSCORE_THROUGHPUT_1C;
            result.insLatency    = PERFSCORE_LATENCY_1C;
            break;

        case INS_movntdq:
        case INS_movnti:
        case INS_movntps:
        case INS_movntpd:
            assert(memAccessKind == PERFSCORE_MEMORY_WRITE);
            result.insThroughput = PERFSCORE_THROUGHPUT_1C;
            result.insLatency    = PERFSCORE_LATENCY_400C; // Intel microcode issue with these instructions
            break;

        case INS_maskmovdqu:
            result.insThroughput = PERFSCORE_THROUGHPUT_6C;
            result.insLatency    = PERFSCORE_LATENCY_400C; // Intel microcode issue with these instructions
            break;

        case INS_movntdqa:
            assert(memAccessKind == PERFSCORE_MEMORY_READ);
            result.insThroughput = PERFSCORE_THROUGHPUT_2X;
            result.insLatency += opSize == EA_32BYTE ? PERFSCORE_LATENCY_3C : PERFSCORE_LATENCY_2C;
            break;

        case INS_vzeroupper:
            result.insThroughput = PERFSCORE_THROUGHPUT_1C;
            // insLatency is zero and is set when we Model the memory latency
            break;

        case INS_movss:
        case INS_movsdsse2:
        case INS_movddup:
            if (memAccessKind == PERFSCORE_MEMORY_NONE)
            {
                result.insThroughput = PERFSCORE_THROUGHPUT_1C;
                result.insLatency    = PERFSCORE_LATENCY_1C;
            }
            else if (memAccessKind == PERFSCORE_MEMORY_READ)
            {
                result.insThroughput = PERFSCORE_THROUGHPUT_2X;
                result.insLatency += opSize == EA_32BYTE ? PERFSCORE_LATENCY_3C : PERFSCORE_LATENCY_2C;
            }
            else
            {
                result.insThroughput = PERFSCORE_THROUGHPUT_1C;
                result.insLatency += PERFSCORE_LATENCY_2C;
            }
            break;

        case INS_lddqu:
            result.insThroughput = PERFSCORE_THROUGHPUT_2X;
            result.insLatency += opSize == EA_32BYTE ? PERFSCORE_LATENCY_3C : PERFSCORE_LATENCY_2C;
            break;

        case INS_comiss:
        case INS_comisd:
        case INS_ucomiss:
        case INS_ucomisd:
            result.insThroughput = PERFSCORE_THROUGHPUT_1C;
            result.insLatency += PERFSCORE_LATENCY_3C;
            break;

        case INS_addsd:
        case INS_addss:
        case INS_addpd:
        case INS_addps:
        case INS_subsd:
        case INS_subss:
        case INS_subpd:
        case INS_subps:
        case INS_cvttps2dq:
        case INS_cvtps2dq:
        case INS_cvtdq2ps:
            result.insThroughput = PERFSCORE_THROUGHPUT_2X;
            result.insLatency += PERFSCORE_LATENCY_4C;
            break;

        case INS_haddps:
        case INS_haddpd:
            result.insThroughput = PERFSCORE_THROUGHPUT_2C;
            result.insLatency += PERFSCORE_LATENCY_6C;
            break;

        case INS_mulss:
        case INS_mulsd:
        case INS_mulps:
        case INS_mulpd:
            result.insThroughput = PERFSCORE_THROUGHPUT_2X;
            result.insLatency += PERFSCORE_LATENCY_4C;
            break;

        case INS_divss:
        case INS_divps:
            result.insThroughput = PERFSCORE_THROUGHPUT_3C;
            result.insLatency += PERFSCORE_LATENCY_11C;
            break;

        case INS_divsd:
        case INS_divpd:
            result.insThroughput = PERFSCORE_THROUGHPUT_4C;
            result.insLatency += PERFSCORE_LATENCY_13C;
            break;

        case INS_sqrtss:
        case INS_sqrtps:
            result.insThroughput = PERFSCORE_THROUGHPUT_3C;
            result.insLatency += PERFSCORE_LATENCY_12C;
            break;

        case INS_sqrtsd:
        case INS_sqrtpd:
            result.insThroughput = PERFSCORE_THROUGHPUT_4C;
            result.insLatency += PERFSCORE_LATENCY_13C;
            break;

        case INS_rcpps:
        case INS_rcpss:
        case INS_rsqrtss:
        case INS_rsqrtps:
            result.insThroughput = PERFSCORE_THROUGHPUT_1C;
            result.insLatency += PERFSCORE_LATENCY_4C;
            break;

        case INS_roundpd:
        case INS_roundps:
        case INS_roundsd:
        case INS_roundss:
            result.insThroughput = PERFSCORE_THROUGHPUT_1C;
            result.insLatency += PERFSCORE_LATENCY_8C;
            break;

        case INS_cvttsd2si:
        case INS_cvtsd2si:
        case INS_cvtsi2sd:
        case INS_cvtsi2ss:
            result.insThroughput = PERFSCORE_THROUGHPUT_1C;
            result.insLatency += PERFSCORE_LATENCY_7C;
            break;

        case INS_cvttss2si:
        case INS_cvtss2si:
            result.insThroughput = PERFSCORE_THROUGHPUT_1C;
            result.insLatency += opSize == EA_8BYTE ? PERFSCORE_LATENCY_8C : PERFSCORE_LATENCY_7C;
            break;

        case INS_cvtss2sd:
            result.insThroughput = PERFSCORE_THROUGHPUT_1C;
            result.insLatency += PERFSCORE_LATENCY_5C;
            break;

        case INS_paddb:
        case INS_psubb:
        case INS_paddw:
        case INS_psubw:
        case INS_paddd:
        case INS_psubd:
        case INS_paddq:
        case INS_psubq:
        case INS_paddsb:
        case INS_psubsb:
        case INS_paddsw:
        case INS_psubsw:
        case INS_paddusb:
        case INS_psubusb:
        case INS_paddusw:
        case INS_psubusw:
        case INS_pand:
        case INS_pandn:
        case INS_por:
        case INS_pxor:
        case INS_andpd:
        case INS_andps:
        case INS_andnpd:
        case INS_andnps:
        case INS_orpd:
        case INS_orps:
        case INS_xorpd:
        case INS_xorps:
        case INS_blendps:
        case INS_blendpd:
        case INS_vpblendd:
            result.insLatency += PERFSCORE_LATENCY_1C;
            if (memAccessKind == PERFSCORE_MEMORY_NONE)
            {
                result.insThroughput = PERFSCORE_THROUGHPUT_3X;
            }
            else
            {
                result.insThroughput = PERFSCORE_THROUGHPUT_2X;
            }
            break;

        case INS_andn:
        case INS_pcmpeqb:
        case INS_pcmpeqw:
        case INS_pcmpeqd:
        case INS_pcmpeqq:
        case INS_pcmpgtb:
        case INS_pcmpgtw:
        case INS_pcmpgtd:
        case INS_pavgb:
        case INS_pavgw:
        case INS_pminub:
        case INS_pminsb:
        case INS_pminuw:
        case INS_pminsw:
        case INS_pminud:
        case INS_pminsd:
        case INS_pmaxub:
        case INS_pmaxsb:
        case INS_pmaxuw:
        case INS_pmaxsw:
        case INS_pmaxsd:
        case INS_pmaxud:
        case INS_pabsb:
        case INS_pabsw:
        case INS_pabsd:
        case INS_psignb:
        case INS_psignw:
        case INS_psignd:
        case INS_vpsravd:
        case INS_blendvps:
        case INS_blendvpd:
        case INS_pblendvb:
        case INS_vpsllvd:
        case INS_vpsllvq:
        case INS_vpsrlvd:
        case INS_vpsrlvq:
            result.insThroughput = PERFSCORE_THROUGHPUT_2X;
            result.insLatency += PERFSCORE_LATENCY_1C;
            break;

        case INS_pslld:
        case INS_psllw:
        case INS_psllq:
        case INS_psrlw:
        case INS_psrld:
        case INS_psrlq:
        case INS_psrad:
        case INS_psraw:
            if (insFmt == IF_RWR_CNS)
            {
                result.insLatency    = PERFSCORE_LATENCY_1C;
                result.insThroughput = PERFSCORE_THROUGHPUT_2X;
            }
            else
            {
                result.insLatency += opSize == EA_32BYTE ? PERFSCORE_LATENCY_4C : PERFSCORE_LATENCY_2C;
                result.insThroughput = PERFSCORE_THROUGHPUT_1C;
            }
            break;

        case INS_blsi:
        case INS_blsmsk:
        case INS_blsr:
        case INS_bzhi:
        case INS_rorx:
            result.insLatency += PERFSCORE_LATENCY_1C;
            result.insThroughput = PERFSCORE_THROUGHPUT_2X;
            break;

        case INS_bextr:
            result.insLatency += PERFSCORE_LATENCY_2C;
            result.insThroughput = PERFSCORE_THROUGHPUT_2X;
            break;

        case INS_packuswb:
        case INS_packusdw:
        case INS_packsswb:
        case INS_packssdw:
        case INS_unpcklps:
        case INS_unpckhps:
        case INS_unpcklpd:
        case INS_unpckhpd:
        case INS_punpckldq:
        case INS_punpcklwd:
        case INS_punpcklbw:
        case INS_punpckhdq:
        case INS_punpckhwd:
        case INS_punpckhbw:
        case INS_punpcklqdq:
        case INS_punpckhqdq:
        case INS_pshufb:
        case INS_pshufd:
        case INS_pshuflw:
        case INS_pshufhw:
        case INS_shufps:
        case INS_shufpd:
        case INS_pblendw:
        case INS_movsldup:
        case INS_movshdup:
        case INS_insertps:
        case INS_palignr:
        case INS_vpermilps:
        case INS_vpermilpd:
        case INS_vpermilpsvar:
        case INS_vpermilpdvar:
        case INS_pslldq:
        case INS_psrldq:
            result.insThroughput = PERFSCORE_THROUGHPUT_1C;
            result.insLatency += PERFSCORE_LATENCY_1C;
            break;

        case INS_vblendvps:
        case INS_vblendvpd:
        case INS_vpblendvb:
            result.insThroughput = PERFSCORE_THROUGHPUT_1C;
            result.insLatency += PERFSCORE_LATENCY_2C;
            break;

        case INS_bswap:
            if (opSize == EA_8BYTE)
            {
                result.insThroughput = PERFSCORE_THROUGHPUT_1C;
                result.insLatency    = PERFSCORE_LATENCY_2C;
            }
            else
            {
                assert(opSize == EA_4BYTE);
                result.insThroughput = PERFSCORE_THROUGHPUT_2X;
                result.insLatency    = PERFSCORE_LATENCY_1C;
            }
            break;

        case INS_pmovmskb:
        case INS_movmskpd:
        case INS_movmskps:
            result.insThroughput = PERFSCORE_THROUGHPUT_1C;
            if (opSize == EA_32BYTE)
            {
                result.insLatency += ins == INS_pmovmskb ? PERFSCORE_LATENCY_4C : PERFSCORE_LATENCY_5C;
            }
            else
            {
                result.insLatency += PERFSCORE_LATENCY_3C;
            }
            break;

        case INS_bsf:
        case INS_bsr:
        case INS_lzcnt:
        case INS_tzcnt:
        case INS_popcnt:
        case INS_crc32:
        case INS_pdep:
        case INS_pext:
        case INS_pcmpgtq:
        case INS_psadbw:
        case INS_vpermps:
        case INS_vpermpd:
        case INS_vpermd:
        case INS_vpermq:
        case INS_vperm2i128:
        case INS_vperm2f128:
        case INS_vextractf128:
        case INS_vextracti128:
        case INS_vinsertf128:
        case INS_vinserti128:
            result.insThroughput = PERFSCORE_THROUGHPUT_1C;
            result.insLatency += PERFSCORE_LATENCY_3C;
            break;

        case INS_pextrb:
        case INS_pextrd:
        case INS_pextrw:
        case INS_pextrq:
        case INS_pextrw_sse41:
        case INS_addsubps:
        case INS_addsubpd:
            result.insThroughput = PERFSCORE_THROUGHPUT_1C;
            result.insLatency += PERFSCORE_LATENCY_4C;
            break;

        case INS_pmovsxbw:
        case INS_pmovsxbd:
        case INS_pmovsxbq:
        case INS_pmovsxwd:
        case INS_pmovsxwq:
        case INS_pmovsxdq:
        case INS_pmovzxbw:
        case INS_pmovzxbd:
        case INS_pmovzxbq:
        case INS_pmovzxwd:
        case INS_pmovzxwq:
        case INS_pmovzxdq:
            result.insThroughput = PERFSCORE_THROUGHPUT_1C;
            result.insLatency += opSize == EA_32BYTE ? PERFSCORE_LATENCY_3C : PERFSCORE_LATENCY_1C;
            break;

        case INS_phaddw:
        case INS_phaddd:
        case INS_phaddsw:
        case INS_phsubw:
        case INS_phsubsw:
        case INS_phsubd:
            result.insThroughput = PERFSCORE_THROUGHPUT_2C;
            result.insLatency += PERFSCORE_LATENCY_3C;
            break;

        case INS_cmpps:
        case INS_cmppd:
        case INS_cmpss:
        case INS_cmpsd:
            result.insThroughput = PERFSCORE_THROUGHPUT_2X;
            result.insLatency    = PERFSCORE_LATENCY_4C;
            break;

        case INS_mulx:
        case INS_maxps:
        case INS_maxpd:
        case INS_maxss:
        case INS_maxsd:
        case INS_minps:
        case INS_minpd:
        case INS_minss:
        case INS_minsd:
        case INS_phminposuw:
        case INS_extractps:
            result.insThroughput = PERFSCORE_THROUGHPUT_1C;
            result.insLatency += PERFSCORE_LATENCY_4C;
            break;

        case INS_ptest:
            result.insThroughput = PERFSCORE_THROUGHPUT_1C;
            result.insLatency += opSize == EA_32BYTE ? PERFSCORE_LATENCY_6C : PERFSCORE_LATENCY_4C;
            break;

        case INS_mpsadbw:
            result.insThroughput = PERFSCORE_THROUGHPUT_2C;
            result.insLatency += PERFSCORE_LATENCY_4C;
            break;

        case INS_pmullw:
        case INS_pmulhw:
        case INS_pmulhuw:
        case INS_pmulhrsw:
        case INS_pmuldq:
        case INS_pmuludq:
        case INS_pmaddwd:
        case INS_pmaddubsw:
            result.insThroughput = PERFSCORE_THROUGHPUT_2X;
            result.insLatency += PERFSCORE_LATENCY_5C;
            break;

        case INS_cvtsd2ss:
        case INS_cvtps2pd:
        case INS_cvtpd2dq:
        case INS_cvtdq2pd:
        case INS_cvtpd2ps:
        case INS_cvttpd2dq:
            result.insThroughput = PERFSCORE_THROUGHPUT_1C;
            result.insLatency += opSize == EA_32BYTE ? PERFSCORE_LATENCY_7C : PERFSCORE_LATENCY_5C;
            break;

        case INS_vtestps:
        case INS_vtestpd:
            result.insThroughput = PERFSCORE_THROUGHPUT_1C;
            result.insLatency += opSize == EA_32BYTE ? PERFSCORE_LATENCY_5C : PERFSCORE_LATENCY_3C;
            break;

        case INS_hsubps:
        case INS_hsubpd:
            result.insThroughput = PERFSCORE_THROUGHPUT_2C;
            result.insLatency += PERFSCORE_LATENCY_6C;
            break;

        case INS_pclmulqdq:
            result.insThroughput = PERFSCORE_THROUGHPUT_1C;
            result.insLatency += PERFSCORE_LATENCY_7C;
            break;

        case INS_pmulld:
            result.insThroughput = PERFSCORE_THROUGHPUT_1C;
            result.insLatency += PERFSCORE_LATENCY_10C;
            break;

        case INS_vpbroadcastb:
        case INS_vpbroadcastw:
        case INS_vpbroadcastd:
        case INS_vpbroadcastq:
        case INS_vbroadcasti128:
        case INS_vbroadcastf128:
        case INS_vbroadcastss:
        case INS_vbroadcastsd:
            if (memAccessKind == PERFSCORE_MEMORY_NONE)
            {
                result.insThroughput = PERFSCORE_THROUGHPUT_1C;
                result.insLatency    = opSize == EA_32BYTE ? PERFSCORE_LATENCY_3C : PERFSCORE_LATENCY_1C;
            }
            else
            {
                result.insThroughput = PERFSCORE_THROUGHPUT_2X;
                result.insLatency += opSize == EA_32BYTE ? PERFSCORE_LATENCY_3C : PERFSCORE_LATENCY_2C;
                if (ins == INS_vpbroadcastb || ins == INS_vpbroadcastw)
                {
                    result.insLatency += PERFSCORE_LATENCY_1C;
                }
            }
            break;

        case INS_pinsrb:
        case INS_pinsrw:
        case INS_pinsrd:
        case INS_pinsrq:
            if (memAccessKind == PERFSCORE_MEMORY_NONE)
            {
                result.insThroughput = PERFSCORE_THROUGHPUT_2C;
                result.insLatency    = PERFSCORE_LATENCY_4C;
            }
            else
            {
                result.insThroughput = PERFSCORE_THROUGHPUT_1C;
                result.insLatency += PERFSCORE_LATENCY_3C;
            }
            break;

        case INS_dppd:
            result.insThroughput = PERFSCORE_THROUGHPUT_1C;
            result.insLatency    = PERFSCORE_LATENCY_9C;
            break;

        case INS_dpps:
            result.insThroughput = PERFSCORE_THROUGHPUT_2C;
            result.insLatency    = PERFSCORE_LATENCY_13C;
            break;

        case INS_vfmadd132pd:
        case INS_vfmadd213pd:
        case INS_vfmadd231pd:
        case INS_vfmadd132ps:
        case INS_vfmadd213ps:
        case INS_vfmadd231ps:
        case INS_vfmadd132sd:
        case INS_vfmadd213sd:
        case INS_vfmadd231sd:
        case INS_vfmadd132ss:
        case INS_vfmadd213ss:
        case INS_vfmadd231ss:
        case INS_vfmaddsub132pd:
        case INS_vfmaddsub213pd:
        case INS_vfmaddsub231pd:
        case INS_vfmaddsub132ps:
        case INS_vfmaddsub213ps:
        case INS_vfmaddsub231ps:
        case INS_vfmsubadd132pd:
        case INS_vfmsubadd213pd:
        case INS_vfmsubadd231pd:
        case INS_vfmsubadd132ps:
        case INS_vfmsubadd213ps:
        case INS_vfmsubadd231ps:
        case INS_vfmsub132pd:
        case INS_vfmsub213pd:
        case INS_vfmsub231pd:
        case INS_vfmsub132ps:
        case INS_vfmsub213ps:
        case INS_vfmsub231ps:
        case INS_vfmsub132sd:
        case INS_vfmsub213sd:
        case INS_vfmsub231sd:
        case INS_vfmsub132ss:
        case INS_vfmsub213ss:
        case INS_vfmsub231ss:
        case INS_vfnmadd132pd:
        case INS_vfnmadd213pd:
        case INS_vfnmadd231pd:
        case INS_vfnmadd132ps:
        case INS_vfnmadd213ps:
        case INS_vfnmadd231ps:
        case INS_vfnmadd132sd:
        case INS_vfnmadd213sd:
        case INS_vfnmadd231sd:
        case INS_vfnmadd132ss:
        case INS_vfnmadd213ss:
        case INS_vfnmadd231ss:
        case INS_vfnmsub132pd:
        case INS_vfnmsub213pd:
        case INS_vfnmsub231pd:
        case INS_vfnmsub132ps:
        case INS_vfnmsub213ps:
        case INS_vfnmsub231ps:
        case INS_vfnmsub132sd:
        case INS_vfnmsub213sd:
        case INS_vfnmsub231sd:
        case INS_vfnmsub132ss:
        case INS_vfnmsub213ss:
        case INS_vfnmsub231ss:
        case INS_vpdpbusd:  // will be populated when the HW becomes publicly available
        case INS_vpdpwssd:  // will be populated when the HW becomes publicly available
        case INS_vpdpbusds: // will be populated when the HW becomes publicly available
        case INS_vpdpwssds: // will be populated when the HW becomes publicly available
            // uops.info
            result.insThroughput = PERFSCORE_THROUGHPUT_2X;
            result.insLatency += PERFSCORE_LATENCY_4C;
            break;

        case INS_vmaskmovpd:
        case INS_vmaskmovps:
        case INS_vpmaskmovd:
        case INS_vpmaskmovq:
            if (memAccessKind == PERFSCORE_MEMORY_READ)
            {
                result.insThroughput = PERFSCORE_THROUGHPUT_2X;
                result.insLatency += opSize == EA_32BYTE ? PERFSCORE_LATENCY_4C : PERFSCORE_LATENCY_3C;
            }
            else
            {
                assert(memAccessKind == PERFSCORE_MEMORY_WRITE);
                result.insThroughput = PERFSCORE_THROUGHPUT_1C;
                result.insLatency += PERFSCORE_LATENCY_12C;
            }
            break;

        case INS_vpgatherdd:
        case INS_vgatherdps:
            result.insThroughput = PERFSCORE_THROUGHPUT_4C;
            result.insLatency += opSize == EA_32BYTE ? PERFSCORE_LATENCY_13C : PERFSCORE_LATENCY_11C;
            break;

        case INS_vpgatherdq:
        case INS_vpgatherqd:
        case INS_vpgatherqq:
        case INS_vgatherdpd:
        case INS_vgatherqps:
        case INS_vgatherqpd:
            result.insThroughput = PERFSCORE_THROUGHPUT_4C;
            result.insLatency += opSize == EA_32BYTE ? PERFSCORE_LATENCY_11C : PERFSCORE_LATENCY_9C;
            break;

        case INS_aesdec:
        case INS_aesdeclast:
        case INS_aesenc:
        case INS_aesenclast:
            result.insThroughput = PERFSCORE_THROUGHPUT_1C;
            result.insLatency += PERFSCORE_LATENCY_4C;
            break;

        case INS_aesimc:
            result.insThroughput = PERFSCORE_THROUGHPUT_2C;
            result.insLatency += PERFSCORE_LATENCY_8C;
            break;

        case INS_aeskeygenassist:
            result.insThroughput = PERFSCORE_THROUGHPUT_13C;
            result.insLatency += PERFSCORE_LATENCY_7C;
            break;

        case INS_lfence:
            result.insThroughput = PERFSCORE_THROUGHPUT_4C;
            break;

        case INS_sfence:
            result.insThroughput = PERFSCORE_THROUGHPUT_6C;
            break;

        case INS_mfence:
            result.insThroughput = PERFSCORE_THROUGHPUT_33C;
            break;

        case INS_prefetcht0:
        case INS_prefetcht1:
        case INS_prefetcht2:
        case INS_prefetchnta:
            result.insThroughput = PERFSCORE_THROUGHPUT_2X;
            break;

        case INS_pause:
        {
            result.insLatency    = PERFSCORE_LATENCY_140C;
            result.insThroughput = PERFSCORE_THROUGHPUT_140C;
            break;
        }

        default:
            // unhandled instruction insFmt combination
            perfScoreUnhandledInstruction(id, &result);
            break;
    }

    return result;
}

#endif // defined(DEBUG) || defined(LATE_DISASM)

/*****************************************************************************/
/*****************************************************************************/

#endif // defined(TARGET_XARCH)
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX emitX86.cpp XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif #if defined(TARGET_XARCH) /*****************************************************************************/ /*****************************************************************************/ #include "instr.h" #include "emit.h" #include "codegen.h" bool emitter::IsSSEInstruction(instruction ins) { return (ins >= INS_FIRST_SSE_INSTRUCTION) && (ins <= INS_LAST_SSE_INSTRUCTION); } bool emitter::IsSSEOrAVXInstruction(instruction ins) { return (ins >= INS_FIRST_SSE_INSTRUCTION) && (ins <= INS_LAST_AVX_INSTRUCTION); } bool emitter::IsAVXOnlyInstruction(instruction ins) { return (ins >= INS_FIRST_AVX_INSTRUCTION) && (ins <= INS_LAST_AVX_INSTRUCTION); } bool emitter::IsFMAInstruction(instruction ins) { return (ins >= INS_FIRST_FMA_INSTRUCTION) && (ins <= INS_LAST_FMA_INSTRUCTION); } bool emitter::IsAVXVNNIInstruction(instruction ins) { return (ins >= INS_FIRST_AVXVNNI_INSTRUCTION) && (ins <= INS_LAST_AVXVNNI_INSTRUCTION); } bool emitter::IsBMIInstruction(instruction ins) { return (ins >= INS_FIRST_BMI_INSTRUCTION) && (ins <= INS_LAST_BMI_INSTRUCTION); } regNumber emitter::getBmiRegNumber(instruction ins) { switch (ins) { case INS_blsi: { return (regNumber)3; } case INS_blsmsk: { return (regNumber)2; } case INS_blsr: { return (regNumber)1; } default: { assert(IsBMIInstruction(ins)); return REG_NA; } } } regNumber emitter::getSseShiftRegNumber(instruction ins) { switch (ins) { case INS_psrldq: { return (regNumber)3; } case INS_pslldq: { 
return (regNumber)7; } case INS_psrld: case INS_psrlw: case INS_psrlq: { return (regNumber)2; } case INS_pslld: case INS_psllw: case INS_psllq: { return (regNumber)6; } case INS_psrad: case INS_psraw: { return (regNumber)4; } default: { assert(!"Invalid instruction for SSE2 instruction of the form: opcode reg, immed8"); return REG_NA; } } } bool emitter::IsAVXInstruction(instruction ins) const { return UseVEXEncoding() && IsSSEOrAVXInstruction(ins); } // Returns true if the AVX instruction is a binary operator that requires 3 operands. // When we emit an instruction with only two operands, we will duplicate the destination // as a source. // TODO-XArch-Cleanup: This is a temporary solution for now. Eventually this needs to // be formalized by adding an additional field to instruction table to // to indicate whether a 3-operand instruction. bool emitter::IsDstDstSrcAVXInstruction(instruction ins) { return ((CodeGenInterface::instInfo[ins] & INS_Flags_IsDstDstSrcAVXInstruction) != 0) && IsAVXInstruction(ins); } // Returns true if the AVX instruction requires 3 operands that duplicate the source // register in the vvvv field. // TODO-XArch-Cleanup: This is a temporary solution for now. Eventually this needs to // be formalized by adding an additional field to instruction table to // to indicate whether a 3-operand instruction. bool emitter::IsDstSrcSrcAVXInstruction(instruction ins) { return ((CodeGenInterface::instInfo[ins] & INS_Flags_IsDstSrcSrcAVXInstruction) != 0) && IsAVXInstruction(ins); } //------------------------------------------------------------------------ // HasRegularWideForm: Many x86/x64 instructions follow a regular encoding scheme where the // byte-sized version of an instruction has the lowest bit of the opcode cleared // while the 32-bit version of the instruction (taking potential prefixes to // override operand size) has the lowest bit set. This function returns true if // the instruction follows this format. 
// // Note that this bit is called `w` in the encoding table in Section B.2 of // Volume 2 of the Intel Architecture Software Developer Manual. // // Arguments: // ins - instruction to test // // Return Value: // true if instruction has a regular form where the 'w' bit needs to be set. bool emitter::HasRegularWideForm(instruction ins) { return ((CodeGenInterface::instInfo[ins] & INS_FLAGS_Has_Wbit) != 0); } //------------------------------------------------------------------------ // HasRegularWideImmediateForm: As above in HasRegularWideForm, many instructions taking // immediates have a regular form used to encode whether the instruction takes a sign-extended // 1-byte immediate or a (in 64-bit sign-extended) 4-byte immediate, by respectively setting and // clearing the second lowest bit. // // Note that this bit is called `s` in the encoding table in Section B.2 of // Volume 2 of the Intel Architecture Software Developer Manual. // // Arguments: // ins - instruction to test // // Return Value: // true if instruction has a regular wide immediate form where the 's' bit needs to set. bool emitter::HasRegularWideImmediateForm(instruction ins) { return ((CodeGenInterface::instInfo[ins] & INS_FLAGS_Has_Sbit) != 0); } //------------------------------------------------------------------------ // DoesWriteZeroFlag: check if the instruction write the // ZF flag. // // Arguments: // ins - instruction to test // // Return Value: // true if instruction writes the ZF flag, false otherwise. // bool emitter::DoesWriteZeroFlag(instruction ins) { return (CodeGenInterface::instInfo[ins] & Writes_ZF) != 0; } //------------------------------------------------------------------------ // DoesWriteSignFlag: check if the instruction writes the // SF flag. // // Arguments: // ins - instruction to test // // Return Value: // true if instruction writes the SF flag, false otherwise. 
// bool emitter::DoesWriteSignFlag(instruction ins) { return (CodeGenInterface::instInfo[ins] & Writes_SF) != 0; } //------------------------------------------------------------------------ // DoesResetOverflowAndCarryFlags: check if the instruction resets the // OF and CF flag to 0. // // Arguments: // ins - instruction to test // // Return Value: // true if instruction resets the OF and CF flag, false otherwise. // bool emitter::DoesResetOverflowAndCarryFlags(instruction ins) { return (CodeGenInterface::instInfo[ins] & (Resets_OF | Resets_CF)) == (Resets_OF | Resets_CF); } //------------------------------------------------------------------------ // IsFlagsAlwaysModified: check if the instruction guarantee to modify any flags. // // Arguments: // id - instruction to test // // Return Value: // false, if instruction is guaranteed to not modify any flag. // true, if instruction will modify some flag. // bool emitter::IsFlagsAlwaysModified(instrDesc* id) { instruction ins = id->idIns(); insFormat fmt = id->idInsFmt(); if (fmt == IF_RRW_SHF) { if (id->idIsLargeCns()) { return true; } else if (id->idSmallCns() == 0) { switch (ins) { // If shift-amount for below instructions is 0, then flags are unaffected. case INS_rcl_N: case INS_rcr_N: case INS_rol_N: case INS_ror_N: case INS_shl_N: case INS_shr_N: case INS_sar_N: return false; default: return true; } } } else if (fmt == IF_RRW) { switch (ins) { // If shift-amount for below instructions is 0, then flags are unaffected. // So, to be conservative, do not optimize if the instruction has register // as the shift-amount operand. case INS_rcl: case INS_rcr: case INS_rol: case INS_ror: case INS_shl: case INS_shr: case INS_sar: return false; default: return true; } } return true; } //------------------------------------------------------------------------ // AreUpper32BitsZero: check if some previously emitted // instruction set the upper 32 bits of reg to zero. 
// // Arguments: // reg - register of interest // // Return Value: // true if previous instruction zeroed reg's upper 32 bits. // false if it did not, or if we can't safely determine. // // Notes: // Currently only looks back one instruction. // // movsx eax, ... might seem viable but we always encode this // instruction with a 64 bit destination. See TakesRexWPrefix. bool emitter::AreUpper32BitsZero(regNumber reg) { // If there are no instructions in this IG, we can look back at // the previous IG's instructions if this IG is an extension. // if ((emitCurIGinsCnt == 0) && ((emitCurIG->igFlags & IGF_EXTEND) == 0)) { return false; } instrDesc* id = emitLastIns; insFormat fmt = id->idInsFmt(); // This isn't meant to be a comprehensive check. Just look for what // seems to be common. switch (fmt) { case IF_RWR_CNS: case IF_RRW_CNS: case IF_RRW_SHF: case IF_RWR_RRD: case IF_RRW_RRD: case IF_RWR_MRD: case IF_RWR_SRD: case IF_RWR_ARD: // Bail if not writing to the right register if (id->idReg1() != reg) { return false; } // Bail if movsx, we always have movsx sign extend to 8 bytes if (id->idIns() == INS_movsx) { return false; } #ifdef TARGET_AMD64 if (id->idIns() == INS_movsxd) { return false; } #endif // movzx always zeroes the upper 32 bits. if (id->idIns() == INS_movzx) { return true; } // Else rely on operation size. return (id->idOpSize() == EA_4BYTE); default: break; } return false; } //------------------------------------------------------------------------ // AreFlagsSetToZeroCmp: Checks if the previous instruction set the SZ, and optionally OC, flags to // the same values as if there were a compare to 0 // // Arguments: // reg - register of interest // opSize - size of register // treeOps - type of tree node operation // // Return Value: // true if the previous instruction set the flags for reg // false if not, or if we can't safely determine // // Notes: // Currently only looks back one instruction. 
bool emitter::AreFlagsSetToZeroCmp(regNumber reg, emitAttr opSize, genTreeOps treeOps)
{
    assert(reg != REG_NA);

    if (!emitComp->opts.OptimizationEnabled())
    {
        return false;
    }

    // Don't look back across IG boundaries (possible control flow)
    if (emitCurIGinsCnt == 0 && ((emitCurIG->igFlags & IGF_EXTEND) == 0))
    {
        return false;
    }

    instrDesc*  id      = emitLastIns;
    instruction lastIns = id->idIns();
    insFormat   fmt     = id->idInsFmt();

    // make sure op1 is a reg
    switch (fmt)
    {
        case IF_RWR_CNS:
        case IF_RRW_CNS:
        case IF_RRW_SHF:
        case IF_RWR_RRD:
        case IF_RRW_RRD:
        case IF_RWR_MRD:
        case IF_RWR_SRD:
        case IF_RRW_SRD:
        case IF_RWR_ARD:
        case IF_RRW_ARD:
        case IF_RWR:
        case IF_RRD:
        case IF_RRW:
        case IF_RWR_RRD_RRD:
        case IF_RWR_RRD_MRD:
        case IF_RWR_RRD_ARD:
        case IF_RWR_RRD_SRD:
            break;

        default:
            return false;
    }

    // The flags only describe the register this instruction wrote to.
    if (id->idReg1() != reg)
    {
        return false;
    }

    // Certain instruction like and, or and xor modifies exactly same flags
    // as "test" instruction.
    // They reset OF and CF to 0 and modifies SF, ZF and PF.
    if (DoesResetOverflowAndCarryFlags(lastIns))
    {
        return id->idOpSize() == opSize;
    }

    if ((treeOps == GT_EQ) || (treeOps == GT_NE))
    {
        // For equality comparisons only ZF matters, so it is enough that the
        // previous instruction is guaranteed to have written it.
        if (DoesWriteZeroFlag(lastIns) && IsFlagsAlwaysModified(id))
        {
            return id->idOpSize() == opSize;
        }
    }

    return false;
}

//------------------------------------------------------------------------
// AreFlagsSetForSignJumpOpt: checks if the previous instruction set the SF if the tree
//                            node qualifies for a jg/jle to jns/js optimization
//
// Arguments:
//    reg    - register of interest
//    opSize - size of register
//    relop  - relational tree node
//
// Return Value:
//    true if the tree node qualifies for the jg/jle to jns/js optimization
//    false if not, or if we can't safely determine
//
// Notes:
//    Currently only looks back one instruction.
bool emitter::AreFlagsSetForSignJumpOpt(regNumber reg, emitAttr opSize, GenTree* relop)
{
    assert(reg != REG_NA);

    if (!emitComp->opts.OptimizationEnabled())
    {
        return false;
    }

    // Don't look back across IG boundaries (possible control flow)
    if (emitCurIGinsCnt == 0 && ((emitCurIG->igFlags & IGF_EXTEND) == 0))
    {
        return false;
    }

    instrDesc*  id      = emitLastIns;
    instruction lastIns = id->idIns();
    insFormat   fmt     = id->idInsFmt();

    // make sure op1 is a reg
    switch (fmt)
    {
        case IF_RWR_CNS:
        case IF_RRW_CNS:
        case IF_RRW_SHF:
        case IF_RWR_RRD:
        case IF_RRW_RRD:
        case IF_RWR_MRD:
        case IF_RWR_SRD:
        case IF_RRW_SRD:
        case IF_RWR_ARD:
        case IF_RRW_ARD:
        case IF_RWR:
        case IF_RRD:
        case IF_RRW:
            break;

        default:
            return false;
    }

    // The flags only describe the register this instruction wrote to.
    if (id->idReg1() != reg)
    {
        return false;
    }

    // If we have a GT_GE/GT_LT which generates an jge/jl, and the previous instruction
    // sets the SF, we can omit a test instruction and check for jns/js.
    if ((relop->OperGet() == GT_GE || relop->OperGet() == GT_LT) && !GenCondition::FromRelop(relop).IsUnsigned())
    {
        if (DoesWriteSignFlag(lastIns) && IsFlagsAlwaysModified(id))
        {
            return id->idOpSize() == opSize;
        }
    }

    return false;
}

//------------------------------------------------------------------------
// IsDstSrcImmAvxInstruction: Checks if the instruction has a "reg, reg/mem, imm" or
//                            "reg/mem, reg, imm" form for the legacy, VEX, and EVEX
//                            encodings.
//
// Arguments:
//    instruction -- processor instruction to check
//
// Return Value:
//    true if instruction has a "reg, reg/mem, imm" or "reg/mem, reg, imm" encoding
//    form for the legacy, VEX, and EVEX encodings.
//
//    That is, the instruction takes two operands, one of which is immediate, and it
//    does not need to encode any data in the VEX.vvvv field.
// static bool IsDstSrcImmAvxInstruction(instruction ins) { switch (ins) { case INS_aeskeygenassist: case INS_extractps: case INS_pextrb: case INS_pextrw: case INS_pextrd: case INS_pextrq: case INS_pshufd: case INS_pshufhw: case INS_pshuflw: case INS_roundpd: case INS_roundps: return true; default: return false; } } // ------------------------------------------------------------------- // Is4ByteSSEInstruction: Returns true if the SSE instruction is a 4-byte opcode. // // Arguments: // ins - instruction // // Note that this should be true for any of the instructions in instrsXArch.h // that use the SSE38 or SSE3A macro but returns false if the VEX encoding is // in use, since that encoding does not require an additional byte. bool emitter::Is4ByteSSEInstruction(instruction ins) { return !UseVEXEncoding() && EncodedBySSE38orSSE3A(ins); } // Returns true if this instruction requires a VEX prefix // All AVX instructions require a VEX prefix bool emitter::TakesVexPrefix(instruction ins) const { // special case vzeroupper as it requires 2-byte VEX prefix // special case the fencing, movnti and the prefetch instructions as they never take a VEX prefix switch (ins) { case INS_lfence: case INS_mfence: case INS_movnti: case INS_prefetchnta: case INS_prefetcht0: case INS_prefetcht1: case INS_prefetcht2: case INS_sfence: case INS_vzeroupper: return false; default: break; } return IsAVXInstruction(ins); } // Add base VEX prefix without setting W, R, X, or B bits // L bit will be set based on emitter attr. 
//
// 2-byte VEX prefix = C5 <R,vvvv,L,pp>
// 3-byte VEX prefix = C4 <R,X,B,m-mmmm> <W,vvvv,L,pp>
//  - R, X, B, W - bits to express corresponding REX prefixes
//  - m-mmmmm (5-bit)
//    0-00001 - implied leading 0F opcode byte
//    0-00010 - implied leading 0F 38 opcode bytes
//    0-00011 - implied leading 0F 3A opcode bytes
//    Rest    - reserved for future use and usage of them will result in Undefined instruction exception
//
// - vvvv (4-bits) - register specifier in 1's complement form; must be 1111 if unused
// - L - scalar or AVX-128 bit operations (L=0),  256-bit operations (L=1)
// - pp (2-bits) - opcode extension providing equivalent functionality of a SIMD size prefix
//                 these prefixes are treated mandatory when used with escape opcode 0Fh for
//                 some SIMD instructions
//   00  - None   (0F    - packed float)
//   01  - 66     (66 0F - packed double)
//   10  - F3     (F3 0F - scalar float
//   11  - F2     (F2 0F - scalar double)
#define DEFAULT_3BYTE_VEX_PREFIX 0xC4E07800000000ULL
#define DEFAULT_3BYTE_VEX_PREFIX_MASK 0xFFFFFF00000000ULL
#define LBIT_IN_3BYTE_VEX_PREFIX 0x00000400000000ULL
emitter::code_t emitter::AddVexPrefix(instruction ins, code_t code, emitAttr attr)
{
    // The 2-byte VEX encoding is preferred when possible, but actually emitting
    // it depends on a number of factors that we may not know until much later.
    //
    // In order to handle this "easily", we just carry the 3-byte encoding all
    // the way through and "fix-up" the encoding when the VEX prefix is actually
    // emitted, by simply checking that all the requirements were met.

    // Only AVX instructions require VEX prefix
    assert(IsAVXInstruction(ins));

    // Shouldn't have already added VEX prefix
    assert(!hasVexPrefix(code));

    assert((code & DEFAULT_3BYTE_VEX_PREFIX_MASK) == 0);

    code |= DEFAULT_3BYTE_VEX_PREFIX;

    if (attr == EA_32BYTE)
    {
        // Set L bit to 1 in case of instructions that operate on 256-bits.
        code |= LBIT_IN_3BYTE_VEX_PREFIX;
    }

    return code;
}

// Returns true if this instruction, for the given EA_SIZE(attr), will require a REX.W prefix
bool emitter::TakesRexWPrefix(instruction ins, emitAttr attr)
{
    // Because the current implementation of AVX does not have a way to distinguish between the register
    // size specification (128 vs. 256 bits) and the operand size specification (32 vs. 64 bits), where both are
    // required, the instruction must be created with the register size attribute (EA_16BYTE or EA_32BYTE),
    // and here we must special case these by the opcode.
    switch (ins)
    {
        case INS_vpermpd:
        case INS_vpermq:
        case INS_vpsrlvq:
        case INS_vpsllvq:
        case INS_pinsrq:
        case INS_pextrq:
        case INS_vfmadd132pd:
        case INS_vfmadd213pd:
        case INS_vfmadd231pd:
        case INS_vfmadd132sd:
        case INS_vfmadd213sd:
        case INS_vfmadd231sd:
        case INS_vfmaddsub132pd:
        case INS_vfmaddsub213pd:
        case INS_vfmaddsub231pd:
        case INS_vfmsubadd132pd:
        case INS_vfmsubadd213pd:
        case INS_vfmsubadd231pd:
        case INS_vfmsub132pd:
        case INS_vfmsub213pd:
        case INS_vfmsub231pd:
        case INS_vfmsub132sd:
        case INS_vfmsub213sd:
        case INS_vfmsub231sd:
        case INS_vfnmadd132pd:
        case INS_vfnmadd213pd:
        case INS_vfnmadd231pd:
        case INS_vfnmadd132sd:
        case INS_vfnmadd213sd:
        case INS_vfnmadd231sd:
        case INS_vfnmsub132pd:
        case INS_vfnmsub213pd:
        case INS_vfnmsub231pd:
        case INS_vfnmsub132sd:
        case INS_vfnmsub213sd:
        case INS_vfnmsub231sd:
        case INS_vpmaskmovq:
        case INS_vpgatherdq:
        case INS_vpgatherqq:
        case INS_vgatherdpd:
        case INS_vgatherqpd:
            return true;

        default:
            break;
    }

#ifdef TARGET_AMD64
    // movsx should always sign extend out to 8 bytes just because we don't track
    // whether the dest should be 4 bytes or 8 bytes (attr indicates the size
    // of the source, not the dest).
    // A 4-byte movzx is equivalent to an 8 byte movzx, so it is not special
    // cased here.
    if (ins == INS_movsx)
    {
        return true;
    }

    if (EA_SIZE(attr) != EA_8BYTE)
    {
        return false;
    }

    if (IsSSEOrAVXInstruction(ins))
    {
        // Instructions below transfer data between GPRs and SIMD registers (or
        // are BMI ops on GPRs), so they do honor the 64-bit operand size bit.
        switch (ins)
        {
            case INS_movd: // TODO-Cleanup: replace with movq, https://github.com/dotnet/runtime/issues/47943.
            case INS_andn:
            case INS_bextr:
            case INS_blsi:
            case INS_blsmsk:
            case INS_blsr:
            case INS_bzhi:
            case INS_cvttsd2si:
            case INS_cvttss2si:
            case INS_cvtsd2si:
            case INS_cvtss2si:
            case INS_cvtsi2sd:
            case INS_cvtsi2ss:
            case INS_movnti:
            case INS_mulx:
            case INS_pdep:
            case INS_pext:
            case INS_rorx:
                return true;

            default:
                return false;
        }
    }

    // TODO-XArch-Cleanup: Better way to not emit REX.W when we don't need it, than just testing all these
    // opcodes...
    // These are all the instructions that default to 8-byte operand without the REX.W bit
    // With 1 special case: movzx because the 4 byte version still zeros-out the hi 4 bytes
    // so we never need it
    if ((ins != INS_push) && (ins != INS_pop) && (ins != INS_movq) && (ins != INS_movzx) && (ins != INS_push_hide) &&
        (ins != INS_pop_hide) && (ins != INS_ret) && (ins != INS_call) && (ins != INS_tail_i_jmp) &&
        !((ins >= INS_i_jmp) && (ins <= INS_l_jg)))
    {
        return true;
    }
    else
    {
        return false;
    }
#else  //! TARGET_AMD64 = TARGET_X86
    return false;
#endif //! TARGET_AMD64
}

// Returns true if using this register will require a REX.* prefix.
// Since XMM registers overlap with YMM registers, this routine
// can also be used to know whether a YMM register if the
// instruction in question is AVX.
bool IsExtendedReg(regNumber reg)
{
#ifdef TARGET_AMD64
    return ((reg >= REG_R8) && (reg <= REG_R15)) || ((reg >= REG_XMM8) && (reg <= REG_XMM15));
#else
    // X86 JIT operates in 32-bit mode and hence extended reg are not available.
    return false;
#endif
}

// Returns true if using this register, for the given EA_SIZE(attr), will require a REX.* prefix
bool IsExtendedReg(regNumber reg, emitAttr attr)
{
#ifdef TARGET_AMD64
    // Not a register, so doesn't need a prefix
    if (reg > REG_XMM15)
    {
        return false;
    }

    // Opcode field only has 3 bits for the register, these high registers
    // need a 4th bit, that comes from the REX prefix (either REX.X, REX.R, or REX.B)
    if (IsExtendedReg(reg))
    {
        return true;
    }

    if (EA_SIZE(attr) != EA_1BYTE)
    {
        return false;
    }

    // There are 12 one byte registers addressable 'below' r8b:
    //     al, cl, dl, bl, ah, ch, dh, bh, spl, bpl, sil, dil.
    // The first 4 are always addressable, the last 8 are divided into 2 sets:
    //     ah, ch, dh, bh
    //          -- or --
    //     spl, bpl, sil, dil
    // Both sets are encoded exactly the same, the difference is the presence
    // of a REX prefix, even a REX prefix with no other bits set (0x40).
    // So in order to get to the second set we need a REX prefix (but no bits).
    //
    // TODO-AMD64-CQ: if we ever want to start using the first set, we'll need a different way of
    // encoding/tracking/encoding registers.
    return (reg >= REG_RSP);
#else
    // X86 JIT operates in 32-bit mode and hence extended reg are not available.
    return false;
#endif
}

// Since XMM registers overlap with YMM registers, this routine
// can also be used to know whether a YMM register in case of AVX instructions.
bool IsXMMReg(regNumber reg)
{
#ifdef TARGET_AMD64
    return (reg >= REG_XMM0) && (reg <= REG_XMM15);
#else  // !TARGET_AMD64
    return (reg >= REG_XMM0) && (reg <= REG_XMM7);
#endif // !TARGET_AMD64
}

// Returns bits to be encoded in instruction for the given register.
// Only the low 3 bits are used in the opcode/ModRM; the 4th bit, if any,
// comes from a REX/VEX prefix (see IsExtendedReg).
unsigned RegEncoding(regNumber reg)
{
    static_assert((REG_XMM0 & 0x7) == 0, "bad XMMBASE");
    return (unsigned)(reg & 0x7);
}

// Utility routines that abstract the logic of adding REX.W, REX.R, REX.X, REX.B and REX prefixes
// SSE2: separate 1-byte prefix gets added before opcode.
// AVX: specific bits within VEX prefix need to be set in bit-inverted form.
emitter::code_t emitter::AddRexWPrefix(instruction ins, code_t code)
{
    if (UseVEXEncoding() && IsAVXInstruction(ins))
    {
        if (TakesVexPrefix(ins))
        {
            // W-bit is available only in 3-byte VEX prefix that starts with byte C4.
            assert(hasVexPrefix(code));

            // W-bit is the only bit that is added in non bit-inverted form.
            return emitter::code_t(code | 0x00008000000000ULL);
        }
    }
#ifdef TARGET_AMD64
    return emitter::code_t(code | 0x4800000000ULL);
#else
    assert(!"UNREACHED");
    return code;
#endif
}

#ifdef TARGET_AMD64

emitter::code_t emitter::AddRexRPrefix(instruction ins, code_t code)
{
    if (UseVEXEncoding() && IsAVXInstruction(ins))
    {
        if (TakesVexPrefix(ins))
        {
            // R-bit is supported by both 2-byte and 3-byte VEX prefix
            assert(hasVexPrefix(code));

            // R-bit is added in bit-inverted form (clearing the bit sets it).
            return code & 0xFF7FFFFFFFFFFFULL;
        }
    }

    return code | 0x4400000000ULL;
}

emitter::code_t emitter::AddRexXPrefix(instruction ins, code_t code)
{
    if (UseVEXEncoding() && IsAVXInstruction(ins))
    {
        if (TakesVexPrefix(ins))
        {
            // X-bit is available only in 3-byte VEX prefix that starts with byte C4.
            assert(hasVexPrefix(code));

            // X-bit is added in bit-inverted form (clearing the bit sets it).
            return code & 0xFFBFFFFFFFFFFFULL;
        }
    }

    return code | 0x4200000000ULL;
}

emitter::code_t emitter::AddRexBPrefix(instruction ins, code_t code)
{
    if (UseVEXEncoding() && IsAVXInstruction(ins))
    {
        if (TakesVexPrefix(ins))
        {
            // B-bit is available only in 3-byte VEX prefix that starts with byte C4.
            assert(hasVexPrefix(code));

            // B-bit is added in bit-inverted form (clearing the bit sets it).
            return code & 0xFFDFFFFFFFFFFFULL;
        }
    }

    return code | 0x4100000000ULL;
}

// Adds REX prefix (0x40) without W, R, X or B bits set
emitter::code_t emitter::AddRexPrefix(instruction ins, code_t code)
{
    assert(!UseVEXEncoding() || !IsAVXInstruction(ins));
    return code | 0x4000000000ULL;
}

#endif // TARGET_AMD64

// Returns true when 'b' is one of the opcode-embedded SIZE prefixes (0x66/0xF2/0xF3);
// asserts that it is not any of the prefixes this emitter handles elsewhere.
bool isPrefix(BYTE b)
{
    assert(b != 0);    // Caller should check this
    assert(b != 0x67); // We don't use the address size prefix
    assert(b != 0x65); // The GS segment override prefix is emitted separately
    assert(b != 0x64); // The FS segment override prefix is emitted separately
    assert(b != 0xF0); // The lock prefix is emitted separately
    assert(b != 0x2E); // We don't use the CS segment override prefix
    assert(b != 0x3E); // Or the DS segment override prefix
    assert(b != 0x26); // Or the ES segment override prefix
    assert(b != 0x36); // Or the SS segment override prefix

    // That just leaves the size prefixes used in SSE opcodes:
    //      Scalar Double  Scalar Single  Packed Double
    return ((b == 0xF2) || (b == 0xF3) || (b == 0x66));
}

// Outputs VEX prefix (in case of AVX instructions) and REX.R/X/W/B otherwise.
// Returns the number of prefix bytes written to 'dst'; 'code' is updated in
// place to strip the prefix/escape bytes that were consumed.
unsigned emitter::emitOutputRexOrVexPrefixIfNeeded(instruction ins, BYTE* dst, code_t& code)
{
    if (hasVexPrefix(code))
    {
        // Only AVX instructions should have a VEX prefix
        assert(UseVEXEncoding() && IsAVXInstruction(ins));
        code_t vexPrefix = (code >> 32) & 0x00FFFFFF;
        code &= 0x00000000FFFFFFFFLL;

        WORD leadingBytes = 0;
        BYTE check        = (code >> 24) & 0xFF;
        if (check != 0)
        {
            // 3-byte opcode: with the bytes ordered as 0x2211RM33 or
            // 4-byte opcode: with the bytes ordered as 0x22114433
            // check for a prefix in the 11 position
            BYTE sizePrefix = (code >> 16) & 0xFF;
            if ((sizePrefix != 0) && isPrefix(sizePrefix))
            {
                // 'pp' bits in byte2 of VEX prefix allows us to encode SIMD size prefixes as two bits
                //
                //   00  - None   (0F    - packed float)
                //   01  - 66     (66 0F - packed double)
                //   10  - F3     (F3 0F - scalar float
                //   11  - F2     (F2 0F - scalar double)
                switch (sizePrefix)
                {
                    case 0x66:
                        if (IsBMIInstruction(ins))
                        {
                            // BMI instructions use the 'pp' field to disambiguate
                            // between opcodes rather than as a size prefix.
                            switch (ins)
                            {
                                case INS_rorx:
                                case INS_pdep:
                                case INS_mulx:
                                {
                                    vexPrefix |= 0x03;
                                    break;
                                }

                                case INS_pext:
                                {
                                    vexPrefix |= 0x02;
                                    break;
                                }

                                default:
                                {
                                    vexPrefix |= 0x00;
                                    break;
                                }
                            }
                        }
                        else
                        {
                            vexPrefix |= 0x01;
                        }
                        break;

                    case 0xF3:
                        vexPrefix |= 0x02;
                        break;

                    case 0xF2:
                        vexPrefix |= 0x03;
                        break;

                    default:
                        assert(!"unrecognized SIMD size prefix");
                        unreached();
                }

                // Now the byte in the 22 position must be an escape byte 0F
                leadingBytes = check;
                assert(leadingBytes == 0x0F);

                // Get rid of both sizePrefix and escape byte
                code &= 0x0000FFFFLL;

                // Check the byte in the 33 position to see if it is 3A or 38.
                // In such a case escape bytes must be 0x0F3A or 0x0F38
                check = code & 0xFF;
                if (check == 0x3A || check == 0x38)
                {
                    leadingBytes = (leadingBytes << 8) | check;
                    code &= 0x0000FF00LL;
                }
            }
        }
        else
        {
            // 2-byte opcode with the bytes ordered as 0x0011RM22
            // the byte in position 11 must be an escape byte.
            leadingBytes = (code >> 16) & 0xFF;
            assert(leadingBytes == 0x0F || leadingBytes == 0x00);
            code &= 0xFFFF;
        }

        // If there is an escape byte it must be 0x0F or 0x0F3A or 0x0F38
        // m-mmmmm bits in byte 1 of VEX prefix allows us to encode these
        // implied leading bytes. 0x0F is supported by both the 2-byte and
        // 3-byte encoding. While 0x0F3A and 0x0F38 are only supported by
        // the 3-byte version.
        switch (leadingBytes)
        {
            case 0x00:
                // there is no leading byte
                break;

            case 0x0F:
                vexPrefix |= 0x0100;
                break;

            case 0x0F38:
                vexPrefix |= 0x0200;
                break;

            case 0x0F3A:
                vexPrefix |= 0x0300;
                break;

            default:
                assert(!"encountered unknown leading bytes");
                unreached();
        }

        // At this point
        //     VEX.2211RM33 got transformed as VEX.0000RM33
        //     VEX.0011RM22 got transformed as VEX.0000RM22
        //
        // Now output VEX prefix leaving the 4-byte opcode

        // The 2-byte VEX encoding, requires that the X and B-bits are set (these
        // bits are inverted from the REX values so set means off), the W-bit is
        // not set (this bit is not inverted), and that the m-mmmm bits are 0-0001
        // (the 2-byte VEX encoding only supports the 0x0F leading byte). When these
        // conditions are met, we can change byte-0 from 0xC4 to 0xC5 and then
        // byte-1 is the logical-or of bit 7 from byte-1 and bits 0-6 from byte 2
        // from the 3-byte VEX encoding.
        //
        // Given the above, the check can be reduced to a simple mask and comparison.
        // * 0xFFFF7F80 is a mask that ignores any bits whose value we don't care about:
        //   * R can be set or unset              (0x7F ignores bit 7)
        //   * vvvv can be any value              (0x80 ignores bits 3-6)
        //   * L can be set or unset              (0x80 ignores bit 2)
        //   * pp can be any value                (0x80 ignores bits 0-1)
        // * 0x00C46100 is a value that signifies the requirements listed above were met:
        //   * We must be a three-byte VEX opcode (0x00C4)
        //   * X and B must be set                (0x61 validates bits 5-6)
        //   * m-mmmm must be 0-00001             (0x61 validates bits 0-4)
        //   * W must be unset                    (0x00 validates bit 7)
        if ((vexPrefix & 0xFFFF7F80) == 0x00C46100)
        {
            // Encoding optimization calculation is not done while estimating the instruction
            // size and thus over-predict instruction size by 1 byte.
            // If there are IGs that will be aligned, do not optimize encoding so the
            // estimated alignment sizes are accurate.
            if (emitCurIG->igNum > emitLastAlignedIgNum)
            {
                emitOutputByte(dst, 0xC5);
                emitOutputByte(dst + 1, ((vexPrefix >> 8) & 0x80) | (vexPrefix & 0x7F));
                return 2;
            }
        }

        emitOutputByte(dst, ((vexPrefix >> 16) & 0xFF));
        emitOutputByte(dst + 1, ((vexPrefix >> 8) & 0xFF));
        emitOutputByte(dst + 2, vexPrefix & 0xFF);
        return 3;
    }

#ifdef TARGET_AMD64
    if (code > 0x00FFFFFFFFLL)
    {
        BYTE prefix = (code >> 32) & 0xFF;
        noway_assert(prefix >= 0x40 && prefix <= 0x4F);
        code &= 0x00000000FFFFFFFFLL;

        // TODO-AMD64-Cleanup: when we remove the prefixes (just the SSE opcodes right now)
        // we can remove this code as well
        // The REX prefix is required to come after all other prefixes.
        // Some of our 'opcodes' actually include some prefixes, if that
        // is the case, shift them over and place the REX prefix after
        // the other prefixes, and emit any prefix that got moved out.
        BYTE check = (code >> 24) & 0xFF;
        if (check == 0)
        {
            // 3-byte opcode: with the bytes ordered as 0x00113322
            // check for a prefix in the 11 position
            check = (code >> 16) & 0xFF;
            if (check != 0 && isPrefix(check))
            {
                // Swap the rex prefix and whatever this prefix is
                code = (((DWORD)prefix << 16) | (code & 0x0000FFFFLL));
                // and then emit the other prefix
                return emitOutputByte(dst, check);
            }
        }
        else
        {
            // 4-byte opcode with the bytes ordered as 0x22114433
            // first check for a prefix in the 11 position
            BYTE check2 = (code >> 16) & 0xFF;
            if (isPrefix(check2))
            {
                assert(!isPrefix(check)); // We currently don't use this, so it is untested
                if (isPrefix(check))
                {
                    // 3 prefixes were rex = rr, check = c1, check2 = c2 encoded as 0xrrc1c2XXXX
                    // Change to c2rrc1XXXX, and emit check2 now
                    code = (((code_t)prefix << 24) | ((code_t)check << 16) | (code & 0x0000FFFFLL));
                }
                else
                {
                    // 2 prefixes were rex = rr, check2 = c2 encoded as 0xrrXXc2XXXX, (check is part of the opcode)
                    // Change to c2XXrrXXXX, and emit check2 now
                    code = (((code_t)check << 24) | ((code_t)prefix << 16) | (code & 0x0000FFFFLL));
                }
                return emitOutputByte(dst, check2);
            }
        }

        return emitOutputByte(dst, prefix);
    }
#endif // TARGET_AMD64

    return 0;
}

#ifdef TARGET_AMD64
/*****************************************************************************
 * Is the last instruction emitted a call instruction?
 */
bool emitter::emitIsLastInsCall()
{
    if ((emitLastIns != nullptr) && (emitLastIns->idIns() == INS_call))
    {
        return true;
    }

    return false;
}

/*****************************************************************************
 * We're about to create an epilog. If the last instruction we output was a 'call',
 * then we need to insert a NOP, to allow for proper exception-handling behavior.
 */
void emitter::emitOutputPreEpilogNOP()
{
    if (emitIsLastInsCall())
    {
        emitIns(INS_nop);
    }
}

#endif // TARGET_AMD64

// Size of rex prefix in bytes
unsigned emitter::emitGetRexPrefixSize(instruction ins)
{
    // In case of AVX instructions, REX prefixes are part of VEX prefix.
    // And hence requires no additional byte to encode REX prefixes.
    if (IsAVXInstruction(ins))
    {
        return 0;
    }

    // If not AVX, then we would need 1-byte to encode REX prefix.
    return 1;
}

// Size of vex prefix in bytes
unsigned emitter::emitGetVexPrefixSize(instruction ins, emitAttr attr)
{
    if (IsAVXInstruction(ins))
    {
        // Worst-case (3-byte) VEX prefix; emitGetAdjustedSize compensates for
        // opcode bytes the VEX prefix absorbs.
        return 3;
    }

    // If not AVX, then we don't need to encode vex prefix.
    return 0;
}

//------------------------------------------------------------------------
// emitGetAdjustedSize: Determines any size adjustment needed for a given instruction based on the current
// configuration.
//
// Arguments:
//    ins   -- The instruction being emitted
//    attr  -- The emit attribute
//    code  -- The current opcode and any known prefixes
unsigned emitter::emitGetAdjustedSize(instruction ins, emitAttr attr, code_t code)
{
    unsigned adjustedSize = 0;

    if (IsAVXInstruction(ins))
    {
        // VEX prefix encodes some bytes of the opcode and as a result, overall size of the instruction reduces.
        // Therefore, to estimate the size adding VEX prefix size and size of instruction opcode bytes will always
        // overestimate.
        // Instead this routine will adjust the size of VEX prefix based on the number of bytes of opcode it encodes so
        // that
        // instruction size estimate will be accurate.
        // Basically this will decrease the vexPrefixSize, so that opcodeSize + vexPrefixAdjustedSize will be the right
        // size.
        //
        // rightOpcodeSize + vexPrefixSize
        //  = (opcodeSize - ExtrabytesSize) + vexPrefixSize
        //  = opcodeSize + (vexPrefixSize - ExtrabytesSize)
        //  = opcodeSize + vexPrefixAdjustedSize

        unsigned vexPrefixAdjustedSize = emitGetVexPrefixSize(ins, attr);
        assert(vexPrefixAdjustedSize == 3);

        // In this case, opcode will contains escape prefix at least one byte,
        // vexPrefixAdjustedSize should be minus one.
        vexPrefixAdjustedSize -= 1;

        // Get the fourth byte in Opcode.
        // If this byte is non-zero, then we should check whether the opcode contains SIMD prefix or not.
        BYTE check = (code >> 24) & 0xFF;
        if (check != 0)
        {
            // 3-byte opcode: with the bytes ordered as 0x2211RM33 or
            // 4-byte opcode: with the bytes ordered as 0x22114433
            // Simd prefix is at the first byte.
            BYTE sizePrefix = (code >> 16) & 0xFF;
            if (sizePrefix != 0 && isPrefix(sizePrefix))
            {
                vexPrefixAdjustedSize -= 1;
            }

            // If the opcode size is 4 bytes, then the second escape prefix is at fourth byte in opcode.
            // But in this case the opcode has not counted R\M part.
            // opcodeSize + VexPrefixAdjustedSize - ExtraEscapePrefixSize + ModR\MSize
            //=opcodeSize + VexPrefixAdjustedSize -1 + 1
            //=opcodeSize + VexPrefixAdjustedSize
            // So although we may have second byte escape prefix, we won't decrease vexPrefixAdjustedSize.
        }

        adjustedSize = vexPrefixAdjustedSize;
    }
    else if (Is4ByteSSEInstruction(ins))
    {
        // The 4-Byte SSE instructions require one additional byte to hold the ModRM byte
        adjustedSize++;
    }
    else
    {
        if (ins == INS_crc32)
        {
            // Adjust code size for CRC32 that has 4-byte opcode but does not use SSE38 or SSE3A encoding.
            adjustedSize++;
        }

        if ((attr == EA_2BYTE) && (ins != INS_movzx) && (ins != INS_movsx))
        {
            // Most 16-bit operand instructions will need a 0x66 prefix.
            adjustedSize++;
        }
    }

    return adjustedSize;
}

//
//------------------------------------------------------------------------
// emitGetPrefixSize: Get size of rex or vex prefix emitted in code
//
// Arguments:
//    code                  -- The current opcode and any known prefixes
//    includeRexPrefixSize  -- If Rex Prefix size should be included or not
//
unsigned emitter::emitGetPrefixSize(code_t code, bool includeRexPrefixSize)
{
    if (hasVexPrefix(code))
    {
        return 3;
    }

    if (includeRexPrefixSize && hasRexPrefix(code))
    {
        return 1;
    }

    return 0;
}

#ifdef TARGET_X86
/*****************************************************************************
 *
 *  Record a non-empty stack
 */

void emitter::emitMarkStackLvl(unsigned stackLevel)
{
    assert(int(stackLevel) >= 0);
    assert(emitCurStackLvl == 0);
    assert(emitCurIG->igStkLvl == 0);
    assert(emitCurIGfreeNext == emitCurIGfreeBase);
    assert(stackLevel && stackLevel % sizeof(int) == 0);

    emitCurStackLvl = emitCurIG->igStkLvl = stackLevel;

    if (emitMaxStackDepth < emitCurStackLvl)
    {
        JITDUMP("Upping emitMaxStackDepth from %d to %d\n", emitMaxStackDepth, emitCurStackLvl);
        emitMaxStackDepth = emitCurStackLvl;
    }
}
#endif

/*****************************************************************************
 *
 *  Get hold of the address mode displacement value for an indirect call.
 */

inline ssize_t emitter::emitGetInsCIdisp(instrDesc* id)
{
    if (id->idIsLargeCall())
    {
        // Large call descriptors keep the displacement out-of-line.
        return ((instrDescCGCA*)id)->idcDisp;
    }
    else
    {
        assert(!id->idIsLargeDsp());
        assert(!id->idIsLargeCns());

        return id->idAddr()->iiaAddrMode.amDisp;
    }
}

/** ***************************************************************************
 *
 *  The following table is used by the instIsFP()/instUse/DefFlags() helpers.
 */

// clang-format off
const insFlags CodeGenInterface::instInfo[] =
{
    #define INST0(id, nm, um, mr,                 flags) static_cast<insFlags>(flags),
    #define INST1(id, nm, um, mr,                 flags) static_cast<insFlags>(flags),
    #define INST2(id, nm, um, mr, mi,             flags) static_cast<insFlags>(flags),
    #define INST3(id, nm, um, mr, mi, rm,         flags) static_cast<insFlags>(flags),
    #define INST4(id, nm, um, mr, mi, rm, a4,     flags) static_cast<insFlags>(flags),
    #define INST5(id, nm, um, mr, mi, rm, a4, rr, flags) static_cast<insFlags>(flags),
    #include "instrs.h"
    #undef  INST0
    #undef  INST1
    #undef  INST2
    #undef  INST3
    #undef  INST4
    #undef  INST5
};
// clang-format on

/*****************************************************************************
 *
 *  Initialize the table used by emitInsModeFormat().
 */

// clang-format off
const BYTE emitter::emitInsModeFmtTab[] =
{
    #define INST0(id, nm, um, mr,                 flags) um,
    #define INST1(id, nm, um, mr,                 flags) um,
    #define INST2(id, nm, um, mr, mi,             flags) um,
    #define INST3(id, nm, um, mr, mi, rm,         flags) um,
    #define INST4(id, nm, um, mr, mi, rm, a4,     flags) um,
    #define INST5(id, nm, um, mr, mi, rm, a4, rr, flags) um,
    #include "instrs.h"
    #undef  INST0
    #undef  INST1
    #undef  INST2
    #undef  INST3
    #undef  INST4
    #undef  INST5
};
// clang-format on

#ifdef DEBUG
unsigned const emitter::emitInsModeFmtCnt = ArrLen(emitInsModeFmtTab);
#endif

/*****************************************************************************
 *
 *  Combine the given base format with the update mode of the instruction.
 */

inline emitter::insFormat emitter::emitInsModeFormat(instruction ins, insFormat base)
{
    // The IF_Rxx formats are laid out so that adding the update mode (RD/WR/RW)
    // to the base "read" format yields the corresponding variant.
    assert(IF_RRD + IUM_RD == IF_RRD);
    assert(IF_RRD + IUM_WR == IF_RWR);
    assert(IF_RRD + IUM_RW == IF_RRW);

    return (insFormat)(base + emitInsUpdateMode(ins));
}

// This is a helper we need due to Vs Whidbey #254016 in order to distinguish
// if we can not possibly be updating an integer register. This is not the best
// solution, but the other ones (see bug) are going to be much more complicated.
// emitInsCanOnlyWriteSSE2OrAVXReg: Returns true if the given SSE/AVX instruction
// can only ever write an XMM/YMM register (i.e. it never updates a general
// purpose integer register), false otherwise.
bool emitter::emitInsCanOnlyWriteSSE2OrAVXReg(instrDesc* id)
{
    instruction ins = id->idIns();

    if (!IsSSEOrAVXInstruction(ins))
    {
        return false;
    }

    switch (ins)
    {
        case INS_andn:
        case INS_bextr:
        case INS_blsi:
        case INS_blsmsk:
        case INS_blsr:
        case INS_bzhi:
        case INS_cvttsd2si:
        case INS_cvttss2si:
        case INS_cvtsd2si:
        case INS_cvtss2si:
        case INS_extractps:
        case INS_movd:
        case INS_movmskpd:
        case INS_movmskps:
        case INS_mulx:
        case INS_pdep:
        case INS_pext:
        case INS_pmovmskb:
        case INS_pextrb:
        case INS_pextrd:
        case INS_pextrq:
        case INS_pextrw:
        case INS_pextrw_sse41:
        case INS_rorx:
        {
            // These SSE instructions write to a general purpose integer register.
            return false;
        }

        default:
        {
            return true;
        }
    }
}

/*****************************************************************************
 *
 *  Returns the base encoding of the given CPU instruction.
 */

inline size_t insCode(instruction ins)
{
    // Table of base ("MR") encodings, generated from instrs.h.
    // clang-format off
    const static size_t insCodes[] =
    {
        #define INST0(id, nm, um, mr,                 flags) mr,
        #define INST1(id, nm, um, mr,                 flags) mr,
        #define INST2(id, nm, um, mr, mi,             flags) mr,
        #define INST3(id, nm, um, mr, mi, rm,         flags) mr,
        #define INST4(id, nm, um, mr, mi, rm, a4,     flags) mr,
        #define INST5(id, nm, um, mr, mi, rm, a4, rr, flags) mr,
        #include "instrs.h"
        #undef  INST0
        #undef  INST1
        #undef  INST2
        #undef  INST3
        #undef  INST4
        #undef  INST5
    };
    // clang-format on

    assert((unsigned)ins < ArrLen(insCodes));
    assert((insCodes[ins] != BAD_CODE));

    return insCodes[ins];
}

/*****************************************************************************
 *
 *  Returns the "AL/AX/EAX, imm" accumulator encoding of the given instruction.
 */

inline size_t insCodeACC(instruction ins)
{
    // Only INST4/INST5 entries define an accumulator ("a4") form; all other slots are empty.
    // clang-format off
    const static size_t insCodesACC[] =
    {
        #define INST0(id, nm, um, mr,                 flags)
        #define INST1(id, nm, um, mr,                 flags)
        #define INST2(id, nm, um, mr, mi,             flags)
        #define INST3(id, nm, um, mr, mi, rm,         flags)
        #define INST4(id, nm, um, mr, mi, rm, a4,     flags) a4,
        #define INST5(id, nm, um, mr, mi, rm, a4, rr, flags) a4,
        #include "instrs.h"
        #undef  INST0
        #undef  INST1
        #undef  INST2
        #undef  INST3
        #undef  INST4
        #undef  INST5
    };
    // clang-format on

    assert((unsigned)ins < ArrLen(insCodesACC));
    assert((insCodesACC[ins] != BAD_CODE));

    return insCodesACC[ins];
}

/*****************************************************************************
 *
 *  Returns the "register" encoding of the given CPU instruction.
 */

inline size_t insCodeRR(instruction ins)
{
    // Only INST5 entries define a register ("rr") form.
    // clang-format off
    const static size_t insCodesRR[] =
    {
        #define INST0(id, nm, um, mr,                 flags)
        #define INST1(id, nm, um, mr,                 flags)
        #define INST2(id, nm, um, mr, mi,             flags)
        #define INST3(id, nm, um, mr, mi, rm,         flags)
        #define INST4(id, nm, um, mr, mi, rm, a4,     flags)
        #define INST5(id, nm, um, mr, mi, rm, a4, rr, flags) rr,
        #include "instrs.h"
        #undef  INST0
        #undef  INST1
        #undef  INST2
        #undef  INST3
        #undef  INST4
        #undef  INST5
    };
    // clang-format on

    assert((unsigned)ins < ArrLen(insCodesRR));
    assert((insCodesRR[ins] != BAD_CODE));

    return insCodesRR[ins];
}

// Table of "reg, [r/m]" ("rm") encodings; INST0-INST2 entries have no RM form.
// clang-format off
const static size_t insCodesRM[] =
{
    #define INST0(id, nm, um, mr,                 flags)
    #define INST1(id, nm, um, mr,                 flags)
    #define INST2(id, nm, um, mr, mi,             flags)
    #define INST3(id, nm, um, mr, mi, rm,         flags) rm,
    #define INST4(id, nm, um, mr, mi, rm, a4,     flags) rm,
    #define INST5(id, nm, um, mr, mi, rm, a4, rr, flags) rm,
    #include "instrs.h"
    #undef  INST0
    #undef  INST1
    #undef  INST2
    #undef  INST3
    #undef  INST4
    #undef  INST5
};
// clang-format on

// Returns true iff the given CPU instruction has an RM encoding.
inline bool hasCodeRM(instruction ins) { assert((unsigned)ins < ArrLen(insCodesRM)); return ((insCodesRM[ins] != BAD_CODE)); } /***************************************************************************** * * Returns the "reg, [r/m]" encoding of the given CPU instruction. */ inline size_t insCodeRM(instruction ins) { assert((unsigned)ins < ArrLen(insCodesRM)); assert((insCodesRM[ins] != BAD_CODE)); return insCodesRM[ins]; } // clang-format off const static size_t insCodesMI[] = { #define INST0(id, nm, um, mr, flags) #define INST1(id, nm, um, mr, flags) #define INST2(id, nm, um, mr, mi, flags) mi, #define INST3(id, nm, um, mr, mi, rm, flags) mi, #define INST4(id, nm, um, mr, mi, rm, a4, flags) mi, #define INST5(id, nm, um, mr, mi, rm, a4, rr, flags) mi, #include "instrs.h" #undef INST0 #undef INST1 #undef INST2 #undef INST3 #undef INST4 #undef INST5 }; // clang-format on // Returns true iff the give CPU instruction has an MI encoding. inline bool hasCodeMI(instruction ins) { assert((unsigned)ins < ArrLen(insCodesMI)); return ((insCodesMI[ins] != BAD_CODE)); } /***************************************************************************** * * Returns the "[r/m], 32-bit icon" encoding of the given CPU instruction. */ inline size_t insCodeMI(instruction ins) { assert((unsigned)ins < ArrLen(insCodesMI)); assert((insCodesMI[ins] != BAD_CODE)); return insCodesMI[ins]; } // clang-format off const static size_t insCodesMR[] = { #define INST0(id, nm, um, mr, flags) #define INST1(id, nm, um, mr, flags) mr, #define INST2(id, nm, um, mr, mi, flags) mr, #define INST3(id, nm, um, mr, mi, rm, flags) mr, #define INST4(id, nm, um, mr, mi, rm, a4, flags) mr, #define INST5(id, nm, um, mr, mi, rm, a4, rr, flags) mr, #include "instrs.h" #undef INST0 #undef INST1 #undef INST2 #undef INST3 #undef INST4 #undef INST5 }; // clang-format on // Returns true iff the give CPU instruction has an MR encoding. 
// hasCodeMR: Returns true iff the given instruction defines an "[r/m], reg" encoding.
inline bool hasCodeMR(instruction ins)
{
    assert((unsigned)ins < ArrLen(insCodesMR));
    return ((insCodesMR[ins] != BAD_CODE));
}

/*****************************************************************************
 *
 *  Returns the "[r/m], reg" or "[r/m]" encoding of the given CPU instruction.
 */

inline size_t insCodeMR(instruction ins)
{
    assert((unsigned)ins < ArrLen(insCodesMR));
    assert((insCodesMR[ins] != BAD_CODE));

    return insCodesMR[ins];
}

// Return true if the instruction uses the SSE38 or SSE3A macro in instrsXArch.h.
bool emitter::EncodedBySSE38orSSE3A(instruction ins)
{
    // 0x66 0x0F 0x38 / 0x66 0x0F 0x3A opcode prefixes, with the second opcode
    // byte masked out so any instruction in those maps matches.
    const size_t SSE38 = 0x0F660038;
    const size_t SSE3A = 0x0F66003A;
    const size_t MASK  = 0xFFFF00FF;

    size_t insCode = 0;

    if (!IsSSEOrAVXInstruction(ins))
    {
        return false;
    }

    // Check whichever encoding form the instruction defines (RM, then MI, then MR).
    if (hasCodeRM(ins))
    {
        insCode = insCodeRM(ins);
    }
    else if (hasCodeMI(ins))
    {
        insCode = insCodeMI(ins);
    }
    else if (hasCodeMR(ins))
    {
        insCode = insCodeMR(ins);
    }

    insCode &= MASK;
    return insCode == SSE38 || insCode == SSE3A;
}

/*****************************************************************************
 *
 *  Returns an encoding for the specified register to be used in the bit0-2
 *  part of an opcode.
 */

inline unsigned emitter::insEncodeReg012(instruction ins, regNumber reg, emitAttr size, code_t* code)
{
    assert(reg < REG_STK);

#ifdef TARGET_AMD64
    // Either code is not NULL or reg is not an extended reg.
    // If reg is an extended reg, instruction needs to be prefixed with 'REX'
    // which would require code != NULL.
    assert(code != nullptr || !IsExtendedReg(reg));

    if (IsExtendedReg(reg))
    {
        *code = AddRexBPrefix(ins, *code); // REX.B
    }
    else if ((EA_SIZE(size) == EA_1BYTE) && (reg > REG_RBX) && (code != nullptr))
    {
        // We are assuming that we only use/encode SPL, BPL, SIL and DIL
        // not the corresponding AH, CH, DH, or BH
        *code = AddRexPrefix(ins, *code); // REX
    }
#endif // TARGET_AMD64

    unsigned regBits = RegEncoding(reg);

    assert(regBits < 8);
    return regBits;
}

/*****************************************************************************
 *
 *  Returns an encoding for the specified register to be used in the bit3-5
 *  part of an opcode.
 */

inline unsigned emitter::insEncodeReg345(instruction ins, regNumber reg, emitAttr size, code_t* code)
{
    assert(reg < REG_STK);

#ifdef TARGET_AMD64
    // Either code is not NULL or reg is not an extended reg.
    // If reg is an extended reg, instruction needs to be prefixed with 'REX'
    // which would require code != NULL.
    assert(code != nullptr || !IsExtendedReg(reg));

    if (IsExtendedReg(reg))
    {
        *code = AddRexRPrefix(ins, *code); // REX.R
    }
    else if ((EA_SIZE(size) == EA_1BYTE) && (reg > REG_RBX) && (code != nullptr))
    {
        // We are assuming that we only use/encode SPL, BPL, SIL and DIL
        // not the corresponding AH, CH, DH, or BH
        *code = AddRexPrefix(ins, *code); // REX
    }
#endif // TARGET_AMD64

    unsigned regBits = RegEncoding(reg);

    assert(regBits < 8);
    return (regBits << 3);
}

/***********************************************************************************
 *
 *  Returns modified AVX opcode with the specified register encoded in bits 3-6 of
 *  byte 2 of VEX prefix.
 */

inline emitter::code_t emitter::insEncodeReg3456(instruction ins, regNumber reg, emitAttr size, code_t code)
{
    assert(reg < REG_STK);
    assert(IsAVXInstruction(ins));
    assert(hasVexPrefix(code));

    // Get 4-bit register encoding
    // RegEncoding() gives lower 3 bits
    // IsExtendedReg() gives MSB.
    code_t regBits = RegEncoding(reg);
    if (IsExtendedReg(reg))
    {
        regBits |= 0x08;
    }

    // VEX prefix encodes register operand in 1's complement form
    // Shift count = 4-bytes of opcode + 0-2 bits
    assert(regBits <= 0xF);
    regBits <<= 35;
    // XOR (rather than OR) implements the 1's complement vvvv field against the
    // all-ones default already present in the VEX prefix bits of 'code'.
    return code ^ regBits;
}

/*****************************************************************************
 *
 *  Returns an encoding for the specified register to be used in the bit3-5
 *  part of an SIB byte (unshifted).
 *  Used exclusively to generate the REX.X bit and truncate the register.
 */

inline unsigned emitter::insEncodeRegSIB(instruction ins, regNumber reg, code_t* code)
{
    assert(reg < REG_STK);

#ifdef TARGET_AMD64
    // Either code is not NULL or reg is not an extended reg.
    // If reg is an extended reg, instruction needs to be prefixed with 'REX'
    // which would require code != NULL.
    assert(code != nullptr || reg < REG_R8 || (reg >= REG_XMM0 && reg < REG_XMM8));

    if (IsExtendedReg(reg))
    {
        *code = AddRexXPrefix(ins, *code); // REX.X
    }
    unsigned regBits = RegEncoding(reg);
#else  // !TARGET_AMD64
    unsigned regBits = reg;
#endif // !TARGET_AMD64

    assert(regBits < 8);
    return regBits;
}

/*****************************************************************************
 *
 *  Returns the "[r/m]" opcode with the mod/RM field set to register.
 */

inline emitter::code_t emitter::insEncodeMRreg(instruction ins, code_t code)
{
    // If Byte 4 (which is 0xFF00) is 0, that's where the RM encoding goes.
    // Otherwise, it will be placed after the 4 byte encoding.
    if ((code & 0xFF00) == 0)
    {
        assert((code & 0xC000) == 0);
        code |= 0xC000; // mod = 11 (register-direct addressing)
    }

    return code;
}

/*****************************************************************************
 *
 *  Returns the given "[r/m]" opcode with the mod/RM field set to register.
 */

inline emitter::code_t emitter::insEncodeRMreg(instruction ins, code_t code)
{
    // If Byte 4 (which is 0xFF00) is 0, that's where the RM encoding goes.
    // Otherwise, it will be placed after the 4 byte encoding.
    if ((code & 0xFF00) == 0)
    {
        assert((code & 0xC000) == 0);
        code |= 0xC000; // mod = 11 (register-direct addressing)
    }

    return code;
}

/*****************************************************************************
 *
 *  Returns the "byte ptr [r/m]" opcode with the mod/RM field set to
 *  the given register.
 */

inline emitter::code_t emitter::insEncodeMRreg(instruction ins, regNumber reg, emitAttr size, code_t code)
{
    assert((code & 0xC000) == 0);
    code |= 0xC000;
    unsigned regcode = insEncodeReg012(ins, reg, size, &code) << 8;
    code |= regcode;

    return code;
}

/*****************************************************************************
 *
 *  Returns the "byte ptr [r/m], icon" opcode with the mod/RM field set to
 *  the given register.
 */

inline emitter::code_t emitter::insEncodeMIreg(instruction ins, regNumber reg, emitAttr size, code_t code)
{
    assert((code & 0xC000) == 0);
    code |= 0xC000;
    unsigned regcode = insEncodeReg012(ins, reg, size, &code) << 8;
    code |= regcode;

    return code;
}

/*****************************************************************************
 *
 *  Returns true iff the given instruction does not have a "[r/m], icon" form, but *does* have a
 *  "reg,reg,imm8" form.
 */

inline bool insNeedsRRIb(instruction ins)
{
    // If this list gets longer, use a switch or a table.
    return ins == INS_imul;
}

/*****************************************************************************
 *
 *  Returns the "reg,reg,imm8" opcode with both the reg's set to the
 *  the given register.
 */

inline emitter::code_t emitter::insEncodeRRIb(instruction ins, regNumber reg, emitAttr size)
{
    assert(size == EA_4BYTE); // All we handle for now.
    assert(insNeedsRRIb(ins));
    // If this list gets longer, use a switch, or a table lookup.
    code_t   code    = 0x69c0; // imul reg, reg, imm32 opcode with mod=11
    unsigned regcode = insEncodeReg012(ins, reg, size, &code);
    // We use the same register as source and destination.  (Could have another version that does both regs...)
    code |= regcode;
    code |= (regcode << 3);
    return code;
}

/*****************************************************************************
 *
 *  Returns the "+reg" opcode with the the given register set into the low
 *  nibble of the opcode
 */

inline emitter::code_t emitter::insEncodeOpreg(instruction ins, regNumber reg, emitAttr size)
{
    code_t   code    = insCodeRR(ins);
    unsigned regcode = insEncodeReg012(ins, reg, size, &code);
    code |= regcode;
    return code;
}

/*****************************************************************************
 *
 *  Return the 'SS' field value for the given index scale factor.
 */

inline unsigned emitter::insSSval(unsigned scale)
{
    assert(scale == 1 || scale == 2 || scale == 4 || scale == 8);

    // SIB 'SS' bits (pre-shifted into bits 6-7); 0xFF marks invalid scales.
    const static BYTE scales[] = {
        0x00, // 1
        0x40, // 2
        0xFF, // 3
        0x80, // 4
        0xFF, // 5
        0xFF, // 6
        0xFF, // 7
        0xC0, // 8
    };

    return scales[scale - 1];
}

// Map from emitJumpKind to the corresponding jump instruction; generated from emitjmps.h,
// bracketed by INS_nop (EJ_NONE) and INS_call (the trailing entry).
const instruction emitJumpKindInstructions[] = {INS_nop,

#define JMP_SMALL(en, rev, ins) INS_##ins,
#include "emitjmps.h"

                                                INS_call};

// Map from emitJumpKind to the jump kind with the reversed condition; generated from emitjmps.h.
const emitJumpKind emitReverseJumpKinds[] = {
    EJ_NONE,

#define JMP_SMALL(en, rev, ins) EJ_##rev,
#include "emitjmps.h"
};

/*****************************************************************************
 * Look up the instruction for a jump kind
 */

/*static*/ instruction emitter::emitJumpKindToIns(emitJumpKind jumpKind)
{
    assert((unsigned)jumpKind < ArrLen(emitJumpKindInstructions));
    return emitJumpKindInstructions[jumpKind];
}

/*****************************************************************************
 * Reverse the conditional jump
 */

/* static */ emitJumpKind emitter::emitReverseJumpKind(emitJumpKind jumpKind)
{
    assert(jumpKind < EJ_COUNT);
    return emitReverseJumpKinds[jumpKind];
}

/*****************************************************************************
 * The size for these instructions is less than EA_4BYTE,
 * but the target register need not be byte-addressable
 */

inline bool emitInstHasNoCode(instruction ins)
{
    // INS_align is a pseudo-instruction: it reserves space but emits no machine code itself.
    if (ins == INS_align)
    {
        return true;
    }

    return false;
}
/***************************************************************************** * When encoding instructions that operate on byte registers * we have to ensure that we use a low register (EAX, EBX, ECX or EDX) * otherwise we will incorrectly encode the instruction */ bool emitter::emitVerifyEncodable(instruction ins, emitAttr size, regNumber reg1, regNumber reg2 /* = REG_NA */) { #if CPU_HAS_BYTE_REGS if (size != EA_1BYTE) // Not operating on a byte register is fine { return true; } if ((ins != INS_movsx) && // These three instructions support high register (ins != INS_movzx) // encodings for reg1 #ifdef FEATURE_HW_INTRINSICS && (ins != INS_crc32) #endif ) { // reg1 must be a byte-able register if ((genRegMask(reg1) & RBM_BYTE_REGS) == 0) { return false; } } // if reg2 is not REG_NA then reg2 must be a byte-able register if ((reg2 != REG_NA) && ((genRegMask(reg2) & RBM_BYTE_REGS) == 0)) { return false; } #endif // The instruction can be encoded return true; } //------------------------------------------------------------------------ // emitInsSize: Estimate the size (in bytes of generated code) of the given instruction. // // Arguments: // code -- The current opcode and any known prefixes // includeRexPrefixSize -- If Rex Prefix size should be included or not // inline UNATIVE_OFFSET emitter::emitInsSize(code_t code, bool includeRexPrefixSize) { UNATIVE_OFFSET size = (code & 0xFF000000) ? 4 : (code & 0x00FF0000) ? 
3 : 2; #ifdef TARGET_AMD64 size += emitGetPrefixSize(code, includeRexPrefixSize); #endif return size; } //------------------------------------------------------------------------ // emitInsSizeRR: Determines the code size for an instruction encoding that does not have any addressing modes // // Arguments: // ins -- The instruction being emitted // code -- The current opcode and any known prefixes inline UNATIVE_OFFSET emitter::emitInsSizeRR(instrDesc* id, code_t code) { assert(id->idIns() != INS_invalid); instruction ins = id->idIns(); emitAttr attr = id->idOpSize(); UNATIVE_OFFSET sz = emitGetAdjustedSize(ins, attr, code); bool includeRexPrefixSize = true; // REX prefix if (TakesRexWPrefix(ins, attr) || IsExtendedReg(id->idReg1(), attr) || IsExtendedReg(id->idReg2(), attr) || (!id->idIsSmallDsc() && (IsExtendedReg(id->idReg3(), attr) || IsExtendedReg(id->idReg4(), attr)))) { sz += emitGetRexPrefixSize(ins); includeRexPrefixSize = !IsAVXInstruction(ins); } sz += emitInsSize(code, includeRexPrefixSize); return sz; } //------------------------------------------------------------------------ // emitInsSizeRR: Determines the code size for an instruction encoding that does not have any addressing modes and // includes an immediate value // // Arguments: // ins -- The instruction being emitted // code -- The current opcode and any known prefixes // val -- The immediate value to encode inline UNATIVE_OFFSET emitter::emitInsSizeRR(instrDesc* id, code_t code, int val) { instruction ins = id->idIns(); UNATIVE_OFFSET valSize = EA_SIZE_IN_BYTES(id->idOpSize()); bool valInByte = ((signed char)val == val) && (ins != INS_mov) && (ins != INS_test); #ifdef TARGET_AMD64 // mov reg, imm64 is the only opcode which takes a full 8 byte immediate // all other opcodes take a sign-extended 4-byte immediate noway_assert(valSize <= sizeof(INT32) || !id->idIsCnsReloc()); #endif // TARGET_AMD64 if (valSize > sizeof(INT32)) { valSize = sizeof(INT32); } if (id->idIsCnsReloc()) { valInByte = 
false; // relocs can't be placed in a byte assert(valSize == sizeof(INT32)); } if (valInByte) { valSize = sizeof(char); } else { assert(!IsSSEOrAVXInstruction(ins)); } return valSize + emitInsSizeRR(id, code); } inline UNATIVE_OFFSET emitter::emitInsSizeRR(instruction ins, regNumber reg1, regNumber reg2, emitAttr attr) { emitAttr size = EA_SIZE(attr); // If Byte 4 (which is 0xFF00) is zero, that's where the RM encoding goes. // Otherwise, it will be placed after the 4 byte encoding, making the total 5 bytes. // This would probably be better expressed as a different format or something? code_t code = insCodeRM(ins); UNATIVE_OFFSET sz = emitGetAdjustedSize(ins, size, insCodeRM(ins)); bool includeRexPrefixSize = true; // REX prefix if (!hasRexPrefix(code)) { if ((TakesRexWPrefix(ins, size) && ((ins != INS_xor) || (reg1 != reg2))) || IsExtendedReg(reg1, attr) || IsExtendedReg(reg2, attr)) { sz += emitGetRexPrefixSize(ins); includeRexPrefixSize = false; } } if ((code & 0xFF00) != 0) { sz += IsSSEOrAVXInstruction(ins) ? emitInsSize(code, includeRexPrefixSize) : 5; } else { sz += emitInsSize(insEncodeRMreg(ins, code), includeRexPrefixSize); } return sz; } /*****************************************************************************/ inline UNATIVE_OFFSET emitter::emitInsSizeSV(code_t code, int var, int dsp) { UNATIVE_OFFSET size = emitInsSize(code, /* includeRexPrefixSize */ true); UNATIVE_OFFSET offs; bool offsIsUpperBound = true; bool EBPbased = true; /* Is this a temporary? */ if (var < 0) { /* An address off of ESP takes an extra byte */ if (!emitHasFramePtr) { size++; } // The offset is already assigned. Find the temp. TempDsc* tmp = codeGen->regSet.tmpFindNum(var, RegSet::TEMP_USAGE_USED); if (tmp == nullptr) { // It might be in the free lists, if we're working on zero initializing the temps. 
tmp = codeGen->regSet.tmpFindNum(var, RegSet::TEMP_USAGE_FREE); } assert(tmp != nullptr); offs = tmp->tdTempOffs(); // We only care about the magnitude of the offset here, to determine instruction size. if (emitComp->isFramePointerUsed()) { if ((int)offs < 0) { offs = -(int)offs; } } else { // SP-based offsets must already be positive. assert((int)offs >= 0); } } else { /* Get the frame offset of the (non-temp) variable */ offs = dsp + emitComp->lvaFrameAddress(var, &EBPbased); /* An address off of ESP takes an extra byte */ if (!EBPbased) { ++size; } /* Is this a stack parameter reference? */ if ((emitComp->lvaIsParameter(var) #if !defined(TARGET_AMD64) || defined(UNIX_AMD64_ABI) && !emitComp->lvaIsRegArgument(var) #endif // !TARGET_AMD64 || UNIX_AMD64_ABI ) || (static_cast<unsigned>(var) == emitComp->lvaRetAddrVar)) { /* If no EBP frame, arguments and ret addr are off of ESP, above temps */ if (!EBPbased) { assert((int)offs >= 0); offsIsUpperBound = false; // since #temps can increase offs += emitMaxTmpSize; } } else { /* Locals off of EBP are at negative offsets */ if (EBPbased) { #if defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI) // If localloc is not used, then ebp chaining is done and hence // offset of locals will be at negative offsets, Otherwise offsets // will be positive. In future, when RBP gets positioned in the // middle of the frame so as to optimize instruction encoding size, // the below asserts needs to be modified appropriately. // However, for Unix platforms, we always do frame pointer chaining, // so offsets from the frame pointer will always be negative. 
if (emitComp->compLocallocUsed || emitComp->opts.compDbgEnC) { noway_assert((int)offs >= 0); } else #endif { // Dev10 804810 - failing this assert can lead to bad codegen and runtime crashes CLANG_FORMAT_COMMENT_ANCHOR; #ifdef UNIX_AMD64_ABI const LclVarDsc* varDsc = emitComp->lvaGetDesc(var); bool isRegPassedArg = varDsc->lvIsParam && varDsc->lvIsRegArg; // Register passed args could have a stack offset of 0. noway_assert((int)offs < 0 || isRegPassedArg || emitComp->opts.IsOSR()); #else // !UNIX_AMD64_ABI // OSR transitioning to RBP frame currently can have mid-frame FP noway_assert(((int)offs < 0) || emitComp->opts.IsOSR()); #endif // !UNIX_AMD64_ABI } assert(emitComp->lvaTempsHaveLargerOffsetThanVars()); // lvaInlinedPInvokeFrameVar and lvaStubArgumentVar are placed below the temps if (unsigned(var) == emitComp->lvaInlinedPInvokeFrameVar || unsigned(var) == emitComp->lvaStubArgumentVar) { offs -= emitMaxTmpSize; } if ((int)offs < 0) { // offset is negative return size + ((int(offs) >= SCHAR_MIN) ? sizeof(char) : sizeof(int)); } #ifdef TARGET_AMD64 // This case arises for localloc frames else { return size + ((offs <= SCHAR_MAX) ? sizeof(char) : sizeof(int)); } #endif } if (emitComp->lvaTempsHaveLargerOffsetThanVars() == false) { offs += emitMaxTmpSize; } } } assert((int)offs >= 0); #if !FEATURE_FIXED_OUT_ARGS /* Are we addressing off of ESP? */ if (!emitHasFramePtr) { /* Adjust the effective offset if necessary */ if (emitCntStackDepth) offs += emitCurStackLvl; // we could (and used to) check for the special case [sp] here but the stack offset // estimator was off, and there is very little harm in overestimating for such a // rare case. 
} #endif // !FEATURE_FIXED_OUT_ARGS // printf("lcl = %04X, tmp = %04X, stk = %04X, offs = %04X\n", // emitLclSize, emitMaxTmpSize, emitCurStackLvl, offs); #ifdef TARGET_AMD64 bool useSmallEncoding = (SCHAR_MIN <= (int)offs) && ((int)offs <= SCHAR_MAX); #else bool useSmallEncoding = (offs <= size_t(SCHAR_MAX)); #endif // If it is ESP based, and the offset is zero, we will not encode the disp part. if (!EBPbased && offs == 0) { return size; } else { return size + (useSmallEncoding ? sizeof(char) : sizeof(int)); } } inline UNATIVE_OFFSET emitter::emitInsSizeSV(instrDesc* id, code_t code, int var, int dsp) { assert(id->idIns() != INS_invalid); instruction ins = id->idIns(); emitAttr attrSize = id->idOpSize(); UNATIVE_OFFSET prefix = emitGetAdjustedSize(ins, attrSize, code); // REX prefix if (TakesRexWPrefix(ins, attrSize) || IsExtendedReg(id->idReg1(), attrSize) || IsExtendedReg(id->idReg2(), attrSize)) { prefix += emitGetRexPrefixSize(ins); } return prefix + emitInsSizeSV(code, var, dsp); } inline UNATIVE_OFFSET emitter::emitInsSizeSV(instrDesc* id, code_t code, int var, int dsp, int val) { assert(id->idIns() != INS_invalid); instruction ins = id->idIns(); emitAttr attrSize = id->idOpSize(); UNATIVE_OFFSET valSize = EA_SIZE_IN_BYTES(attrSize); UNATIVE_OFFSET prefix = emitGetAdjustedSize(ins, attrSize, code); bool valInByte = ((signed char)val == val) && (ins != INS_mov) && (ins != INS_test); #ifdef TARGET_AMD64 // mov reg, imm64 is the only opcode which takes a full 8 byte immediate // all other opcodes take a sign-extended 4-byte immediate noway_assert(valSize <= sizeof(int) || !id->idIsCnsReloc()); #endif // TARGET_AMD64 if (valSize > sizeof(int)) { valSize = sizeof(int); } if (id->idIsCnsReloc()) { valInByte = false; // relocs can't be placed in a byte assert(valSize == sizeof(int)); } if (valInByte) { valSize = sizeof(char); } else { assert(!IsSSEOrAVXInstruction(ins)); } // 64-bit operand instructions will need a REX.W prefix if (TakesRexWPrefix(ins, attrSize) || 
IsExtendedReg(id->idReg1(), attrSize) || IsExtendedReg(id->idReg2(), attrSize)) { prefix += emitGetRexPrefixSize(ins); } return prefix + valSize + emitInsSizeSV(code, var, dsp); } /*****************************************************************************/ static bool baseRegisterRequiresSibByte(regNumber base) { #ifdef TARGET_AMD64 return base == REG_ESP || base == REG_R12; #else return base == REG_ESP; #endif } static bool baseRegisterRequiresDisplacement(regNumber base) { #ifdef TARGET_AMD64 return base == REG_EBP || base == REG_R13; #else return base == REG_EBP; #endif } UNATIVE_OFFSET emitter::emitInsSizeAM(instrDesc* id, code_t code) { assert(id->idIns() != INS_invalid); instruction ins = id->idIns(); emitAttr attrSize = id->idOpSize(); /* The displacement field is in an unusual place for (tail-)calls */ ssize_t dsp = (ins == INS_call) || (ins == INS_tail_i_jmp) ? emitGetInsCIdisp(id) : emitGetInsAmdAny(id); bool dspInByte = ((signed char)dsp == (ssize_t)dsp); bool dspIsZero = (dsp == 0); UNATIVE_OFFSET size; // Note that the values in reg and rgx are used in this method to decide // how many bytes will be needed by the address [reg+rgx+cns] // this includes the prefix bytes when reg or rgx are registers R8-R15 regNumber reg; regNumber rgx; // The idAddr field is a union and only some of the instruction formats use the iiaAddrMode variant // these are IF_AWR_*, IF_ARD_*, IF_ARW_* and IF_*_ARD // ideally these should really be the only idInsFmts that we see here // but we have some outliers to deal with: // emitIns_R_L adds IF_RWR_LABEL and calls emitInsSizeAM // emitInsRMW adds IF_MRW_CNS, IF_MRW_RRD, IF_MRW_SHF, and calls emitInsSizeAM switch (id->idInsFmt()) { case IF_RWR_LABEL: case IF_MRW_CNS: case IF_MRW_RRD: case IF_MRW_SHF: reg = REG_NA; rgx = REG_NA; break; default: reg = id->idAddr()->iiaAddrMode.amBaseReg; rgx = id->idAddr()->iiaAddrMode.amIndxReg; break; } if (id->idIsDspReloc()) { dspInByte = false; // relocs can't be placed in a byte dspIsZero 
= false; // relocs won't always be zero } if (code & 0xFF000000) { size = 4; } else if (code & 0x00FF0000) { // BT supports 16 bit operands and this code doesn't handle the necessary 66 prefix. assert(ins != INS_bt); assert((attrSize == EA_4BYTE) || (attrSize == EA_PTRSIZE) // Only for x64 || (attrSize == EA_16BYTE) || (attrSize == EA_32BYTE) // only for x64 || (ins == INS_movzx) || (ins == INS_movsx) // The prefetch instructions are always 3 bytes and have part of their modr/m byte hardcoded || isPrefetch(ins)); size = 3; } else { size = 2; } size += emitGetAdjustedSize(ins, attrSize, code); if (hasRexPrefix(code)) { // REX prefix size += emitGetRexPrefixSize(ins); } else if (TakesRexWPrefix(ins, attrSize)) { // REX.W prefix size += emitGetRexPrefixSize(ins); } else if (IsExtendedReg(reg, EA_PTRSIZE) || IsExtendedReg(rgx, EA_PTRSIZE) || ((ins != INS_call) && (IsExtendedReg(id->idReg1(), attrSize) || IsExtendedReg(id->idReg2(), attrSize)))) { // Should have a REX byte size += emitGetRexPrefixSize(ins); } if (rgx == REG_NA) { /* The address is of the form "[reg+disp]" */ if (reg == REG_NA) { /* The address is of the form "[disp]" */ CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_X86 // Special case: "mov eax, [disp]" and "mov [disp], eax" can use a smaller 1-byte encoding. if ((ins == INS_mov) && (id->idReg1() == REG_EAX) && ((id->idInsFmt() == IF_RWR_ARD) || (id->idInsFmt() == IF_AWR_RRD))) { // Amd64: this is one case where addr can be 64-bit in size. This is currently unused. // If this ever changes, this code will need to be updated to add "sizeof(INT64)" to "size". assert((size == 2) || ((size == 3) && (id->idOpSize() == EA_2BYTE))); size--; } #endif size += sizeof(INT32); #ifdef TARGET_AMD64 // If id is not marked for reloc, add 1 additional byte for SIB that follows disp32 if (!id->idIsDspReloc()) { size++; } #endif return size; } // If this is just "call reg", we're done. 
if (id->idIsCallRegPtr()) { assert(ins == INS_call || ins == INS_tail_i_jmp); assert(dsp == 0); return size; } // If the base register is ESP (or R12 on 64-bit systems), a SIB byte must be used. if (baseRegisterRequiresSibByte(reg)) { size++; } // If the base register is EBP (or R13 on 64-bit systems), a displacement is required. // Otherwise, the displacement can be elided if it is zero. if (dspIsZero && !baseRegisterRequiresDisplacement(reg)) { return size; } /* Does the offset fit in a byte? */ if (dspInByte) { size += sizeof(char); } else { size += sizeof(INT32); } } else { /* An index register is present */ size++; /* Is the index value scaled? */ if (emitDecodeScale(id->idAddr()->iiaAddrMode.amScale) > 1) { /* Is there a base register? */ if (reg != REG_NA) { /* The address is "[reg + {2/4/8} * rgx + icon]" */ if (dspIsZero && !baseRegisterRequiresDisplacement(reg)) { /* The address is "[reg + {2/4/8} * rgx]" */ } else { /* The address is "[reg + {2/4/8} * rgx + disp]" */ if (dspInByte) { size += sizeof(char); } else { size += sizeof(int); } } } else { /* The address is "[{2/4/8} * rgx + icon]" */ size += sizeof(INT32); } } else { // When we are using the SIB or VSIB format with EBP or R13 as a base, we must emit at least // a 1 byte displacement (this is a special case in the encoding to allow for the case of no // base register at all). In order to avoid this when we have no scaling, we can reverse the // registers so that we don't have to add that extra byte. However, we can't do that if the // index register is a vector, such as for a gather instruction. // if (dspIsZero && baseRegisterRequiresDisplacement(reg) && !baseRegisterRequiresDisplacement(rgx) && !isFloatReg(rgx)) { // Swap reg and rgx, such that reg is not EBP/R13. 
                // After the swap 'reg' holds the old index register and 'rgx' the old base
                // register, so the SIB encoding no longer needs the forced 1-byte displacement.
                regNumber tmp                       = reg;
                id->idAddr()->iiaAddrMode.amBaseReg = reg = rgx;
                id->idAddr()->iiaAddrMode.amIndxReg = rgx = tmp;
            }

            /* The address is "[reg+rgx+dsp]" */

            if (dspIsZero && !baseRegisterRequiresDisplacement(reg))
            {
                /* This is [reg+rgx]" */
            }
            else
            {
                /* This is [reg+rgx+dsp]" */

                if (dspInByte)
                {
                    size += sizeof(char);
                }
                else
                {
                    size += sizeof(int);
                }
            }
        }
    }

    return size;
}

// Returns the estimated encoded size of an address-mode instruction plus its
// immediate operand 'val'. Immediates shrink to one byte when they fit in a
// signed char and the instruction permits it (mov/test always use full size).
inline UNATIVE_OFFSET emitter::emitInsSizeAM(instrDesc* id, code_t code, int val)
{
    assert(id->idIns() != INS_invalid);
    instruction    ins       = id->idIns();
    UNATIVE_OFFSET valSize   = EA_SIZE_IN_BYTES(id->idOpSize());
    bool           valInByte = ((signed char)val == val) && (ins != INS_mov) && (ins != INS_test);

    // We should never generate BT mem,reg because it has poor performance. BT mem,imm might be useful
    // but it requires special handling of the immediate value (it is always encoded in a byte).
    // Let's not complicate things until this is needed.
    assert(ins != INS_bt);

#ifdef TARGET_AMD64
    // mov reg, imm64 is the only opcode which takes a full 8 byte immediate
    // all other opcodes take a sign-extended 4-byte immediate
    noway_assert(valSize <= sizeof(INT32) || !id->idIsCnsReloc());
#endif // TARGET_AMD64

    if (valSize > sizeof(INT32))
    {
        valSize = sizeof(INT32);
    }

    if (id->idIsCnsReloc())
    {
        valInByte = false; // relocs can't be placed in a byte
        assert(valSize == sizeof(INT32));
    }

    if (valInByte)
    {
        valSize = sizeof(char);
    }
    else
    {
        assert(!IsSSEOrAVXInstruction(ins));
    }

    return valSize + emitInsSizeAM(id, code);
}

// Returns the estimated encoded size of an instruction whose operand is a
// class/static variable; such operands are addressed via a 4-byte
// (RIP-relative) displacement, hence the fixed sizeof(INT32) baseline.
inline UNATIVE_OFFSET emitter::emitInsSizeCV(instrDesc* id, code_t code)
{
    assert(id->idIns() != INS_invalid);
    instruction ins      = id->idIns();
    emitAttr    attrSize = id->idOpSize();

    // fgMorph changes any statics that won't fit into 32-bit addresses
    // into constants with an indir, rather than GT_CLS_VAR
    // so we should only hit this path for statics that are RIP-relative
    UNATIVE_OFFSET size = sizeof(INT32);

    size += emitGetAdjustedSize(ins, attrSize, code);

    bool includeRexPrefixSize = true;

    // 64-bit operand instructions will need a REX.W prefix
    if (TakesRexWPrefix(ins, attrSize) || IsExtendedReg(id->idReg1(), attrSize) ||
        IsExtendedReg(id->idReg2(), attrSize))
    {
        size += emitGetRexPrefixSize(ins);
        includeRexPrefixSize = false;
    }

    return size + emitInsSize(code, includeRexPrefixSize);
}

// Same as above, but also accounts for the immediate operand 'val'.
inline UNATIVE_OFFSET emitter::emitInsSizeCV(instrDesc* id, code_t code, int val)
{
    instruction    ins       = id->idIns();
    UNATIVE_OFFSET valSize   = EA_SIZE_IN_BYTES(id->idOpSize());
    bool           valInByte = ((signed char)val == val) && (ins != INS_mov) && (ins != INS_test);

#ifdef TARGET_AMD64
    // mov reg, imm64 is the only opcode which takes a full 8 byte immediate
    // all other opcodes take a sign-extended 4-byte immediate
    noway_assert(valSize <= sizeof(INT32) || !id->idIsCnsReloc());
#endif // TARGET_AMD64

    if (valSize > sizeof(INT32))
    {
        valSize = sizeof(INT32);
    }

    if (id->idIsCnsReloc())
    {
        valInByte = false; // relocs can't be placed in a byte
        assert(valSize == sizeof(INT32));
    }

    if (valInByte)
    {
        valSize = sizeof(char);
    }
    else
    {
        assert(!IsSSEOrAVXInstruction(ins));
    }

    return valSize + emitInsSizeCV(id, code);
}

/*****************************************************************************
 *
 *  Allocate instruction descriptors for instructions with address modes.
 */

inline emitter::instrDesc* emitter::emitNewInstrAmd(emitAttr size, ssize_t dsp)
{
    if (dsp < AM_DISP_MIN || dsp > AM_DISP_MAX)
    {
        // Displacement doesn't fit in the descriptor's small-disp bitfield:
        // use the larger instrDescAmd variant which carries a full ssize_t.
        instrDescAmd* id = emitAllocInstrAmd(size);

        id->idSetIsLargeDsp();
#ifdef DEBUG
        id->idAddr()->iiaAddrMode.amDisp = AM_DISP_BIG_VAL;
#endif
        id->idaAmdVal = dsp;

        return id;
    }
    else
    {
        instrDesc* id = emitAllocInstr(size);

        id->idAddr()->iiaAddrMode.amDisp = dsp;
        assert(id->idAddr()->iiaAddrMode.amDisp == dsp); // make sure the value fit

        return id;
    }
}

/*****************************************************************************
 *
 *  Set the displacement field in an instruction. Only handles instrDescAmd type.
*/ inline void emitter::emitSetAmdDisp(instrDescAmd* id, ssize_t dsp) { if (dsp < AM_DISP_MIN || dsp > AM_DISP_MAX) { id->idSetIsLargeDsp(); #ifdef DEBUG id->idAddr()->iiaAddrMode.amDisp = AM_DISP_BIG_VAL; #endif id->idaAmdVal = dsp; } else { id->idSetIsSmallDsp(); id->idAddr()->iiaAddrMode.amDisp = dsp; assert(id->idAddr()->iiaAddrMode.amDisp == dsp); // make sure the value fit } } /***************************************************************************** * * Allocate an instruction descriptor for an instruction that uses both * an address mode displacement and a constant. */ emitter::instrDesc* emitter::emitNewInstrAmdCns(emitAttr size, ssize_t dsp, int cns) { if (dsp >= AM_DISP_MIN && dsp <= AM_DISP_MAX) { instrDesc* id = emitNewInstrCns(size, cns); id->idAddr()->iiaAddrMode.amDisp = dsp; assert(id->idAddr()->iiaAddrMode.amDisp == dsp); // make sure the value fit return id; } else { if (instrDesc::fitsInSmallCns(cns)) { instrDescAmd* id = emitAllocInstrAmd(size); id->idSetIsLargeDsp(); #ifdef DEBUG id->idAddr()->iiaAddrMode.amDisp = AM_DISP_BIG_VAL; #endif id->idaAmdVal = dsp; id->idSmallCns(cns); return id; } else { instrDescCnsAmd* id = emitAllocInstrCnsAmd(size); id->idSetIsLargeCns(); id->idacCnsVal = cns; id->idSetIsLargeDsp(); #ifdef DEBUG id->idAddr()->iiaAddrMode.amDisp = AM_DISP_BIG_VAL; #endif id->idacAmdVal = dsp; return id; } } } /***************************************************************************** * * Add a NOP instruction of the given size. */ void emitter::emitIns_Nop(unsigned size) { assert(size <= MAX_ENCODED_SIZE); instrDesc* id = emitNewInstr(); id->idIns(INS_nop); id->idInsFmt(IF_NONE); id->idCodeSize(size); dispIns(id); emitCurIGsize += size; } /***************************************************************************** * * Add an instruction with no operands. 
 */

void emitter::emitIns(instruction ins)
{
    UNATIVE_OFFSET sz;
    instrDesc*     id   = emitNewInstr();
    code_t         code = insCodeMR(ins);

#ifdef DEBUG
    {
        // We cannot have #ifdef inside macro expansion.
        bool assertCond =
            (ins == INS_cdq || ins == INS_int3 || ins == INS_lock || ins == INS_leave || ins == INS_movsb ||
             ins == INS_movsd || ins == INS_movsp || ins == INS_nop || ins == INS_r_movsb || ins == INS_r_movsd ||
             ins == INS_r_movsp || ins == INS_r_stosb || ins == INS_r_stosd || ins == INS_r_stosp || ins == INS_ret ||
             ins == INS_sahf || ins == INS_stosb || ins == INS_stosd || ins == INS_stosp
             // These instructions take zero operands
             || ins == INS_vzeroupper || ins == INS_lfence || ins == INS_mfence || ins == INS_sfence ||
             ins == INS_pause);

        assert(assertCond);
    }
#endif // DEBUG

    assert(!hasRexPrefix(code)); // Can't have a REX bit with no operands, right?

    // The encoded size is simply the number of opcode bytes present in the
    // MR code for the instruction (no operands, so no ModRM/immediate bytes).
    if (code & 0xFF000000)
    {
        sz = 2; // TODO-XArch-Bug?: Shouldn't this be 4? Or maybe we should assert that we don't see this case.
    }
    else if (code & 0x00FF0000)
    {
        sz = 3;
    }
    else if (code & 0x0000FF00)
    {
        sz = 2;
    }
    else
    {
        sz = 1;
    }

    // vzeroupper includes its 2-byte VEX prefix in its MR code.
    assert((ins != INS_vzeroupper) || (sz == 3));

    insFormat fmt = IF_NONE;

    id->idIns(ins);
    id->idInsFmt(fmt);
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;
}

// Add an instruction with no operands, but whose encoding depends on the size
// (Only CDQ/CQO/CWDE/CDQE currently)
void emitter::emitIns(instruction ins, emitAttr attr)
{
    UNATIVE_OFFSET sz;
    instrDesc*     id   = emitNewInstr(attr);
    code_t         code = insCodeMR(ins);
    assert((ins == INS_cdq) || (ins == INS_cwde));
    assert((code & 0xFFFFFF00) == 0);
    sz = 1;

    insFormat fmt = IF_NONE;

    // Account for any operand-size/VEX adjustments and, for 64-bit forms
    // (CQO/CDQE), the REX.W prefix byte.
    sz += emitGetAdjustedSize(ins, attr, code);
    if (TakesRexWPrefix(ins, attr))
    {
        sz += emitGetRexPrefixSize(ins);
    }

    id->idIns(ins);
    id->idInsFmt(fmt);
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;
}

//------------------------------------------------------------------------
// emitMapFmtForIns: map the instruction format based on the instruction.
// Shift-by-a-constant instructions have a special format.
//
// Arguments:
//    fmt - the instruction format to map
//    ins - the instruction
//
// Returns:
//    The mapped instruction format.
//
emitter::insFormat emitter::emitMapFmtForIns(insFormat fmt, instruction ins)
{
    switch (ins)
    {
        // Shift/rotate-by-immediate instructions use the dedicated *_SHF formats.
        case INS_rol_N:
        case INS_ror_N:
        case INS_rcl_N:
        case INS_rcr_N:
        case INS_shl_N:
        case INS_shr_N:
        case INS_sar_N:
        {
            switch (fmt)
            {
                case IF_RRW_CNS:
                    return IF_RRW_SHF;
                case IF_MRW_CNS:
                    return IF_MRW_SHF;
                case IF_SRW_CNS:
                    return IF_SRW_SHF;
                case IF_ARW_CNS:
                    return IF_ARW_SHF;
                default:
                    unreached();
            }
        }

        default:
            return fmt;
    }
}

//------------------------------------------------------------------------
// emitMapFmtAtoM: map the address mode formats ARD, ARW, and AWR to their direct address equivalents.
//
// Arguments:
//    fmt - the instruction format to map
//
// Returns:
//    The mapped instruction format.
// emitter::insFormat emitter::emitMapFmtAtoM(insFormat fmt) { switch (fmt) { case IF_ARD: return IF_MRD; case IF_AWR: return IF_MWR; case IF_ARW: return IF_MRW; case IF_RRD_ARD: return IF_RRD_MRD; case IF_RWR_ARD: return IF_RWR_MRD; case IF_RWR_ARD_CNS: return IF_RWR_MRD_CNS; case IF_RRW_ARD: return IF_RRW_MRD; case IF_RRW_ARD_CNS: return IF_RRW_MRD_CNS; case IF_RWR_RRD_ARD: return IF_RWR_RRD_MRD; case IF_RWR_RRD_ARD_CNS: return IF_RWR_RRD_MRD_CNS; case IF_RWR_RRD_ARD_RRD: return IF_RWR_RRD_MRD_RRD; case IF_ARD_RRD: return IF_MRD_RRD; case IF_AWR_RRD: return IF_MWR_RRD; case IF_ARW_RRD: return IF_MRW_RRD; case IF_ARD_CNS: return IF_MRD_CNS; case IF_AWR_CNS: return IF_MWR_CNS; case IF_ARW_CNS: return IF_MRW_CNS; case IF_AWR_RRD_CNS: return IF_MWR_RRD_CNS; case IF_ARW_SHF: return IF_MRW_SHF; default: unreached(); } } //------------------------------------------------------------------------ // emitHandleMemOp: For a memory operand, fill in the relevant fields of the instrDesc. // // Arguments: // indir - the memory operand. // id - the instrDesc to fill in. // fmt - the instruction format to use. This must be one of the ARD, AWR, or ARW formats. If necessary (such as for // GT_CLS_VAR_ADDR), this function will map it to the correct format. // ins - the instruction we are generating. This might affect the instruction format we choose. // // Assumptions: // The correctly sized instrDesc must already be created, e.g., via emitNewInstrAmd() or emitNewInstrAmdCns(); // // Post-conditions: // For base address of int constant: // -- the caller must have added the int constant base to the instrDesc when creating it via // emitNewInstrAmdCns(). // For simple address modes (base + scale * index + offset): // -- the base register, index register, and scale factor are set. // -- the caller must have added the addressing mode offset int constant to the instrDesc when creating it via // emitNewInstrAmdCns(). // // The instruction format is set. 
// // idSetIsDspReloc() is called if necessary. // void emitter::emitHandleMemOp(GenTreeIndir* indir, instrDesc* id, insFormat fmt, instruction ins) { assert(fmt != IF_NONE); GenTree* memBase = indir->Base(); if ((memBase != nullptr) && memBase->isContained() && (memBase->OperGet() == GT_CLS_VAR_ADDR)) { CORINFO_FIELD_HANDLE fldHnd = memBase->AsClsVar()->gtClsVarHnd; // Static always need relocs if (!jitStaticFldIsGlobAddr(fldHnd)) { // Contract: // fgMorphField() changes any statics that won't fit into 32-bit addresses into // constants with an indir, rather than GT_CLS_VAR, based on reloc type hint given // by VM. Hence emitter should always mark GT_CLS_VAR_ADDR as relocatable. // // Data section constants: these get allocated close to code block of the method and // always addressable IP relative. These too should be marked as relocatable. id->idSetIsDspReloc(); } id->idAddr()->iiaFieldHnd = fldHnd; id->idInsFmt(emitMapFmtForIns(emitMapFmtAtoM(fmt), ins)); } else if ((memBase != nullptr) && memBase->IsCnsIntOrI() && memBase->isContained()) { // Absolute addresses marked as contained should fit within the base of addr mode. assert(memBase->AsIntConCommon()->FitsInAddrBase(emitComp)); // If we reach here, either: // - we are not generating relocatable code, (typically the non-AOT JIT case) // - the base address is a handle represented by an integer constant, // - the base address is a constant zero, or // - the base address is a constant that fits into the memory instruction (this can happen on x86). // This last case is captured in the FitsInAddrBase method which is used by Lowering to determine that it can // be contained. 
// assert(!emitComp->opts.compReloc || memBase->IsIconHandle() || memBase->IsIntegralConst(0) || memBase->AsIntConCommon()->FitsInAddrBase(emitComp)); if (memBase->AsIntConCommon()->AddrNeedsReloc(emitComp)) { id->idSetIsDspReloc(); } id->idAddr()->iiaAddrMode.amBaseReg = REG_NA; id->idAddr()->iiaAddrMode.amIndxReg = REG_NA; id->idAddr()->iiaAddrMode.amScale = emitter::OPSZ1; // for completeness id->idInsFmt(emitMapFmtForIns(fmt, ins)); // Absolute address must have already been set in the instrDesc constructor. assert(emitGetInsAmdAny(id) == memBase->AsIntConCommon()->IconValue()); } else { regNumber amBaseReg = REG_NA; if (memBase != nullptr) { assert(!memBase->isContained()); amBaseReg = memBase->GetRegNum(); assert(amBaseReg != REG_NA); } regNumber amIndxReg = REG_NA; if (indir->HasIndex()) { GenTree* index = indir->Index(); assert(!index->isContained()); amIndxReg = index->GetRegNum(); assert(amIndxReg != REG_NA); } assert((amBaseReg != REG_NA) || (amIndxReg != REG_NA) || (indir->Offset() != 0)); // At least one should be set. id->idAddr()->iiaAddrMode.amBaseReg = amBaseReg; id->idAddr()->iiaAddrMode.amIndxReg = amIndxReg; id->idAddr()->iiaAddrMode.amScale = emitEncodeScale(indir->Scale()); id->idInsFmt(emitMapFmtForIns(fmt, ins)); // disp must have already been set in the instrDesc constructor. 
        assert(emitGetInsAmdAny(id) == indir->Offset()); // make sure "disp" is stored properly
    }
}

// Takes care of storing all incoming register parameters
// into its corresponding shadow space (defined by the x64 ABI)
void emitter::spillIntArgRegsToShadowSlots()
{
    unsigned       argNum;
    instrDesc*     id;
    UNATIVE_OFFSET sz;

    assert(emitComp->compGeneratingProlog);
    for (argNum = 0; argNum < MAX_REG_ARG; ++argNum)
    {
        regNumber argReg = intArgRegs[argNum];

        // The offsets for the shadow space start at RSP + 8
        // (right before the caller return address)
        int offset = (argNum + 1) * EA_PTRSIZE;

        // Emit "mov [rsp + offset], argReg" directly via an instrDesc rather
        // than the emitIns_* helpers.
        id = emitNewInstrAmd(EA_PTRSIZE, offset);
        id->idIns(INS_mov);
        id->idInsFmt(IF_AWR_RRD);
        id->idAddr()->iiaAddrMode.amBaseReg = REG_SPBASE;
        id->idAddr()->iiaAddrMode.amIndxReg = REG_NA;
        id->idAddr()->iiaAddrMode.amScale   = emitEncodeScale(1);

        // The offset has already been set in the intrDsc ctor,
        // make sure we got it right.
        assert(emitGetInsAmdAny(id) == ssize_t(offset));

        id->idReg1(argReg);
        sz = emitInsSizeAM(id, insCodeMR(INS_mov));
        id->idCodeSize(sz);
        emitCurIGsize += sz;
    }
}

//------------------------------------------------------------------------
// emitInsLoadInd: Emits a "mov reg, [mem]" (or a variant such as "movzx" or "movss")
//    instruction for a GT_IND node.
//
// Arguments:
//    ins - the instruction to emit
//    attr - the instruction operand size
//    dstReg - the destination register
//    mem - the GT_IND node
//
void emitter::emitInsLoadInd(instruction ins, emitAttr attr, regNumber dstReg, GenTreeIndir* mem)
{
    assert(mem->OperIs(GT_IND, GT_NULLCHECK));

    GenTree* addr = mem->Addr();

    // Loads from a static/class variable go through the class-variable path.
    if (addr->OperGet() == GT_CLS_VAR_ADDR)
    {
        emitIns_R_C(ins, attr, dstReg, addr->AsClsVar()->gtClsVarHnd, 0);
        return;
    }

    // Loads from a local's address become stack-frame-relative loads.
    if (addr->OperIsLocalAddr())
    {
        GenTreeLclVarCommon* varNode = addr->AsLclVarCommon();
        unsigned             offset  = varNode->GetLclOffs();
        emitIns_R_S(ins, attr, dstReg, varNode->GetLclNum(), offset);

        // Updating variable liveness after instruction was emitted.
        // TODO-Review: it appears that this call to genUpdateLife does nothing because it
        // returns quickly when passed GT_LCL_VAR_ADDR or GT_LCL_FLD_ADDR. Below, emitInsStoreInd
        // had similar code that replaced `varNode` with `mem` (to fix a GC hole). It might be
        // appropriate to do that here as well, but doing so showed no asm diffs, so it's not
        // clear when this scenario gets hit, at least for GC refs.
        codeGen->genUpdateLife(varNode);
        return;
    }

    // Otherwise this is a general address mode (or absolute address) load.
    assert(addr->OperIsAddrMode() || (addr->IsCnsIntOrI() && addr->isContained()) || !addr->isContained());
    ssize_t    offset = mem->Offset();
    instrDesc* id     = emitNewInstrAmd(attr, offset);
    id->idIns(ins);
    id->idReg1(dstReg);
    emitHandleMemOp(mem, id, IF_RWR_ARD, ins);
    UNATIVE_OFFSET sz = emitInsSizeAM(id, insCodeRM(ins));
    id->idCodeSize(sz);
    dispIns(id);
    emitCurIGsize += sz;
}

//------------------------------------------------------------------------
// emitInsStoreInd: Emits a "mov [mem], reg/imm" (or a variant such as "movss")
//    instruction for a GT_STOREIND node.
// // Arguments: // ins - the instruction to emit // attr - the instruction operand size // mem - the GT_STOREIND node // void emitter::emitInsStoreInd(instruction ins, emitAttr attr, GenTreeStoreInd* mem) { assert(mem->OperIs(GT_STOREIND)); GenTree* addr = mem->Addr(); GenTree* data = mem->Data(); if (addr->OperGet() == GT_CLS_VAR_ADDR) { if (data->isContainedIntOrIImmed()) { emitIns_C_I(ins, attr, addr->AsClsVar()->gtClsVarHnd, 0, (int)data->AsIntConCommon()->IconValue()); } else { assert(!data->isContained()); emitIns_C_R(ins, attr, addr->AsClsVar()->gtClsVarHnd, data->GetRegNum(), 0); } return; } if (addr->OperIsLocalAddr()) { GenTreeLclVarCommon* varNode = addr->AsLclVarCommon(); unsigned offset = varNode->GetLclOffs(); if (data->isContainedIntOrIImmed()) { emitIns_S_I(ins, attr, varNode->GetLclNum(), offset, (int)data->AsIntConCommon()->IconValue()); } else { assert(!data->isContained()); emitIns_S_R(ins, attr, data->GetRegNum(), varNode->GetLclNum(), offset); } // Updating variable liveness after instruction was emitted codeGen->genUpdateLife(mem); return; } ssize_t offset = mem->Offset(); UNATIVE_OFFSET sz; instrDesc* id; if (data->isContainedIntOrIImmed()) { int icon = (int)data->AsIntConCommon()->IconValue(); id = emitNewInstrAmdCns(attr, offset, icon); id->idIns(ins); emitHandleMemOp(mem, id, IF_AWR_CNS, ins); sz = emitInsSizeAM(id, insCodeMI(ins), icon); id->idCodeSize(sz); } else { assert(!data->isContained()); id = emitNewInstrAmd(attr, offset); id->idIns(ins); emitHandleMemOp(mem, id, IF_AWR_RRD, ins); id->idReg1(data->GetRegNum()); sz = emitInsSizeAM(id, insCodeMR(ins)); id->idCodeSize(sz); } dispIns(id); emitCurIGsize += sz; } //------------------------------------------------------------------------ // emitInsStoreLcl: Emits a "mov [mem], reg/imm" (or a variant such as "movss") // instruction for a GT_STORE_LCL_VAR node. 
//
// Arguments:
//    ins - the instruction to emit
//    attr - the instruction operand size
//    varNode - the GT_STORE_LCL_VAR node
//
void emitter::emitInsStoreLcl(instruction ins, emitAttr attr, GenTreeLclVarCommon* varNode)
{
    assert(varNode->OperIs(GT_STORE_LCL_VAR));
    assert(varNode->GetRegNum() == REG_NA); // stack store

    GenTree* data = varNode->gtGetOp1();
    codeGen->inst_set_SV_var(varNode);

    if (data->isContainedIntOrIImmed())
    {
        emitIns_S_I(ins, attr, varNode->GetLclNum(), 0, (int)data->AsIntConCommon()->IconValue());
    }
    else
    {
        assert(!data->isContained());
        emitIns_S_R(ins, attr, data->GetRegNum(), varNode->GetLclNum(), 0);
    }

    // Updating variable liveness after instruction was emitted
    codeGen->genUpdateLife(varNode);
}

//------------------------------------------------------------------------
// emitInsBinary: Emits an instruction for a node which takes two operands
//
// Arguments:
//    ins - the instruction to emit
//    attr - the instruction operand size
//    dst - the destination and first source operand
//    src - the second source operand
//
// Assumptions:
//  i) caller of this routine needs to call genConsumeReg()
// ii) caller of this routine needs to call genProduceReg()
regNumber emitter::emitInsBinary(instruction ins, emitAttr attr, GenTree* dst, GenTree* src)
{
    // We can only have one memory operand and only src can be a constant operand
    // However, the handling for a given operand type (mem, cns, or other) is fairly
    // consistent regardless of whether they are src or dst. As such, we will find
    // the type of each operand and only check them against src/dst where relevant.

    GenTree* memOp   = nullptr;
    GenTree* cnsOp   = nullptr;
    GenTree* otherOp = nullptr;

    if (dst->isContained() || (dst->isLclField() && (dst->GetRegNum() == REG_NA)) || dst->isUsedFromSpillTemp())
    {
        // dst can only be a modrm
        // dst on 3opImul isn't really the dst
        assert(dst->isUsedFromMemory() || (dst->GetRegNum() == REG_NA) || instrIs3opImul(ins));
        assert(!src->isUsedFromMemory());

        memOp = dst;

        if (src->isContained())
        {
            assert(src->IsCnsIntOrI());
            cnsOp = src;
        }
        else
        {
            otherOp = src;
        }
    }
    else if (src->isContained() || src->isUsedFromSpillTemp())
    {
        assert(!dst->isUsedFromMemory());
        otherOp = dst;

        if ((src->IsCnsIntOrI() || src->IsCnsFltOrDbl()) && !src->isUsedFromSpillTemp())
        {
            assert(!src->isUsedFromMemory() || src->IsCnsFltOrDbl());
            cnsOp = src;
        }
        else
        {
            assert(src->isUsedFromMemory());
            memOp = src;
        }
    }

    // At this point, we either have a memory operand or we don't.
    //
    // If we don't then the logic is very simple and we will either be emitting a
    // `reg, immed` instruction (if src is a cns) or a `reg, reg` instruction otherwise.
    //
    // If we do have a memory operand, the logic is a bit more complicated as we need
    // to do different things depending on the type of memory operand. These types include:
    //   * Spill temp
    //   * Indirect access
    //     * Local variable
    //     * Class variable
    //     * Addressing mode [base + index * scale + offset]
    //   * Local field
    //   * Local variable
    //
    // Most of these types (except Indirect: Class variable and Indirect: Addressing mode)
    // give us a local variable number and an offset and access memory on the stack
    //
    // Indirect: Class variable is used for access static class variables and gives us a handle
    // to the memory location we read from
    //
    // Indirect: Addressing mode is used for the remaining memory accesses and will give us
    // a base address, an index, a scale, and an offset. These are combined to let us easily
    // access the given memory location.
    //
    // In all of the memory access cases, we determine which form to emit (e.g. `reg, [mem]`
    // or `[mem], reg`) by comparing memOp to src to determine which `emitIns_*` method needs
    // to be called. The exception is for the `[mem], immed` case (for Indirect: Class variable)
    // where only src can be the immediate.

    if (memOp != nullptr)
    {
        TempDsc* tmpDsc = nullptr;
        unsigned varNum = BAD_VAR_NUM;
        unsigned offset = (unsigned)-1;

        if (memOp->isUsedFromSpillTemp())
        {
            assert(memOp->IsRegOptional());

            tmpDsc = codeGen->getSpillTempDsc(memOp);
            varNum = tmpDsc->tdTempNum();
            offset = 0;

            codeGen->regSet.tmpRlsTemp(tmpDsc);
        }
        else if (memOp->isIndir())
        {
            GenTreeIndir* memIndir = memOp->AsIndir();
            GenTree*      memBase  = memIndir->gtOp1;

            switch (memBase->OperGet())
            {
                case GT_LCL_VAR_ADDR:
                case GT_LCL_FLD_ADDR:
                {
                    assert(memBase->isContained());
                    varNum = memBase->AsLclVarCommon()->GetLclNum();
                    offset = memBase->AsLclVarCommon()->GetLclOffs();

                    // Ensure that all the GenTreeIndir values are set to their defaults.
                    assert(!memIndir->HasIndex());
                    assert(memIndir->Scale() == 1);
                    assert(memIndir->Offset() == 0);

                    break;
                }

                case GT_CLS_VAR_ADDR:
                {
                    if (memOp == src)
                    {
                        assert(otherOp == dst);
                        assert(cnsOp == nullptr);

                        if (instrHasImplicitRegPairDest(ins))
                        {
                            // src is a class static variable
                            // dst is implicit - RDX:RAX
                            emitIns_C(ins, attr, memBase->AsClsVar()->gtClsVarHnd, 0);
                        }
                        else
                        {
                            // src is a class static variable
                            // dst is a register
                            emitIns_R_C(ins, attr, dst->GetRegNum(), memBase->AsClsVar()->gtClsVarHnd, 0);
                        }
                    }
                    else
                    {
                        assert(memOp == dst);

                        if (cnsOp != nullptr)
                        {
                            assert(cnsOp == src);
                            assert(otherOp == nullptr);
                            assert(src->IsCnsIntOrI());

                            // src is an contained immediate
                            // dst is a class static variable
                            emitIns_C_I(ins, attr, memBase->AsClsVar()->gtClsVarHnd, 0,
                                        (int)src->AsIntConCommon()->IconValue());
                        }
                        else
                        {
                            assert(otherOp == src);

                            // src is a register
                            // dst is a class static variable
                            emitIns_C_R(ins, attr, memBase->AsClsVar()->gtClsVarHnd, src->GetRegNum(), 0);
                        }
                    }

                    return dst->GetRegNum();
                }

                default: // Addressing mode [base + index * scale + offset]
                {
                    instrDesc* id = nullptr;

                    if (cnsOp != nullptr)
                    {
                        assert(memOp == dst);
                        assert(cnsOp == src);
                        assert(otherOp == nullptr);
                        assert(src->IsCnsIntOrI());

                        id = emitNewInstrAmdCns(attr, memIndir->Offset(), (int)src->AsIntConCommon()->IconValue());
                    }
                    else
                    {
                        ssize_t offset = memIndir->Offset();
                        id              = emitNewInstrAmd(attr, offset);
                        id->idIns(ins);

                        GenTree* regTree = (memOp == src) ? dst : src;

                        // there must be one non-contained op
                        assert(!regTree->isContained());
                        id->idReg1(regTree->GetRegNum());
                    }
                    assert(id != nullptr);

                    id->idIns(ins); // Set the instruction.

                    // Determine the instruction format
                    insFormat fmt = IF_NONE;

                    if (memOp == src)
                    {
                        assert(cnsOp == nullptr);
                        assert(otherOp == dst);

                        if (instrHasImplicitRegPairDest(ins))
                        {
                            fmt = emitInsModeFormat(ins, IF_ARD);
                        }
                        else
                        {
                            fmt = emitInsModeFormat(ins, IF_RRD_ARD);
                        }
                    }
                    else
                    {
                        assert(memOp == dst);

                        if (cnsOp != nullptr)
                        {
                            assert(cnsOp == src);
                            assert(otherOp == nullptr);
                            assert(src->IsCnsIntOrI());

                            fmt = emitInsModeFormat(ins, IF_ARD_CNS);
                        }
                        else
                        {
                            assert(otherOp == src);
                            fmt = emitInsModeFormat(ins, IF_ARD_RRD);
                        }
                    }
                    assert(fmt != IF_NONE);
                    emitHandleMemOp(memIndir, id, fmt, ins);

                    // Determine the instruction size
                    UNATIVE_OFFSET sz = 0;

                    if (memOp == src)
                    {
                        assert(otherOp == dst);
                        assert(cnsOp == nullptr);

                        if (instrHasImplicitRegPairDest(ins))
                        {
                            sz = emitInsSizeAM(id, insCode(ins));
                        }
                        else
                        {
                            sz = emitInsSizeAM(id, insCodeRM(ins));
                        }
                    }
                    else
                    {
                        assert(memOp == dst);

                        if (cnsOp != nullptr)
                        {
                            assert(memOp == dst);
                            assert(cnsOp == src);
                            assert(otherOp == nullptr);

                            sz = emitInsSizeAM(id, insCodeMI(ins), (int)src->AsIntConCommon()->IconValue());
                        }
                        else
                        {
                            assert(otherOp == src);
                            sz = emitInsSizeAM(id, insCodeMR(ins));
                        }
                    }
                    assert(sz != 0);

                    id->idCodeSize(sz);

                    dispIns(id);
                    emitCurIGsize += sz;

                    return (memOp == src) ? dst->GetRegNum() : REG_NA;
                }
            }
        }
        else
        {
            // Non-indirect memory operand: a stack-based local (field or variable).
            switch (memOp->OperGet())
            {
                case GT_LCL_FLD:
                case GT_STORE_LCL_FLD:
                    varNum = memOp->AsLclFld()->GetLclNum();
                    offset = memOp->AsLclFld()->GetLclOffs();
                    break;

                case GT_LCL_VAR:
                {
                    assert(memOp->IsRegOptional() ||
                           !emitComp->lvaTable[memOp->AsLclVar()->GetLclNum()].lvIsRegCandidate());
                    varNum = memOp->AsLclVar()->GetLclNum();
                    offset = 0;
                    break;
                }

                default:
                    unreached();
                    break;
            }
        }

        // Ensure we got a good varNum and offset.
        // We also need to check for `tmpDsc != nullptr` since spill temp numbers
        // are negative and start with -1, which also happens to be BAD_VAR_NUM.
        assert((varNum != BAD_VAR_NUM) || (tmpDsc != nullptr));
        assert(offset != (unsigned)-1);

        if (memOp == src)
        {
            assert(otherOp == dst);
            assert(cnsOp == nullptr);

            if (instrHasImplicitRegPairDest(ins))
            {
                // src is a stack based local variable
                // dst is implicit - RDX:RAX
                emitIns_S(ins, attr, varNum, offset);
            }
            else
            {
                // src is a stack based local variable
                // dst is a register
                emitIns_R_S(ins, attr, dst->GetRegNum(), varNum, offset);
            }
        }
        else
        {
            assert(memOp == dst);
            assert((dst->GetRegNum() == REG_NA) || dst->IsRegOptional());

            if (cnsOp != nullptr)
            {
                assert(cnsOp == src);
                assert(otherOp == nullptr);
                assert(src->IsCnsIntOrI());

                // src is an contained immediate
                // dst is a stack based local variable
                emitIns_S_I(ins, attr, varNum, offset, (int)src->AsIntConCommon()->IconValue());
            }
            else
            {
                assert(otherOp == src);
                assert(!src->isContained());

                // src is a register
                // dst is a stack based local variable
                emitIns_S_R(ins, attr, src->GetRegNum(), varNum, offset);
            }
        }
    }
    else if (cnsOp != nullptr) // reg, immed
    {
        assert(cnsOp == src);
        assert(otherOp == dst);

        if (src->IsCnsIntOrI())
        {
            assert(!dst->isContained());
            GenTreeIntConCommon* intCns = src->AsIntConCommon();
            emitIns_R_I(ins, attr, dst->GetRegNum(), intCns->IconValue());
        }
        else
        {
            // Floating-point constants are materialized as data-section
            // constants and accessed like class variables.
            assert(src->IsCnsFltOrDbl());
            GenTreeDblCon* dblCns = src->AsDblCon();

            CORINFO_FIELD_HANDLE hnd = emitFltOrDblConst(dblCns->gtDconVal, emitTypeSize(dblCns));
            emitIns_R_C(ins, attr, dst->GetRegNum(), hnd, 0);
        }
    }
    else // reg, reg
    {
        assert(otherOp == nullptr);
        assert(!src->isContained() && !dst->isContained());

        if (instrHasImplicitRegPairDest(ins))
        {
            emitIns_R(ins, attr, src->GetRegNum());
        }
        else
        {
            emitIns_R_R(ins, attr, dst->GetRegNum(), src->GetRegNum());
        }
    }

    return dst->GetRegNum();
}

//------------------------------------------------------------------------
// emitInsRMW: Emit logic for Read-Modify-Write binary instructions.
//
// Responsible for emitting a single instruction that will perform an operation of the form:
//      *addr = *addr <BinOp> src
// For example:
//      ADD [RAX], RCX
//
// Arguments:
//    ins - instruction to generate
//    attr - emitter attribute for instruction
//    storeInd - indir for RMW addressing mode
//    src - source operand of instruction
//
// Assumptions:
//    Lowering has taken care of recognizing the StoreInd pattern of:
//          StoreInd( AddressTree, BinOp( Ind ( AddressTree ), Operand ) )
//    The address to store is already sitting in a register.
//
// Notes:
//    This is a no-produce operation, meaning that no register output will
//    be produced for future use in the code stream.
// void emitter::emitInsRMW(instruction ins, emitAttr attr, GenTreeStoreInd* storeInd, GenTree* src) { GenTree* addr = storeInd->Addr(); addr = addr->gtSkipReloadOrCopy(); assert(addr->OperIs(GT_LCL_VAR, GT_LCL_VAR_ADDR, GT_LEA, GT_CLS_VAR_ADDR, GT_CNS_INT)); instrDesc* id = nullptr; UNATIVE_OFFSET sz; ssize_t offset = 0; if (addr->OperGet() != GT_CLS_VAR_ADDR) { offset = storeInd->Offset(); } if (src->isContainedIntOrIImmed()) { GenTreeIntConCommon* intConst = src->AsIntConCommon(); int iconVal = (int)intConst->IconValue(); switch (ins) { case INS_rcl_N: case INS_rcr_N: case INS_rol_N: case INS_ror_N: case INS_shl_N: case INS_shr_N: case INS_sar_N: iconVal &= 0x7F; break; default: break; } if (addr->isContained() && addr->OperIsLocalAddr()) { GenTreeLclVarCommon* lclVar = addr->AsLclVarCommon(); emitIns_S_I(ins, attr, lclVar->GetLclNum(), lclVar->GetLclOffs(), iconVal); return; } else { id = emitNewInstrAmdCns(attr, offset, iconVal); emitHandleMemOp(storeInd, id, IF_ARW_CNS, ins); id->idIns(ins); sz = emitInsSizeAM(id, insCodeMI(ins), iconVal); } } else { assert(!src->isContained()); // there must be one non-contained src // ind, reg id = emitNewInstrAmd(attr, offset); emitHandleMemOp(storeInd, id, IF_ARW_RRD, ins); id->idReg1(src->GetRegNum()); id->idIns(ins); sz = emitInsSizeAM(id, insCodeMR(ins)); } id->idCodeSize(sz); dispIns(id); emitCurIGsize += sz; } //------------------------------------------------------------------------ // emitInsRMW: Emit logic for Read-Modify-Write unary instructions. 
// // Responsible for emitting a single instruction that will perform an operation of the form: // *addr = UnaryOp *addr // For example: // NOT [RAX] // // Arguments: // ins - instruction to generate // attr - emitter attribute for instruction // storeInd - indir for RMW addressing mode // // Assumptions: // Lowering has taken care of recognizing the StoreInd pattern of: // StoreInd( AddressTree, UnaryOp( Ind ( AddressTree ) ) ) // The address to store is already sitting in a register. // // Notes: // This is a no-produce operation, meaning that no register output will // be produced for future use in the code stream. // void emitter::emitInsRMW(instruction ins, emitAttr attr, GenTreeStoreInd* storeInd) { GenTree* addr = storeInd->Addr(); addr = addr->gtSkipReloadOrCopy(); assert(addr->OperIs(GT_LCL_VAR, GT_LCL_VAR_ADDR, GT_CLS_VAR_ADDR, GT_LEA, GT_CNS_INT)); ssize_t offset = 0; if (addr->OperGet() != GT_CLS_VAR_ADDR) { offset = storeInd->Offset(); } if (addr->isContained() && addr->OperIsLocalAddr()) { GenTreeLclVarCommon* lclVar = addr->AsLclVarCommon(); emitIns_S(ins, attr, lclVar->GetLclNum(), lclVar->GetLclOffs()); return; } instrDesc* id = emitNewInstrAmd(attr, offset); emitHandleMemOp(storeInd, id, IF_ARW, ins); id->idIns(ins); UNATIVE_OFFSET sz = emitInsSizeAM(id, insCodeMR(ins)); id->idCodeSize(sz); dispIns(id); emitCurIGsize += sz; } /***************************************************************************** * * Add an instruction referencing a single register. 
 */

// emitIns_R: emit an instruction with a single register operand (e.g. "inc eax",
// "push rax", "seto al"). 'sz' below is the estimated encoding size in bytes.
void emitter::emitIns_R(instruction ins, emitAttr attr, regNumber reg)
{
    emitAttr size = EA_SIZE(attr);

    assert(size <= EA_PTRSIZE);
    noway_assert(emitVerifyEncodable(ins, size, reg));

    UNATIVE_OFFSET sz;
    instrDesc*     id = emitNewInstrSmall(attr);

    switch (ins)
    {
        case INS_inc:
        case INS_dec:
#ifdef TARGET_AMD64

            sz = 2; // x64 has no 1-byte opcode (it is the same encoding as the REX prefix)

#else // !TARGET_AMD64

            if (size == EA_1BYTE)
                sz = 2; // Use the long form as the small one has no 'w' bit
            else
                sz = 1; // Use short form

#endif // !TARGET_AMD64

            break;

        case INS_pop:
        case INS_pop_hide:
        case INS_push:
        case INS_push_hide:

            /* We don't currently push/pop small values */

            assert(size == EA_PTRSIZE);

            sz = 1;
            break;

        default:

            /* All the sixteen INS_setCCs are contiguous. */

            if (INS_seto <= ins && ins <= INS_setg)
            {
                // Rough check that we used the endpoints for the range check

                assert(INS_seto + 0xF == INS_setg);

                // The caller must specify EA_1BYTE for 'attr'

                assert(attr == EA_1BYTE);

                /* We expect this to always be a 'big' opcode */

                assert(insEncodeMRreg(ins, reg, attr, insCodeMR(ins)) & 0x00FF0000);

                size = attr;

                sz = 3;
                break;
            }
            else
            {
                sz = 2;
                break;
            }
    }
    insFormat fmt = emitInsModeFormat(ins, IF_RRD);

    id->idIns(ins);
    id->idInsFmt(fmt);
    id->idReg1(reg);

    // Vex bytes
    sz += emitGetAdjustedSize(ins, attr, insEncodeMRreg(ins, reg, attr, insCodeMR(ins)));

    // REX byte
    if (IsExtendedReg(reg, attr) || TakesRexWPrefix(ins, attr))
    {
        sz += emitGetRexPrefixSize(ins);
    }

    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;

    // push/pop change the emitter's tracked stack depth.
    emitAdjustStackDepthPushPop(ins);
}

/*****************************************************************************
 *
 *  Add an instruction referencing a register and a constant.
 */

// emitIns_R_I: emit an instruction with a register operand and an immediate
// (e.g. "mov rax, 42", "shl ecx, 3"). Most of the body estimates the encoding
// size, which depends on whether the immediate fits in a signed byte.
void emitter::emitIns_R_I(instruction ins, emitAttr attr, regNumber reg, ssize_t val DEBUGARG(GenTreeFlags gtFlags))
{
    emitAttr size = EA_SIZE(attr);

    // Allow emitting SSE2/AVX SIMD instructions of R_I form that can specify EA_16BYTE or EA_32BYTE
    assert(size <= EA_PTRSIZE || IsSSEOrAVXInstruction(ins));

    noway_assert(emitVerifyEncodable(ins, size, reg));

#ifdef TARGET_AMD64
    // mov reg, imm64 is the only opcode which takes a full 8 byte immediate
    // all other opcodes take a sign-extended 4-byte immediate
    noway_assert(size < EA_8BYTE || ins == INS_mov || ((int)val == val && !EA_IS_CNS_RELOC(attr)));
#endif

    UNATIVE_OFFSET sz;
    instrDesc*     id;
    insFormat      fmt       = emitInsModeFormat(ins, IF_RRD_CNS);
    bool           valInByte = ((signed char)val == (target_ssize_t)val) && (ins != INS_mov) && (ins != INS_test);

    // BT reg,imm might be useful but it requires special handling of the immediate value
    // (it is always encoded in a byte). Let's not complicate things until this is needed.
    assert(ins != INS_bt);

    // Figure out the size of the instruction
    switch (ins)
    {
        case INS_mov:
#ifdef TARGET_AMD64
            // mov reg, imm64 is equivalent to mov reg, imm32 if the high order bits are all 0
            // and this isn't a reloc constant.
            if (((size > EA_4BYTE) && (0 == (val & 0xFFFFFFFF00000000LL))) && !EA_IS_CNS_RELOC(attr))
            {
                attr = size = EA_4BYTE;
            }

            if (size > EA_4BYTE)
            {
                sz = 9; // Really it is 10, but we'll add one more later
                break;
            }
#endif // TARGET_AMD64
            sz = 5;
            break;

        case INS_rcl_N:
        case INS_rcr_N:
        case INS_rol_N:
        case INS_ror_N:
        case INS_shl_N:
        case INS_shr_N:
        case INS_sar_N:
            // Shift-by-1 has its own dedicated encoding; callers must not route it here.
            assert(val != 1);
            fmt = IF_RRW_SHF;
            sz  = 3;
            val &= 0x7F;
            valInByte = true; // shift amount always placed in a byte
            break;

        default:

            if (EA_IS_CNS_RELOC(attr))
            {
                valInByte = false; // relocs can't be placed in a byte
            }

            if (valInByte)
            {
                if (IsSSEOrAVXInstruction(ins))
                {
                    bool includeRexPrefixSize = true;
                    // Do not get the RexSize() but just decide if it will be included down further and if yes,
                    // do not include it again.
                    if (IsExtendedReg(reg, attr) || TakesRexWPrefix(ins, size) || instrIsExtendedReg3opImul(ins))
                    {
                        includeRexPrefixSize = false;
                    }

                    sz = emitInsSize(insCodeMI(ins), includeRexPrefixSize);
                    sz += 1;
                }
                else if (size == EA_1BYTE && reg == REG_EAX && !instrIs3opImul(ins))
                {
                    // Short accumulator ("al, imm8") form.
                    sz = 2;
                }
                else
                {
                    sz = 3;
                }
            }
            else
            {
                assert(!IsSSEOrAVXInstruction(ins));

                if (reg == REG_EAX && !instrIs3opImul(ins))
                {
                    sz = 1;
                }
                else
                {
                    sz = 2;
                }

#ifdef TARGET_AMD64
                if (size > EA_4BYTE)
                {
                    // We special-case anything that takes a full 8-byte constant.
                    sz += 4;
                }
                else
#endif // TARGET_AMD64
                {
                    sz += EA_SIZE_IN_BYTES(attr);
                }
            }
            break;
    }

    sz += emitGetAdjustedSize(ins, attr, insCodeMI(ins));

    // Do we need a REX prefix for AMD64? We need one if we are using any extended register (REX.R), or if we have a
    // 64-bit sized operand (REX.W). Note that IMUL in our encoding is special, with a "built-in", implicit, target
    // register. So we also need to check if that built-in register is an extended register.
    if (IsExtendedReg(reg, attr) || TakesRexWPrefix(ins, size) || instrIsExtendedReg3opImul(ins))
    {
        sz += emitGetRexPrefixSize(ins);
    }

    id = emitNewInstrSC(attr, val);
    id->idIns(ins);
    id->idInsFmt(fmt);
    id->idReg1(reg);
    id->idCodeSize(sz);

    INDEBUG(id->idDebugOnlyInfo()->idFlags = gtFlags);

    dispIns(id);
    emitCurIGsize += sz;

    // Immediate adjustments of RSP (e.g. "sub rsp, N") change the tracked stack depth.
    if (reg == REG_ESP)
    {
        emitAdjustStackDepth(ins, val);
    }
}

/*****************************************************************************
 *
 *  Add an instruction referencing an integer constant.
 */

// emitIns_I: emit an instruction whose only operand is an immediate.
// Only a small, known set of instructions is supported (see the switch below).
void emitter::emitIns_I(instruction ins, emitAttr attr, cnsval_ssize_t val)
{
    UNATIVE_OFFSET sz;
    instrDesc*     id;
    bool           valInByte = ((signed char)val == (target_ssize_t)val);

#ifdef TARGET_AMD64
    // mov reg, imm64 is the only opcode which takes a full 8 byte immediate
    // all other opcodes take a sign-extended 4-byte immediate
    noway_assert(EA_SIZE(attr) < EA_8BYTE || !EA_IS_CNS_RELOC(attr));
#endif

    if (EA_IS_CNS_RELOC(attr))
    {
        valInByte = false; // relocs can't be placed in a byte
    }

    // Hard-coded encoding sizes for the supported immediate-only instructions.
    switch (ins)
    {
        case INS_loop:
        case INS_jge:
            sz = 2;
            break;

        case INS_ret:
            sz = 3;
            break;

        case INS_push_hide:
        case INS_push:
            sz = valInByte ? 2 : 5;
            break;

        default:
            NO_WAY("unexpected instruction");
    }

    id = emitNewInstrSC(attr, val);
    id->idIns(ins);
    id->idInsFmt(IF_CNS);
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;

    // push changes the emitter's tracked stack depth.
    emitAdjustStackDepthPushPop(ins);
}

/*****************************************************************************
 *
 *  Add a "jump through a table" instruction.
 */

void emitter::emitIns_IJ(emitAttr attr, regNumber reg, unsigned base)
{
    assert(EA_SIZE(attr) == EA_4BYTE);

    UNATIVE_OFFSET    sz  = 3 + 4;
    const instruction ins = INS_i_jmp;

    if (IsExtendedReg(reg, attr))
    {
        sz += emitGetRexPrefixSize(ins);
    }

    instrDesc* id = emitNewInstrAmd(attr, base);

    id->idIns(ins);
    id->idInsFmt(IF_ARD);
    // The jump target is indexed by 'reg' scaled by the pointer size; there is no base register.
    id->idAddr()->iiaAddrMode.amBaseReg = REG_NA;
    id->idAddr()->iiaAddrMode.amIndxReg = reg;
    id->idAddr()->iiaAddrMode.amScale   = emitter::OPSZP;

#ifdef DEBUG
    id->idDebugOnlyInfo()->idMemCookie = base;
#endif

    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;
}

/*****************************************************************************
 *
 *  Add an instruction with a static data member operand. If 'size' is 0, the
 *  instruction operates on the address of the static member instead of its
 *  value (e.g. "push offset clsvar", rather than "push dword ptr [clsvar]").
 */

void emitter::emitIns_C(instruction ins, emitAttr attr, CORINFO_FIELD_HANDLE fldHnd, int offs)
{
    // Static always need relocs
    if (!jitStaticFldIsGlobAddr(fldHnd))
    {
        attr = EA_SET_FLG(attr, EA_DSP_RELOC_FLG);
    }

    UNATIVE_OFFSET sz;
    instrDesc*     id;

    /* Are we pushing the offset of the class variable? */

    if (EA_IS_OFFSET(attr))
    {
        assert(ins == INS_push);
        sz = 1 + TARGET_POINTER_SIZE;

        id = emitNewInstrDsp(EA_1BYTE, offs);
        id->idIns(ins);
        id->idInsFmt(IF_MRD_OFF);
    }
    else
    {
        insFormat fmt = emitInsModeFormat(ins, IF_MRD);

        id = emitNewInstrDsp(attr, offs);
        id->idIns(ins);
        id->idInsFmt(fmt);
        sz = emitInsSizeCV(id, insCodeMR(ins));
    }

    if (TakesRexWPrefix(ins, attr))
    {
        // REX.W prefix
        sz += emitGetRexPrefixSize(ins);
    }

    id->idAddr()->iiaFieldHnd = fldHnd;

    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;

    // push changes the emitter's tracked stack depth.
    emitAdjustStackDepthPushPop(ins);
}

//------------------------------------------------------------------------
// IsMovInstruction: Determines whether a given instruction is a move instruction
//
// Arguments:
//    ins       -- The instruction being checked
//
// Return Value:
//    true if the instruction is a qualifying move instruction; otherwise, false
//
// Remarks:
//    This methods covers most kinds of two operand move instructions that copy a
//    value between two registers. It does not cover all move-like instructions
//    and so doesn't currently cover things like movsb/movsw/movsd/movsq or cmovcc
//    and doesn't currently cover cases where a value is read/written from memory.
//
//    The reason it doesn't cover all instructions was namely to limit the scope
//    of the initial change to that which was impactful to move elision so that
//    it could be centrally managed and optimized. It may be beneficial to support
//    the other move instructions in the future but that may require more extensive
//    changes to ensure relevant codegen/emit paths flow and check things correctly.
bool emitter::IsMovInstruction(instruction ins)
{
    // A single merged case group keeps the register-to-register move set in one place.
    switch (ins)
    {
        case INS_mov:
        case INS_movapd:
        case INS_movaps:
        case INS_movd:
        case INS_movdqa:
        case INS_movdqu:
        case INS_movsdsse2:
        case INS_movss:
        case INS_movsx:
        case INS_movupd:
        case INS_movups:
        case INS_movzx:
#if defined(TARGET_AMD64)
        case INS_movq:
        case INS_movsxd:
#endif // TARGET_AMD64
            return true;

        default:
            return false;
    }
}

//------------------------------------------------------------------------
// IsJccInstruction: Determine if an instruction is a conditional jump instruction.
//
// Arguments:
//    ins       -- The instruction being checked
//
// Return Value:
//    true if the instruction qualifies; otherwise, false
//
bool emitter::IsJccInstruction(instruction ins)
{
    // Both the short-form and long-form jcc opcodes occupy contiguous enum ranges.
    const bool isShortJcc = (INS_jo <= ins) && (ins <= INS_jg);
    const bool isLongJcc  = (INS_l_jo <= ins) && (ins <= INS_l_jg);
    return isShortJcc || isLongJcc;
}

//------------------------------------------------------------------------
// IsJmpInstruction: Determine if an instruction is a jump instruction but NOT a conditional jump instruction.
//
// Arguments:
//    ins       -- The instruction being checked
//
// Return Value:
//    true if the instruction qualifies; otherwise, false
//
bool emitter::IsJmpInstruction(instruction ins)
{
    switch (ins)
    {
        case INS_i_jmp:
        case INS_jmp:
        case INS_l_jmp:
        case INS_tail_i_jmp:
            return true;

        default:
            return false;
    }
}

// TODO-XArch-CQ: There are places where the fact that an instruction zero-extends
// is not an important detail, such as when "regular" floating-point code is generated
//
// This differs from cases like HWIntrinsics that deal with the entire vector and so
// they need to be "aware" that a given move impacts the upper-bits.
//
// Ideally we can detect this difference, likely via canIgnoreSideEffects, and allow
// the below optimizations for those scenarios as well.
// Track whether the instruction has a zero/sign-extension or clearing of the upper-bits as a side-effect bool emitter::HasSideEffect(instruction ins, emitAttr size) { bool hasSideEffect = false; switch (ins) { case INS_mov: { // non EA_PTRSIZE moves may zero-extend the source hasSideEffect = (size != EA_PTRSIZE); break; } case INS_movapd: case INS_movaps: case INS_movdqa: case INS_movdqu: case INS_movupd: case INS_movups: { // non EA_32BYTE moves clear the upper bits under VEX encoding hasSideEffect = UseVEXEncoding() && (size != EA_32BYTE); break; } case INS_movd: { // Clears the upper bits hasSideEffect = true; break; } case INS_movsdsse2: case INS_movss: { // Clears the upper bits under VEX encoding hasSideEffect = UseVEXEncoding(); break; } case INS_movsx: case INS_movzx: { // Sign/Zero-extends the source hasSideEffect = true; break; } #if defined(TARGET_AMD64) case INS_movq: { // Clears the upper bits hasSideEffect = true; break; } case INS_movsxd: { // Sign-extends the source hasSideEffect = true; break; } #endif // TARGET_AMD64 default: { unreached(); } } return hasSideEffect; } //---------------------------------------------------------------------------------------- // IsRedundantMov: // Check if the current `mov` instruction is redundant and can be omitted. // A `mov` is redundant in following 3 cases: // // 1. Move to same register on TARGET_AMD64 // (Except 4-byte movement like "mov eax, eax" which zeros out upper bits of eax register) // // mov rax, rax // // 2. Move that is identical to last instruction emitted. // // mov rax, rbx # <-- last instruction // mov rax, rbx # <-- current instruction can be omitted. // // 3. Opposite Move as that of last instruction emitted. // // mov rax, rbx # <-- last instruction // mov rbx, rax # <-- current instruction can be omitted. 
//
// Arguments:
//                 ins  - The current instruction
//                 fmt  - The current format
//                 size - Operand size of current instruction
//                 dst  - The current destination
//                 src  - The current source
// canIgnoreSideEffects - The move can be skipped as it doesn't represent special semantics
//
// Return Value:
//    true if the move instruction is redundant; otherwise, false.

bool emitter::IsRedundantMov(
    instruction ins, insFormat fmt, emitAttr size, regNumber dst, regNumber src, bool canIgnoreSideEffects)
{
    assert(IsMovInstruction(ins));

    if (canIgnoreSideEffects && (dst == src))
    {
        // These elisions used to be explicit even when optimizations were disabled
        // Some instructions have a side effect and shouldn't be skipped
        // however existing codepaths were skipping these instructions in
        // certain scenarios and so we skip them as well for back-compat
        // when canIgnoreSideEffects is true (see below for which have a
        // side effect).
        //
        // Long term, these paths should be audited and should likely be
        // replaced with copies rather than extensions.
        return true;
    }

    if (!emitComp->opts.OptimizationEnabled())
    {
        // The remaining move elisions should only happen if optimizations are enabled
        return false;
    }

    // Skip optimization if current instruction creates a GC live value.
    if (EA_IS_GCREF_OR_BYREF(size))
    {
        return false;
    }

    bool hasSideEffect = HasSideEffect(ins, size);

    // Check if we are already in the correct register and don't have a side effect
    if ((dst == src) && !hasSideEffect)
    {
        JITDUMP("\n -- suppressing mov because src and dst is same register and the mov has no side-effects.\n");
        return true;
    }

    bool isFirstInstrInBlock = (emitCurIGinsCnt == 0) && ((emitCurIG->igFlags & IGF_EXTEND) == 0);

    // TODO-XArch-CQ: Certain instructions, such as movaps vs movups, are equivalent in
    // functionality even if their actual identifier differs and we should optimize these

    if (isFirstInstrInBlock ||               // Don't optimize if instruction is the first instruction in IG.
        (emitLastIns == nullptr) ||          // or if a last instruction doesn't exist
        (emitLastIns->idIns() != ins) ||     // or if the instruction is different from the last instruction
        (emitLastIns->idOpSize() != size) || // or if the operand size is different from the last instruction
        (emitLastIns->idInsFmt() != fmt))    // or if the format is different from the last instruction
    {
        return false;
    }

    regNumber lastDst = emitLastIns->idReg1();
    regNumber lastSrc = emitLastIns->idReg2();

    // Check if we did same move in last instruction, side effects don't matter since they already happened
    if ((lastDst == dst) && (lastSrc == src))
    {
        JITDUMP("\n -- suppressing mov because last instruction already moved from src to dst register.\n");
        return true;
    }

    // Check if we did a switched mov in the last instruction and don't have a side effect
    if ((lastDst == src) && (lastSrc == dst) && !hasSideEffect)
    {
        JITDUMP("\n -- suppressing mov because last instruction already moved from dst to src register and the mov has "
                "no side-effects.\n");
        return true;
    }

    return false;
}

//------------------------------------------------------------------------
// EmitMovsxAsCwde: try to emit "movsxd rax, eax" and "movsx eax, ax" as
// "cdqe" and "cwde" as a code size optimization.
//
// Arguments:
//    ins  - The instruction for the original mov
//    size - The size of the original mov
//    dst  - The destination register for the original mov
//    src  - The source register for the original mov
//
// Return Value:
//    "true" if the optimization succeeded, in which case the instruction can be
//    counted as emitted, "false" otherwise.
//
bool emitter::EmitMovsxAsCwde(instruction ins, emitAttr size, regNumber dst, regNumber src)
{
    // Both cdqe and cwde only operate on the accumulator, so src and dst must be RAX/EAX/AX.
    if ((src == REG_EAX) && (src == dst))
    {
#ifdef TARGET_64BIT
        // "movsxd rax, eax".
        if ((ins == INS_movsxd) && (size == EA_4BYTE))
        {
            // "cdqe": INS_cwde with an 8-byte attr picks up the REX.W prefix.
            emitIns(INS_cwde, EA_8BYTE);
            return true;
        }
#endif
        // "movsx eax, ax".
        if ((ins == INS_movsx) && (size == EA_2BYTE))
        {
            // "cwde".
            emitIns(INS_cwde, EA_4BYTE);
            return true;
        }
    }

    return false;
}

//------------------------------------------------------------------------
// emitIns_Mov: Emits a move instruction
//
// Arguments:
//    ins      -- The instruction being emitted
//    attr     -- The emit attribute
//    dstReg   -- The destination register
//    srcReg   -- The source register
//    canSkip  -- true if the move can be elided when dstReg == srcReg, otherwise false
//
void emitter::emitIns_Mov(instruction ins, emitAttr attr, regNumber dstReg, regNumber srcReg, bool canSkip)
{
    // Only move instructions can use emitIns_Mov
    assert(IsMovInstruction(ins));

#if DEBUG
    // Validate that the register kinds match what each move opcode expects.
    switch (ins)
    {
        case INS_mov:
        case INS_movsx:
        case INS_movzx:
        {
            assert(isGeneralRegister(dstReg) && isGeneralRegister(srcReg));
            break;
        }

        case INS_movapd:
        case INS_movaps:
        case INS_movdqa:
        case INS_movdqu:
        case INS_movsdsse2:
        case INS_movss:
        case INS_movupd:
        case INS_movups:
        {
            assert(isFloatReg(dstReg) && isFloatReg(srcReg));
            break;
        }

        case INS_movd:
        {
            // movd moves between a general register and a SIMD register (either direction).
            assert(isFloatReg(dstReg) != isFloatReg(srcReg));
            break;
        }

#if defined(TARGET_AMD64)
        case INS_movq:
        {
            assert(isFloatReg(dstReg) && isFloatReg(srcReg));
            break;
        }

        case INS_movsxd:
        {
            assert(isGeneralRegister(dstReg) && isGeneralRegister(srcReg));
            break;
        }
#endif // TARGET_AMD64

        default:
        {
            unreached();
        }
    }
#endif

    emitAttr size = EA_SIZE(attr);

    assert(size <= EA_32BYTE);
    noway_assert(emitVerifyEncodable(ins, size, dstReg, srcReg));

    insFormat fmt = emitInsModeFormat(ins, IF_RRD_RRD);

    // Elide the move entirely when it is provably redundant.
    if (IsRedundantMov(ins, fmt, attr, dstReg, srcReg, canSkip))
    {
        return;
    }

    // Shrink eligible accumulator sign-extensions to cdqe/cwde.
    if (EmitMovsxAsCwde(ins, size, dstReg, srcReg))
    {
        return;
    }

    UNATIVE_OFFSET sz = emitInsSizeRR(ins, dstReg, srcReg, attr);

    instrDesc* id = emitNewInstrSmall(attr);
    id->idIns(ins);
    id->idInsFmt(fmt);
    id->idReg1(dstReg);
    id->idReg2(srcReg);
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;
}

/*****************************************************************************
 *
 *  Add an instruction with two register operands.
*/ void emitter::emitIns_R_R(instruction ins, emitAttr attr, regNumber reg1, regNumber reg2) { if (IsMovInstruction(ins)) { assert(!"Please use emitIns_Mov() to correctly handle move elision"); emitIns_Mov(ins, attr, reg1, reg2, /* canSkip */ false); } emitAttr size = EA_SIZE(attr); assert(size <= EA_32BYTE); noway_assert(emitVerifyEncodable(ins, size, reg1, reg2)); UNATIVE_OFFSET sz = emitInsSizeRR(ins, reg1, reg2, attr); /* Special case: "XCHG" uses a different format */ insFormat fmt = (ins == INS_xchg) ? IF_RRW_RRW : emitInsModeFormat(ins, IF_RRD_RRD); instrDesc* id = emitNewInstrSmall(attr); id->idIns(ins); id->idInsFmt(fmt); id->idReg1(reg1); id->idReg2(reg2); id->idCodeSize(sz); dispIns(id); emitCurIGsize += sz; } /***************************************************************************** * * Add an instruction with two register operands and an integer constant. */ void emitter::emitIns_R_R_I(instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, int ival) { #ifdef TARGET_AMD64 // mov reg, imm64 is the only opcode which takes a full 8 byte immediate // all other opcodes take a sign-extended 4-byte immediate noway_assert(EA_SIZE(attr) < EA_8BYTE || !EA_IS_CNS_RELOC(attr)); #endif instrDesc* id = emitNewInstrSC(attr, ival); id->idIns(ins); id->idInsFmt(IF_RRW_RRW_CNS); id->idReg1(reg1); id->idReg2(reg2); code_t code = 0; switch (ins) { case INS_pextrb: case INS_pextrd: case INS_pextrq: case INS_pextrw_sse41: case INS_extractps: case INS_vextractf128: case INS_vextracti128: case INS_shld: case INS_shrd: { code = insCodeMR(ins); break; } case INS_psrldq: case INS_pslldq: { code = insCodeMI(ins); break; } default: { code = insCodeRM(ins); break; } } UNATIVE_OFFSET sz = emitInsSizeRR(id, code, ival); id->idCodeSize(sz); dispIns(id); emitCurIGsize += sz; } void emitter::emitIns_AR(instruction ins, emitAttr attr, regNumber base, int offs) { assert(ins == INS_prefetcht0 || ins == INS_prefetcht1 || ins == INS_prefetcht2 || ins == INS_prefetchnta); 
instrDesc* id = emitNewInstrAmd(attr, offs); id->idIns(ins); id->idInsFmt(IF_ARD); id->idAddr()->iiaAddrMode.amBaseReg = base; id->idAddr()->iiaAddrMode.amIndxReg = REG_NA; UNATIVE_OFFSET sz = emitInsSizeAM(id, insCodeMR(ins)); id->idCodeSize(sz); dispIns(id); emitCurIGsize += sz; } //------------------------------------------------------------------------ // emitIns_AR_R_R: emits the code for an instruction that takes a base memory register, two register operands // and that does not return a value // // Arguments: // ins -- The instruction being emitted // attr -- The emit attribute // targetReg -- The target register // op2Reg -- The register of the second operand // op3Reg -- The register of the third operand // base -- The base register used for the memory address (first operand) // offs -- The offset from base // void emitter::emitIns_AR_R_R( instruction ins, emitAttr attr, regNumber op2Reg, regNumber op3Reg, regNumber base, int offs) { assert(IsSSEOrAVXInstruction(ins)); assert(IsThreeOperandAVXInstruction(ins)); instrDesc* id = emitNewInstrAmd(attr, offs); id->idIns(ins); id->idReg1(op2Reg); id->idReg2(op3Reg); id->idInsFmt(IF_AWR_RRD_RRD); id->idAddr()->iiaAddrMode.amBaseReg = base; id->idAddr()->iiaAddrMode.amIndxReg = REG_NA; UNATIVE_OFFSET sz = emitInsSizeAM(id, insCodeMR(ins)); id->idCodeSize(sz); dispIns(id); emitCurIGsize += sz; } void emitter::emitIns_R_A(instruction ins, emitAttr attr, regNumber reg1, GenTreeIndir* indir) { ssize_t offs = indir->Offset(); instrDesc* id = emitNewInstrAmd(attr, offs); id->idIns(ins); id->idReg1(reg1); emitHandleMemOp(indir, id, IF_RRW_ARD, ins); UNATIVE_OFFSET sz = emitInsSizeAM(id, insCodeRM(ins)); id->idCodeSize(sz); dispIns(id); emitCurIGsize += sz; } void emitter::emitIns_R_A_I(instruction ins, emitAttr attr, regNumber reg1, GenTreeIndir* indir, int ival) { noway_assert(emitVerifyEncodable(ins, EA_SIZE(attr), reg1)); assert(IsSSEOrAVXInstruction(ins)); ssize_t offs = indir->Offset(); instrDesc* id = 
emitNewInstrAmdCns(attr, offs, ival); id->idIns(ins); id->idReg1(reg1); emitHandleMemOp(indir, id, IF_RRW_ARD_CNS, ins); UNATIVE_OFFSET sz = emitInsSizeAM(id, insCodeRM(ins), ival); id->idCodeSize(sz); dispIns(id); emitCurIGsize += sz; } void emitter::emitIns_R_AR_I(instruction ins, emitAttr attr, regNumber reg1, regNumber base, int offs, int ival) { noway_assert(emitVerifyEncodable(ins, EA_SIZE(attr), reg1)); assert(IsSSEOrAVXInstruction(ins)); instrDesc* id = emitNewInstrAmdCns(attr, offs, ival); id->idIns(ins); id->idReg1(reg1); id->idInsFmt(IF_RRW_ARD_CNS); id->idAddr()->iiaAddrMode.amBaseReg = base; id->idAddr()->iiaAddrMode.amIndxReg = REG_NA; UNATIVE_OFFSET sz = emitInsSizeAM(id, insCodeRM(ins), ival); id->idCodeSize(sz); dispIns(id); emitCurIGsize += sz; } void emitter::emitIns_R_C_I( instruction ins, emitAttr attr, regNumber reg1, CORINFO_FIELD_HANDLE fldHnd, int offs, int ival) { // Static always need relocs if (!jitStaticFldIsGlobAddr(fldHnd)) { attr = EA_SET_FLG(attr, EA_DSP_RELOC_FLG); } noway_assert(emitVerifyEncodable(ins, EA_SIZE(attr), reg1)); assert(IsSSEOrAVXInstruction(ins)); instrDesc* id = emitNewInstrCnsDsp(attr, ival, offs); id->idIns(ins); id->idInsFmt(IF_RRW_MRD_CNS); id->idReg1(reg1); id->idAddr()->iiaFieldHnd = fldHnd; UNATIVE_OFFSET sz = emitInsSizeCV(id, insCodeRM(ins), ival); id->idCodeSize(sz); dispIns(id); emitCurIGsize += sz; } void emitter::emitIns_R_S_I(instruction ins, emitAttr attr, regNumber reg1, int varx, int offs, int ival) { noway_assert(emitVerifyEncodable(ins, EA_SIZE(attr), reg1)); assert(IsSSEOrAVXInstruction(ins)); instrDesc* id = emitNewInstrCns(attr, ival); id->idIns(ins); id->idInsFmt(IF_RRW_SRD_CNS); id->idReg1(reg1); id->idAddr()->iiaLclVar.initLclVarAddr(varx, offs); #ifdef DEBUG id->idDebugOnlyInfo()->idVarRefOffs = emitVarRefOffs; #endif UNATIVE_OFFSET sz = emitInsSizeSV(id, insCodeRM(ins), varx, offs, ival); id->idCodeSize(sz); dispIns(id); emitCurIGsize += sz; } void emitter::emitIns_R_R_A(instruction ins, 
emitAttr attr, regNumber reg1, regNumber reg2, GenTreeIndir* indir) { assert(IsSSEOrAVXInstruction(ins)); assert(IsThreeOperandAVXInstruction(ins)); ssize_t offs = indir->Offset(); instrDesc* id = emitNewInstrAmd(attr, offs); id->idIns(ins); id->idReg1(reg1); id->idReg2(reg2); emitHandleMemOp(indir, id, IF_RWR_RRD_ARD, ins); UNATIVE_OFFSET sz = emitInsSizeAM(id, insCodeRM(ins)); id->idCodeSize(sz); dispIns(id); emitCurIGsize += sz; } void emitter::emitIns_R_R_AR(instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, regNumber base, int offs) { assert(IsSSEOrAVXInstruction(ins)); assert(IsThreeOperandAVXInstruction(ins)); instrDesc* id = emitNewInstrAmd(attr, offs); id->idIns(ins); id->idReg1(reg1); id->idReg2(reg2); id->idInsFmt(IF_RWR_RRD_ARD); id->idAddr()->iiaAddrMode.amBaseReg = base; id->idAddr()->iiaAddrMode.amIndxReg = REG_NA; UNATIVE_OFFSET sz = emitInsSizeAM(id, insCodeRM(ins)); id->idCodeSize(sz); dispIns(id); emitCurIGsize += sz; } //------------------------------------------------------------------------ // IsAVX2GatherInstruction: return true if the instruction is AVX2 Gather // // Arguments: // ins - the instruction to check // Return Value: // true if the instruction is AVX2 Gather // bool IsAVX2GatherInstruction(instruction ins) { switch (ins) { case INS_vpgatherdd: case INS_vpgatherdq: case INS_vpgatherqd: case INS_vpgatherqq: case INS_vgatherdps: case INS_vgatherdpd: case INS_vgatherqps: case INS_vgatherqpd: return true; default: return false; } } //------------------------------------------------------------------------ // emitIns_R_AR_R: Emits an AVX2 Gather instructions // // Arguments: // ins - the instruction to emit // attr - the instruction operand size // reg1 - the destination and first source operand // reg2 - the mask operand (encoded in VEX.vvvv) // base - the base register of address to load // index - the index register of VSIB // scale - the scale number of VSIB // offs - the offset added to the memory address from base // 
void emitter::emitIns_R_AR_R(instruction ins,
                             emitAttr    attr,
                             regNumber   reg1,
                             regNumber   reg2,
                             regNumber   base,
                             regNumber   index,
                             int         scale,
                             int         offs)
{
    assert(IsAVX2GatherInstruction(ins));

    instrDesc* id = emitNewInstrAmd(attr, offs);

    id->idIns(ins);
    id->idReg1(reg1);
    id->idReg2(reg2);

    id->idInsFmt(IF_RWR_ARD_RRD);
    id->idAddr()->iiaAddrMode.amBaseReg = base;
    id->idAddr()->iiaAddrMode.amIndxReg = index;
    // NOTE(review): 'scale' is cast to emitAttr before encoding, i.e. it is
    // presumably the element scale in bytes (1/2/4/8) — confirm at call sites.
    id->idAddr()->iiaAddrMode.amScale = emitEncodeSize((emitAttr)scale);

    UNATIVE_OFFSET sz = emitInsSizeAM(id, insCodeRM(ins));
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;
}

//------------------------------------------------------------------------
// emitIns_R_R_C: emits a three-operand AVX instruction whose third operand
// is a static field (fldHnd + offs).
//
// Arguments:
//    ins    - the instruction to emit
//    attr   - the instruction operand size
//    reg1   - the destination register
//    reg2   - the register of the second operand
//    fldHnd - the field handle used for the memory address
//    offs   - the offset added to the address of the field
//
void emitter::emitIns_R_R_C(
    instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, CORINFO_FIELD_HANDLE fldHnd, int offs)
{
    assert(IsSSEOrAVXInstruction(ins));
    assert(IsThreeOperandAVXInstruction(ins));

    // Static always need relocs
    if (!jitStaticFldIsGlobAddr(fldHnd))
    {
        attr = EA_SET_FLG(attr, EA_DSP_RELOC_FLG);
    }

    instrDesc* id = emitNewInstrDsp(attr, offs);

    id->idIns(ins);
    id->idInsFmt(IF_RWR_RRD_MRD);
    id->idReg1(reg1);
    id->idReg2(reg2);
    id->idAddr()->iiaFieldHnd = fldHnd;

    UNATIVE_OFFSET sz = emitInsSizeCV(id, insCodeRM(ins));
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;
}

/*****************************************************************************
 *
 *  Add an instruction with three register operands.
*/
void emitter::emitIns_R_R_R(instruction ins, emitAttr attr, regNumber targetReg, regNumber reg1, regNumber reg2)
{
    assert(IsSSEOrAVXInstruction(ins));
    assert(IsThreeOperandAVXInstruction(ins));

    instrDesc* id = emitNewInstr(attr);

    id->idIns(ins);
    id->idInsFmt(IF_RWR_RRD_RRD);
    id->idReg1(targetReg);
    id->idReg2(reg1);
    id->idReg3(reg2);

    UNATIVE_OFFSET sz = emitInsSizeRR(id, insCodeRM(ins));
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;
}

//------------------------------------------------------------------------
// emitIns_R_R_S: emits a three-operand AVX instruction whose third operand
// is a stack-based local (varx + offs).
//
// Arguments:
//    ins  - the instruction to emit
//    attr - the instruction operand size
//    reg1 - the destination register
//    reg2 - the register of the second operand
//    varx - the variable index used for the memory address
//    offs - the offset added to the memory address from varx
//
void emitter::emitIns_R_R_S(instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, int varx, int offs)
{
    assert(IsSSEOrAVXInstruction(ins));
    assert(IsThreeOperandAVXInstruction(ins));

    instrDesc* id = emitNewInstr(attr);

    id->idIns(ins);
    id->idInsFmt(IF_RWR_RRD_SRD);
    id->idReg1(reg1);
    id->idReg2(reg2);
    id->idAddr()->iiaLclVar.initLclVarAddr(varx, offs);

#ifdef DEBUG
    id->idDebugOnlyInfo()->idVarRefOffs = emitVarRefOffs;
#endif

    UNATIVE_OFFSET sz = emitInsSizeSV(id, insCodeRM(ins), varx, offs);
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;
}

//------------------------------------------------------------------------
// emitIns_R_R_A_I: emits a three-operand AVX instruction with an immediate,
// whose memory operand comes from 'indir'; 'fmt' supplies the instruction format.
//
void emitter::emitIns_R_R_A_I(
    instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, GenTreeIndir* indir, int ival, insFormat fmt)
{
    assert(IsSSEOrAVXInstruction(ins));
    assert(IsThreeOperandAVXInstruction(ins));

    ssize_t    offs = indir->Offset();
    instrDesc* id   = emitNewInstrAmdCns(attr, offs, ival);

    id->idIns(ins);
    id->idReg1(reg1);
    id->idReg2(reg2);

    emitHandleMemOp(indir, id, fmt, ins);

    UNATIVE_OFFSET sz = emitInsSizeAM(id, insCodeRM(ins), ival);
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;
}

//------------------------------------------------------------------------
// emitIns_R_R_AR_I: emits a three-operand AVX instruction with an immediate,
// whose memory operand is [base + offs].
//
void emitter::emitIns_R_R_AR_I(
    instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, regNumber base, int offs, int ival)
{
    assert(IsSSEOrAVXInstruction(ins));
    assert(IsThreeOperandAVXInstruction(ins));

    instrDesc* id = emitNewInstrAmdCns(attr, offs, ival);

    id->idIns(ins);
    id->idReg1(reg1);
    id->idReg2(reg2);

    id->idInsFmt(IF_RWR_RRD_ARD_CNS);
    id->idAddr()->iiaAddrMode.amBaseReg = base;
    id->idAddr()->iiaAddrMode.amIndxReg = REG_NA;

    UNATIVE_OFFSET sz = emitInsSizeAM(id, insCodeRM(ins), ival);
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;
}

//------------------------------------------------------------------------
// emitIns_R_R_C_I: emits a three-operand AVX instruction with an immediate,
// whose memory operand is a static field (fldHnd + offs).
//
void emitter::emitIns_R_R_C_I(
    instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, CORINFO_FIELD_HANDLE fldHnd, int offs, int ival)
{
    assert(IsSSEOrAVXInstruction(ins));
    assert(IsThreeOperandAVXInstruction(ins));

    // Static always need relocs
    if (!jitStaticFldIsGlobAddr(fldHnd))
    {
        attr = EA_SET_FLG(attr, EA_DSP_RELOC_FLG);
    }

    instrDesc* id = emitNewInstrCnsDsp(attr, ival, offs);

    id->idIns(ins);
    id->idInsFmt(IF_RWR_RRD_MRD_CNS);
    id->idReg1(reg1);
    id->idReg2(reg2);
    id->idAddr()->iiaFieldHnd = fldHnd;

    UNATIVE_OFFSET sz = emitInsSizeCV(id, insCodeRM(ins), ival);
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;
}

/**********************************************************************************
 * emitIns_R_R_R_I: Add an instruction with three register operands and an immediate.
 *
 * Arguments:
 *    ins       - the instruction to add
 *    attr      - the emitter attribute for instruction
 *    targetReg - the target (destination) register
 *    reg1      - the first source register
 *    reg2      - the second source register
 *    ival      - the immediate value
 */
void emitter::emitIns_R_R_R_I(
    instruction ins, emitAttr attr, regNumber targetReg, regNumber reg1, regNumber reg2, int ival)
{
    assert(IsSSEOrAVXInstruction(ins));
    assert(IsThreeOperandAVXInstruction(ins));

    instrDesc* id = emitNewInstrCns(attr, ival);

    id->idIns(ins);
    id->idInsFmt(IF_RWR_RRD_RRD_CNS);
    id->idReg1(targetReg);
    id->idReg2(reg1);
    id->idReg3(reg2);

    code_t code = 0;

    // The opcode table used depends on how the instruction encodes its operands:
    // extract-style instructions use the MR form, shift-by-immediate uses the MI
    // form, and everything else uses the RM form.
    switch (ins)
    {
        case INS_pextrb:
        case INS_pextrd:
        case INS_pextrq:
        case INS_pextrw_sse41:
        case INS_extractps:
        case INS_vextractf128:
        case INS_vextracti128:
        {
            code = insCodeMR(ins);
            break;
        }

        case INS_psrldq:
        case INS_pslldq:
        {
            code = insCodeMI(ins);
            break;
        }

        default:
        {
            code = insCodeRM(ins);
            break;
        }
    }

    UNATIVE_OFFSET sz = emitInsSizeRR(id, code, ival);
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;
}

//------------------------------------------------------------------------
// emitIns_R_R_S_I: emits a three-operand AVX instruction with an immediate,
// whose memory operand is a stack-based local (varx + offs).
//
void emitter::emitIns_R_R_S_I(
    instruction ins, emitAttr attr,
regNumber reg1, regNumber reg2, int varx, int offs, int ival)
{
    assert(IsSSEOrAVXInstruction(ins));
    assert(IsThreeOperandAVXInstruction(ins));

    instrDesc* id = emitNewInstrCns(attr, ival);

    id->idIns(ins);
    id->idInsFmt(IF_RWR_RRD_SRD_CNS);
    id->idReg1(reg1);
    id->idReg2(reg2);
    id->idAddr()->iiaLclVar.initLclVarAddr(varx, offs);

#ifdef DEBUG
    id->idDebugOnlyInfo()->idVarRefOffs = emitVarRefOffs;
#endif

    UNATIVE_OFFSET sz = emitInsSizeSV(id, insCodeRM(ins), varx, offs, ival);
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;
}

//------------------------------------------------------------------------
// encodeXmmRegAsIval: Encodes a XMM register into imm[7:4] for use by a SIMD instruction
//
// Arguments
//    opReg -- The register being encoded
//
// Returns:
//    opReg encoded in imm[7:4]
//
static int encodeXmmRegAsIval(regNumber opReg)
{
    // AVX/AVX2 supports 4-reg format for vblendvps/vblendvpd/vpblendvb,
    // which encodes the fourth register into imm8[7:4]
    assert(opReg >= XMMBASE);
    int ival = (opReg - XMMBASE) << 4;

    assert((ival >= 0) && (ival <= 255));
    // NOTE(review): the narrowing cast makes registers xmm8 and above yield a
    // negative int; consumers appear to use only the low 8 bits as the imm8 —
    // confirm against the encoder.
    return (int8_t)ival;
}

//------------------------------------------------------------------------
// emitIns_R_R_A_R: emits the code for an instruction that takes a register operand, a GenTreeIndir address,
// another register operand, and that returns a value in register
//
// Arguments:
//    ins       -- The instruction being emitted
//    attr      -- The emit attribute
//    targetReg -- The target register
//    op1Reg    -- The register of the first operand
//    op3Reg    -- The register of the third operand
//    indir     -- The GenTreeIndir used for the memory address
//
// Remarks:
//    op2 is built from indir
//
void emitter::emitIns_R_R_A_R(
    instruction ins, emitAttr attr, regNumber targetReg, regNumber op1Reg, regNumber op3Reg, GenTreeIndir* indir)
{
    assert(isAvxBlendv(ins));
    assert(UseVEXEncoding());

    // The third register operand travels in imm8[7:4].
    int     ival = encodeXmmRegAsIval(op3Reg);
    ssize_t offs = indir->Offset();

    instrDesc* id = emitNewInstrAmdCns(attr, offs, ival);

    id->idIns(ins);
id->idReg1(targetReg);
    id->idReg2(op1Reg);

    emitHandleMemOp(indir, id, IF_RWR_RRD_ARD_RRD, ins);

    UNATIVE_OFFSET sz = emitInsSizeAM(id, insCodeRM(ins), ival);
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;
}

//------------------------------------------------------------------------
// emitIns_R_R_AR_R: emits the code for an instruction that takes a register operand, a base memory
// register, another register operand, and that returns a value in register
//
// Arguments:
//    ins       -- The instruction being emitted
//    attr      -- The emit attribute
//    targetReg -- The target register
//    op1Reg    -- The register of the first operands
//    op3Reg    -- The register of the third operand
//    base      -- The base register used for the memory address
//    offs      -- The offset added to the memory address from base
//
// Remarks:
//    op2 is built from base + offs
//
void emitter::emitIns_R_R_AR_R(
    instruction ins, emitAttr attr, regNumber targetReg, regNumber op1Reg, regNumber op3Reg, regNumber base, int offs)
{
    assert(isAvxBlendv(ins));
    assert(UseVEXEncoding());

    // The third register operand travels in imm8[7:4].
    int ival = encodeXmmRegAsIval(op3Reg);

    instrDesc* id = emitNewInstrAmdCns(attr, offs, ival);

    id->idIns(ins);
    id->idReg1(targetReg);
    id->idReg2(op1Reg);

    id->idInsFmt(IF_RWR_RRD_ARD_RRD);
    id->idAddr()->iiaAddrMode.amBaseReg = base;
    id->idAddr()->iiaAddrMode.amIndxReg = REG_NA;

    UNATIVE_OFFSET sz = emitInsSizeAM(id, insCodeRM(ins), ival);
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;
}

//------------------------------------------------------------------------
// emitIns_R_R_C_R: emits the code for an instruction that takes a register operand, a field handle +
// offset, another register operand, and that returns a value in register
//
// Arguments:
//    ins       -- The instruction being emitted
//    attr      -- The emit attribute
//    targetReg -- The target register
//    op1Reg    -- The register of the first operand
//    op3Reg    -- The register of the third operand
//    fldHnd    -- The CORINFO_FIELD_HANDLE used for the memory address
//    offs      -- The offset added to the memory address from fldHnd
//
// Remarks:
//    op2 is built from fldHnd + offs
//
void emitter::emitIns_R_R_C_R(instruction ins,
                              emitAttr    attr,
                              regNumber   targetReg,
                              regNumber   op1Reg,
                              regNumber   op3Reg,
                              CORINFO_FIELD_HANDLE fldHnd,
                              int         offs)
{
    assert(isAvxBlendv(ins));
    assert(UseVEXEncoding());

    // Static always need relocs
    if (!jitStaticFldIsGlobAddr(fldHnd))
    {
        attr = EA_SET_FLG(attr, EA_DSP_RELOC_FLG);
    }

    // The third register operand travels in imm8[7:4].
    int ival = encodeXmmRegAsIval(op3Reg);

    instrDesc* id = emitNewInstrCnsDsp(attr, ival, offs);

    id->idIns(ins);
    id->idReg1(targetReg);
    id->idReg2(op1Reg);

    id->idInsFmt(IF_RWR_RRD_MRD_RRD);
    id->idAddr()->iiaFieldHnd = fldHnd;

    UNATIVE_OFFSET sz = emitInsSizeCV(id, insCodeRM(ins), ival);
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;
}

//------------------------------------------------------------------------
// emitIns_R_R_S_R: emits the code for an instruction that takes a register operand, a variable index +
// offset, another register operand, and that returns a value in register
//
// Arguments:
//    ins       -- The instruction being emitted
//    attr      -- The emit attribute
//    targetReg -- The target register
//    op1Reg    -- The register of the first operand
//    op3Reg    -- The register of the third operand
//    varx      -- The variable index used for the memory address
//    offs      -- The offset added to the memory address from varx
//
// Remarks:
//    op2 is built from varx + offs
//
void emitter::emitIns_R_R_S_R(
    instruction ins, emitAttr attr, regNumber targetReg, regNumber op1Reg, regNumber op3Reg, int varx, int offs)
{
    assert(isAvxBlendv(ins));
    assert(UseVEXEncoding());

    // The third register operand travels in imm8[7:4].
    int ival = encodeXmmRegAsIval(op3Reg);

    instrDesc* id = emitNewInstrCns(attr, ival);

    id->idIns(ins);
    id->idReg1(targetReg);
    id->idReg2(op1Reg);

    id->idInsFmt(IF_RWR_RRD_SRD_RRD);
    id->idAddr()->iiaLclVar.initLclVarAddr(varx, offs);

    UNATIVE_OFFSET sz = emitInsSizeSV(id, insCodeRM(ins), varx, offs, ival);
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;
}

// emitIns_R_R_R_R: emits a 4-register-operand AVX blend variant; the fourth
// register is carried in imm8[7:4].
void emitter::emitIns_R_R_R_R(
    instruction ins,
emitAttr attr, regNumber targetReg, regNumber reg1, regNumber reg2, regNumber reg3)
{
    assert(isAvxBlendv(ins));
    assert(UseVEXEncoding());

    // The fourth register operand travels in imm8[7:4].
    int ival = encodeXmmRegAsIval(reg3);

    instrDesc* id = emitNewInstrCns(attr, ival);

    id->idIns(ins);
    id->idInsFmt(IF_RWR_RRD_RRD_RRD);
    id->idReg1(targetReg);
    id->idReg2(reg1);
    id->idReg3(reg2);
    id->idReg4(reg3);

    UNATIVE_OFFSET sz = emitInsSizeRR(id, insCodeRM(ins), ival);
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;
}

/*****************************************************************************
 *
 *  Add an instruction with a register + static member operands.
 */
void emitter::emitIns_R_C(instruction ins, emitAttr attr, regNumber reg, CORINFO_FIELD_HANDLE fldHnd, int offs)
{
    // Static always need relocs
    if (!jitStaticFldIsGlobAddr(fldHnd))
    {
        attr = EA_SET_FLG(attr, EA_DSP_RELOC_FLG);
    }

    emitAttr size = EA_SIZE(attr);

    assert(size <= EA_32BYTE);
    noway_assert(emitVerifyEncodable(ins, size, reg));

    UNATIVE_OFFSET sz;
    instrDesc*     id;

    // Are we MOV'ing the offset of the class variable into EAX?
    if (EA_IS_OFFSET(attr))
    {
        id = emitNewInstrDsp(EA_1BYTE, offs);
        id->idIns(ins);
        id->idInsFmt(IF_RWR_MRD_OFF);
        id->idReg1(reg);

        assert(ins == INS_mov && reg == REG_EAX);

        // Special case: "mov eax, [addr]" is smaller
        sz = 1 + TARGET_POINTER_SIZE;
    }
    else
    {
        insFormat fmt = emitInsModeFormat(ins, IF_RRD_MRD);

        id = emitNewInstrDsp(attr, offs);
        id->idIns(ins);
        id->idInsFmt(fmt);
        id->idReg1(reg);

#ifdef TARGET_X86
        // Special case: "mov eax, [addr]" is smaller.
        // This case is not enabled for amd64 as it always uses RIP relative addressing
        // and it results in smaller instruction size than encoding 64-bit addr in the
        // instruction.
        if (ins == INS_mov && reg == REG_EAX)
        {
            sz = 1 + TARGET_POINTER_SIZE;
            if (size == EA_2BYTE)
                sz += 1;
        }
        else
#endif // TARGET_X86
        {
            sz = emitInsSizeCV(id, insCodeRM(ins));
        }

        // Special case: mov reg, fs:[ddd]
        if (fldHnd == FLD_GLOBAL_FS)
        {
            sz += 1;
        }
    }

    id->idCodeSize(sz);

    id->idAddr()->iiaFieldHnd = fldHnd;

    dispIns(id);
    emitCurIGsize += sz;
}

/*****************************************************************************
 *
 *  Add an instruction with a static member + register operands.
 */
void emitter::emitIns_C_R(instruction ins, emitAttr attr, CORINFO_FIELD_HANDLE fldHnd, regNumber reg, int offs)
{
    // Static always need relocs
    if (!jitStaticFldIsGlobAddr(fldHnd))
    {
        attr = EA_SET_FLG(attr, EA_DSP_RELOC_FLG);
    }

    emitAttr size = EA_SIZE(attr);

#if defined(TARGET_X86)
    // For x86 it is valid to storeind a double sized operand in an xmm reg to memory
    assert(size <= EA_8BYTE);
#else
    assert(size <= EA_PTRSIZE);
#endif

    noway_assert(emitVerifyEncodable(ins, size, reg));

    instrDesc* id  = emitNewInstrDsp(attr, offs);
    insFormat  fmt = emitInsModeFormat(ins, IF_MRD_RRD);

    id->idIns(ins);
    id->idInsFmt(fmt);
    id->idReg1(reg);

    UNATIVE_OFFSET sz;

#ifdef TARGET_X86
    // Special case: "mov [addr], EAX" is smaller.
    // This case is not enabled for amd64 as it always uses RIP relative addressing
    // and it will result in smaller instruction size than encoding 64-bit addr in
    // the instruction.
    if (ins == INS_mov && reg == REG_EAX)
    {
        sz = 1 + TARGET_POINTER_SIZE;
        if (size == EA_2BYTE)
            sz += 1;

        // REX prefix
        if (TakesRexWPrefix(ins, attr) || IsExtendedReg(reg, attr))
        {
            sz += emitGetRexPrefixSize(ins);
        }
    }
    else
#endif // TARGET_X86
    {
        sz = emitInsSizeCV(id, insCodeMR(ins));
    }

    // Special case: mov reg, fs:[ddd]
    if (fldHnd == FLD_GLOBAL_FS)
    {
        sz += 1;
    }

    id->idCodeSize(sz);

    id->idAddr()->iiaFieldHnd = fldHnd;

    dispIns(id);
    emitCurIGsize += sz;
}

/*****************************************************************************
 *
 *  Add an instruction with a static member + constant.
*/
void emitter::emitIns_C_I(instruction ins, emitAttr attr, CORINFO_FIELD_HANDLE fldHnd, int offs, int val)
{
    // Static always need relocs
    if (!jitStaticFldIsGlobAddr(fldHnd))
    {
        attr = EA_SET_FLG(attr, EA_DSP_RELOC_FLG);
    }

    insFormat fmt;

    switch (ins)
    {
        case INS_rcl_N:
        case INS_rcr_N:
        case INS_rol_N:
        case INS_ror_N:
        case INS_shl_N:
        case INS_shr_N:
        case INS_sar_N:
            // Shift/rotate-by-N: the shift count is masked to 7 bits.
            assert(val != 1);
            fmt = IF_MRW_SHF;
            val &= 0x7F;
            break;

        default:
            fmt = emitInsModeFormat(ins, IF_MRD_CNS);
            break;
    }

    instrDesc* id = emitNewInstrCnsDsp(attr, val, offs);
    id->idIns(ins);
    id->idInsFmt(fmt);
    id->idAddr()->iiaFieldHnd = fldHnd;

    code_t         code = insCodeMI(ins);
    UNATIVE_OFFSET sz   = emitInsSizeCV(id, code, val);

    id->idCodeSize(sz);
    dispIns(id);
    emitCurIGsize += sz;
}

// emitIns_J_S: emits a "mov [stack-local], label-address" for the given basic
// block label; the reference is always kept long and queued on the IG jump list.
void emitter::emitIns_J_S(instruction ins, emitAttr attr, BasicBlock* dst, int varx, int offs)
{
    assert(ins == INS_mov);
    assert(dst->bbFlags & BBF_HAS_LABEL);

    instrDescLbl* id = emitNewInstrLbl();

    id->idIns(ins);
    id->idInsFmt(IF_SWR_LABEL);
    id->idAddr()->iiaBBlabel = dst;

    /* The label reference is always long */

    id->idjShort    = 0;
    id->idjKeepLong = 1;

    /* Record the current IG and offset within it */

    id->idjIG   = emitCurIG;
    id->idjOffs = emitCurIGsize;

    /* Append this instruction to this IG's jump list */

    id->idjNext      = emitCurIGjmpList;
    emitCurIGjmpList = id;

    UNATIVE_OFFSET sz = sizeof(INT32) + emitInsSizeSV(id, insCodeMI(ins), varx, offs);
    id->dstLclVar.initLclVarAddr(varx, offs);
#ifdef DEBUG
    id->idDebugOnlyInfo()->idVarRefOffs = emitVarRefOffs;
#endif

#if EMITTER_STATS
    emitTotalIGjmps++;
#endif

#ifndef TARGET_AMD64
    // Storing the address of a basicBlock will need a reloc
    // as the instruction uses the absolute address,
    // not a relative address.
    //
    // On Amd64, Absolute code addresses should always go through a reloc to
    // to be encoded as RIP rel32 offset.
    if (emitComp->opts.compReloc)
#endif
    {
        id->idSetIsDspReloc();
    }

    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;
}

/*****************************************************************************
 *
 *  Add a label instruction.
 */
void emitter::emitIns_R_L(instruction ins, emitAttr attr, BasicBlock* dst, regNumber reg)
{
    assert(ins == INS_lea);
    assert(dst->bbFlags & BBF_HAS_LABEL);

    instrDescJmp* id = emitNewInstrJmp();

    id->idIns(ins);
    id->idReg1(reg);
    id->idInsFmt(IF_RWR_LABEL);
    id->idOpSize(EA_SIZE(attr)); // emitNewInstrJmp() sets the size (incorrectly) to EA_1BYTE
    id->idAddr()->iiaBBlabel = dst;

    /* The label reference is always long */

    id->idjShort    = 0;
    id->idjKeepLong = 1;

    /* Record the current IG and offset within it */

    id->idjIG   = emitCurIG;
    id->idjOffs = emitCurIGsize;

    /* Append this instruction to this IG's jump list */

    id->idjNext      = emitCurIGjmpList;
    emitCurIGjmpList = id;

#ifdef DEBUG
    // Mark the catch return
    if (emitComp->compCurBB->bbJumpKind == BBJ_EHCATCHRET)
    {
        id->idDebugOnlyInfo()->idCatchRet = true;
    }
#endif // DEBUG

#if EMITTER_STATS
    emitTotalIGjmps++;
#endif

    // Set the relocation flags - these give hint to zap to perform
    // relocation of the specified 32bit address.
    //
    // Note the relocation flags influence the size estimate.
    id->idSetRelocFlags(attr);

    UNATIVE_OFFSET sz = emitInsSizeAM(id, insCodeRM(ins));
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;
}

/*****************************************************************************
 *
 *  The following adds instructions referencing address modes.
*/
void emitter::emitIns_I_AR(instruction ins, emitAttr attr, int val, regNumber reg, int disp)
{
    assert((CodeGen::instIsFP(ins) == false) && (EA_SIZE(attr) <= EA_8BYTE));

#ifdef TARGET_AMD64
    // mov reg, imm64 is the only opcode which takes a full 8 byte immediate
    // all other opcodes take a sign-extended 4-byte immediate
    noway_assert(EA_SIZE(attr) < EA_8BYTE || !EA_IS_CNS_RELOC(attr));
#endif

    insFormat fmt;

    switch (ins)
    {
        case INS_rcl_N:
        case INS_rcr_N:
        case INS_rol_N:
        case INS_ror_N:
        case INS_shl_N:
        case INS_shr_N:
        case INS_sar_N:
            // Shift/rotate-by-N: the shift count is masked to 7 bits.
            assert(val != 1);
            fmt = IF_ARW_SHF;
            val &= 0x7F;
            break;

        default:
            fmt = emitInsModeFormat(ins, IF_ARD_CNS);
            break;
    }

    /*
    Useful if you want to trap moves with 0 constant
    if (ins == INS_mov && val == 0 && EA_SIZE(attr) >= EA_4BYTE)
    {
        printf("MOV 0\n");
    }
    */

    UNATIVE_OFFSET sz;
    instrDesc*     id = emitNewInstrAmdCns(attr, disp, val);
    id->idIns(ins);
    id->idInsFmt(fmt);

    id->idAddr()->iiaAddrMode.amBaseReg = reg;
    id->idAddr()->iiaAddrMode.amIndxReg = REG_NA;

    assert(emitGetInsAmdAny(id) == disp); // make sure "disp" is stored properly

    sz = emitInsSizeAM(id, insCodeMI(ins), val);
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;
}

// emitIns_I_AI: like emitIns_I_AR, but the memory operand is the absolute
// address 'disp' (no base or index register).
void emitter::emitIns_I_AI(instruction ins, emitAttr attr, int val, ssize_t disp)
{
    assert((CodeGen::instIsFP(ins) == false) && (EA_SIZE(attr) <= EA_8BYTE));

#ifdef TARGET_AMD64
    // mov reg, imm64 is the only opcode which takes a full 8 byte immediate
    // all other opcodes take a sign-extended 4-byte immediate
    noway_assert(EA_SIZE(attr) < EA_8BYTE || !EA_IS_CNS_RELOC(attr));
#endif

    insFormat fmt;

    switch (ins)
    {
        case INS_rcl_N:
        case INS_rcr_N:
        case INS_rol_N:
        case INS_ror_N:
        case INS_shl_N:
        case INS_shr_N:
        case INS_sar_N:
            // Shift/rotate-by-N: the shift count is masked to 7 bits.
            assert(val != 1);
            fmt = IF_ARW_SHF;
            val &= 0x7F;
            break;

        default:
            fmt = emitInsModeFormat(ins, IF_ARD_CNS);
            break;
    }

    /*
    Useful if you want to trap moves with 0 constant
    if (ins == INS_mov && val == 0 && EA_SIZE(attr) >= EA_4BYTE)
    {
        printf("MOV 0\n");
    }
    */

    UNATIVE_OFFSET sz;
    instrDesc*     id = emitNewInstrAmdCns(attr, disp, val);
    id->idIns(ins);
    id->idInsFmt(fmt);

    id->idAddr()->iiaAddrMode.amBaseReg = REG_NA;
    id->idAddr()->iiaAddrMode.amIndxReg = REG_NA;

    assert(emitGetInsAmdAny(id) == disp); // make sure "disp" is stored properly

    sz = emitInsSizeAM(id, insCodeMI(ins), val);
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;
}

// emitIns_R_AR: convenience wrapper — [base + disp] with no index register.
void emitter::emitIns_R_AR(instruction ins, emitAttr attr, regNumber reg, regNumber base, int disp)
{
    emitIns_R_ARX(ins, attr, reg, base, REG_NA, 1, disp);
}

// emitIns_R_AI: emits an instruction whose source is the absolute address 'disp'.
void emitter::emitIns_R_AI(instruction ins, emitAttr attr, regNumber ireg, ssize_t disp)
{
    assert((CodeGen::instIsFP(ins) == false) && (EA_SIZE(attr) <= EA_8BYTE) && (ireg != REG_NA));
    noway_assert(emitVerifyEncodable(ins, EA_SIZE(attr), ireg));

    UNATIVE_OFFSET sz;
    instrDesc*     id  = emitNewInstrAmd(attr, disp);
    insFormat      fmt = emitInsModeFormat(ins, IF_RRD_ARD);

    id->idIns(ins);
    id->idInsFmt(fmt);
    id->idReg1(ireg);

    id->idAddr()->iiaAddrMode.amBaseReg = REG_NA;
    id->idAddr()->iiaAddrMode.amIndxReg = REG_NA;

    assert(emitGetInsAmdAny(id) == disp); // make sure "disp" is stored properly

    sz = emitInsSizeAM(id, insCodeRM(ins));
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;
}

// emitIns_AR_R: convenience wrapper — [base + disp] with no index register.
void emitter::emitIns_AR_R(instruction ins, emitAttr attr, regNumber reg, regNumber base, cnsval_ssize_t disp)
{
    emitIns_ARX_R(ins, attr, reg, base, REG_NA, 1, disp);
}

//------------------------------------------------------------------------
// emitIns_S_R_I: emits the code for an instruction that takes a stack operand,
// a register operand, and an immediate.
//
// Arguments:
//    ins    - The instruction being emitted
//    attr   - The emit attribute
//    varNum - The varNum of the stack operand
//    offs   - The offset for the stack operand
//    reg    - The register operand
//    ival   - The immediate value
//
void emitter::emitIns_S_R_I(instruction ins, emitAttr attr, int varNum, int offs, regNumber reg, int ival)
{
    // This is only used for INS_vextracti128 and INS_vextractf128, and for these 'ival' must be 0 or 1.
assert(ins == INS_vextracti128 || ins == INS_vextractf128);
    assert((ival == 0) || (ival == 1));

    instrDesc* id = emitNewInstrAmdCns(attr, 0, ival);

    id->idIns(ins);
    id->idInsFmt(IF_SWR_RRD_CNS);
    id->idReg1(reg);
    id->idAddr()->iiaLclVar.initLclVarAddr(varNum, offs);
#ifdef DEBUG
    id->idDebugOnlyInfo()->idVarRefOffs = emitVarRefOffs;
#endif

    UNATIVE_OFFSET sz = emitInsSizeSV(id, insCodeMR(ins), varNum, offs, ival);
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;
}

// emitIns_A_R_I: emits a vextracti128/vextractf128 whose destination is the
// memory address described by 'indir'.
void emitter::emitIns_A_R_I(instruction ins, emitAttr attr, GenTreeIndir* indir, regNumber reg, int imm)
{
    assert((ins == INS_vextracti128) || (ins == INS_vextractf128));
    assert(attr == EA_32BYTE);
    assert(reg != REG_NA);

    instrDesc* id = emitNewInstrAmdCns(attr, indir->Offset(), imm);
    id->idIns(ins);
    id->idReg1(reg);
    emitHandleMemOp(indir, id, IF_AWR_RRD_CNS, ins);
    UNATIVE_OFFSET size = emitInsSizeAM(id, insCodeMR(ins), imm);
    id->idCodeSize(size);
    dispIns(id);
    emitCurIGsize += size;
}

// emitIns_AI_R: emits an instruction whose memory operand is the absolute
// address 'disp'; 'ireg' is optional (REG_NA means no register operand).
void emitter::emitIns_AI_R(instruction ins, emitAttr attr, regNumber ireg, ssize_t disp)
{
    UNATIVE_OFFSET sz;
    instrDesc*     id = emitNewInstrAmd(attr, disp);
    insFormat      fmt;

    if (ireg == REG_NA)
    {
        fmt = emitInsModeFormat(ins, IF_ARD);
    }
    else
    {
        fmt = emitInsModeFormat(ins, IF_ARD_RRD);

        assert((CodeGen::instIsFP(ins) == false) && (EA_SIZE(attr) <= EA_8BYTE));
        noway_assert(emitVerifyEncodable(ins, EA_SIZE(attr), ireg));

        id->idReg1(ireg);
    }

    id->idIns(ins);
    id->idInsFmt(fmt);

    id->idAddr()->iiaAddrMode.amBaseReg = REG_NA;
    id->idAddr()->iiaAddrMode.amIndxReg = REG_NA;

    assert(emitGetInsAmdAny(id) == disp); // make sure "disp" is stored properly

    sz = emitInsSizeAM(id, insCodeMR(ins));
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;

    // push/pop alters the tracked stack depth.
    emitAdjustStackDepthPushPop(ins);
}

// emitIns_I_ARR: emits an instruction with an immediate whose memory operand
// is [reg + rg2 + disp] (index register with scale 1).
void emitter::emitIns_I_ARR(instruction ins, emitAttr attr, int val, regNumber reg, regNumber rg2, int disp)
{
    assert((CodeGen::instIsFP(ins) == false) && (EA_SIZE(attr) <= EA_8BYTE));

#ifdef TARGET_AMD64
    // mov reg, imm64 is the only opcode which takes a full 8 byte immediate
    // all other opcodes take a sign-extended 4-byte immediate
    noway_assert(EA_SIZE(attr) < EA_8BYTE || !EA_IS_CNS_RELOC(attr));
#endif

    insFormat fmt;

    switch (ins)
    {
        case INS_rcl_N:
        case INS_rcr_N:
        case INS_rol_N:
        case INS_ror_N:
        case INS_shl_N:
        case INS_shr_N:
        case INS_sar_N:
            // Shift/rotate-by-N: the shift count is masked to 7 bits.
            assert(val != 1);
            fmt = IF_ARW_SHF;
            val &= 0x7F;
            break;

        default:
            fmt = emitInsModeFormat(ins, IF_ARD_CNS);
            break;
    }

    UNATIVE_OFFSET sz;
    instrDesc*     id = emitNewInstrAmdCns(attr, disp, val);
    id->idIns(ins);
    id->idInsFmt(fmt);

    id->idAddr()->iiaAddrMode.amBaseReg = reg;
    id->idAddr()->iiaAddrMode.amIndxReg = rg2;
    id->idAddr()->iiaAddrMode.amScale   = emitter::OPSZ1;

    assert(emitGetInsAmdAny(id) == disp); // make sure "disp" is stored properly

    sz = emitInsSizeAM(id, insCodeMI(ins), val);
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;
}

// emitIns_R_ARR: convenience wrapper — [base + index + disp] with scale 1.
void emitter::emitIns_R_ARR(instruction ins, emitAttr attr, regNumber reg, regNumber base, regNumber index, int disp)
{
    emitIns_R_ARX(ins, attr, reg, base, index, 1, disp);
}

// emitIns_ARR_R: convenience wrapper — [base + index + disp] with scale 1.
void emitter::emitIns_ARR_R(instruction ins, emitAttr attr, regNumber reg, regNumber base, regNumber index, int disp)
{
    emitIns_ARX_R(ins, attr, reg, base, index, 1, disp);
}

// emitIns_I_ARX: emits an instruction with an immediate whose memory operand
// is [reg + rg2 * mul + disp].
void emitter::emitIns_I_ARX(
    instruction ins, emitAttr attr, int val, regNumber reg, regNumber rg2, unsigned mul, int disp)
{
    assert((CodeGen::instIsFP(ins) == false) && (EA_SIZE(attr) <= EA_8BYTE));

#ifdef TARGET_AMD64
    // mov reg, imm64 is the only opcode which takes a full 8 byte immediate
    // all other opcodes take a sign-extended 4-byte immediate
    noway_assert(EA_SIZE(attr) < EA_8BYTE || !EA_IS_CNS_RELOC(attr));
#endif

    insFormat fmt;

    switch (ins)
    {
        case INS_rcl_N:
        case INS_rcr_N:
        case INS_rol_N:
        case INS_ror_N:
        case INS_shl_N:
        case INS_shr_N:
        case INS_sar_N:
            // Shift/rotate-by-N: the shift count is masked to 7 bits.
            assert(val != 1);
            fmt = IF_ARW_SHF;
            val &= 0x7F;
            break;

        default:
            fmt = emitInsModeFormat(ins, IF_ARD_CNS);
            break;
    }

    UNATIVE_OFFSET sz;
    instrDesc*     id = emitNewInstrAmdCns(attr, disp, val);
    id->idIns(ins);
    id->idInsFmt(fmt);

    id->idAddr()->iiaAddrMode.amBaseReg
= reg;
    id->idAddr()->iiaAddrMode.amIndxReg = rg2;
    id->idAddr()->iiaAddrMode.amScale   = emitEncodeScale(mul);

    assert(emitGetInsAmdAny(id) == disp); // make sure "disp" is stored properly

    sz = emitInsSizeAM(id, insCodeMI(ins), val);
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;
}

// emitIns_R_ARX: emits an instruction whose memory operand is
// [base + index * scale + disp], with register destination/source 'reg'.
void emitter::emitIns_R_ARX(
    instruction ins, emitAttr attr, regNumber reg, regNumber base, regNumber index, unsigned scale, int disp)
{
    assert(!CodeGen::instIsFP(ins) && (EA_SIZE(attr) <= EA_32BYTE) && (reg != REG_NA));
    noway_assert(emitVerifyEncodable(ins, EA_SIZE(attr), reg));

    // "lea reg, [reg]" is a no-op; elide it here.
    if ((ins == INS_lea) && (reg == base) && (index == REG_NA) && (disp == 0))
    {
        // Maybe the emitter is not the common place for this optimization, but it's a better choke point
        // for all the emitIns(ins, tree), we would have to be analyzing at each call site
        //
        return;
    }

    UNATIVE_OFFSET sz;
    instrDesc*     id  = emitNewInstrAmd(attr, disp);
    insFormat      fmt = emitInsModeFormat(ins, IF_RRD_ARD);

    id->idIns(ins);
    id->idInsFmt(fmt);
    id->idReg1(reg);

    id->idAddr()->iiaAddrMode.amBaseReg = base;
    id->idAddr()->iiaAddrMode.amIndxReg = index;
    id->idAddr()->iiaAddrMode.amScale   = emitEncodeScale(scale);

    assert(emitGetInsAmdAny(id) == disp); // make sure "disp" is stored properly

    sz = emitInsSizeAM(id, insCodeRM(ins));
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;
}

// emitIns_ARX_R: emits an instruction whose memory operand is
// [base + index * scale + disp]; 'reg' is optional (REG_NA means none).
void emitter::emitIns_ARX_R(
    instruction ins, emitAttr attr, regNumber reg, regNumber base, regNumber index, unsigned scale, cnsval_ssize_t disp)
{
    UNATIVE_OFFSET sz;
    instrDesc*     id = emitNewInstrAmd(attr, disp);
    insFormat      fmt;

    if (reg == REG_NA)
    {
        fmt = emitInsModeFormat(ins, IF_ARD);
    }
    else
    {
        fmt = emitInsModeFormat(ins, IF_ARD_RRD);

        noway_assert(emitVerifyEncodable(ins, EA_SIZE(attr), reg));
        assert(!CodeGen::instIsFP(ins) && (EA_SIZE(attr) <= EA_32BYTE));

        id->idReg1(reg);
    }

    id->idIns(ins);
    id->idInsFmt(fmt);

    id->idAddr()->iiaAddrMode.amBaseReg = base;
    id->idAddr()->iiaAddrMode.amIndxReg = index;
    id->idAddr()->iiaAddrMode.amScale   = emitEncodeScale(scale);

    assert(emitGetInsAmdAny(id) == disp); // make sure "disp" is stored properly

    sz = emitInsSizeAM(id, insCodeMR(ins));
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;

    // push/pop alters the tracked stack depth.
    emitAdjustStackDepthPushPop(ins);
}

// emitIns_I_AX: emits an instruction with an immediate whose memory operand
// is the scaled-index address [reg * mul + disp] (no base register).
void emitter::emitIns_I_AX(instruction ins, emitAttr attr, int val, regNumber reg, unsigned mul, int disp)
{
    assert((CodeGen::instIsFP(ins) == false) && (EA_SIZE(attr) <= EA_8BYTE));

#ifdef TARGET_AMD64
    // mov reg, imm64 is the only opcode which takes a full 8 byte immediate
    // all other opcodes take a sign-extended 4-byte immediate
    noway_assert(EA_SIZE(attr) < EA_8BYTE || !EA_IS_CNS_RELOC(attr));
#endif

    insFormat fmt;

    switch (ins)
    {
        case INS_rcl_N:
        case INS_rcr_N:
        case INS_rol_N:
        case INS_ror_N:
        case INS_shl_N:
        case INS_shr_N:
        case INS_sar_N:
            // Shift/rotate-by-N: the shift count is masked to 7 bits.
            assert(val != 1);
            fmt = IF_ARW_SHF;
            val &= 0x7F;
            break;

        default:
            fmt = emitInsModeFormat(ins, IF_ARD_CNS);
            break;
    }

    UNATIVE_OFFSET sz;
    instrDesc*     id = emitNewInstrAmdCns(attr, disp, val);
    id->idIns(ins);
    id->idInsFmt(fmt);

    id->idAddr()->iiaAddrMode.amBaseReg = REG_NA;
    id->idAddr()->iiaAddrMode.amIndxReg = reg;
    id->idAddr()->iiaAddrMode.amScale   = emitEncodeScale(mul);

    assert(emitGetInsAmdAny(id) == disp); // make sure "disp" is stored properly

    sz = emitInsSizeAM(id, insCodeMI(ins), val);
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;
}

// emitIns_R_AX: emits an instruction reading/writing register 'ireg' with the
// scaled-index memory operand [reg * mul + disp].
void emitter::emitIns_R_AX(instruction ins, emitAttr attr, regNumber ireg, regNumber reg, unsigned mul, int disp)
{
    assert((CodeGen::instIsFP(ins) == false) && (EA_SIZE(attr) <= EA_8BYTE) && (ireg != REG_NA));
    noway_assert(emitVerifyEncodable(ins, EA_SIZE(attr), ireg));

    UNATIVE_OFFSET sz;
    instrDesc*     id  = emitNewInstrAmd(attr, disp);
    insFormat      fmt = emitInsModeFormat(ins, IF_RRD_ARD);

    id->idIns(ins);
    id->idInsFmt(fmt);
    id->idReg1(ireg);

    id->idAddr()->iiaAddrMode.amBaseReg = REG_NA;
    id->idAddr()->iiaAddrMode.amIndxReg = reg;
    id->idAddr()->iiaAddrMode.amScale   = emitEncodeScale(mul);

    assert(emitGetInsAmdAny(id) == disp); // make sure "disp" is stored properly

    sz = emitInsSizeAM(id,
insCodeRM(ins));
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;
}

// emitIns_AX_R: emits an instruction whose memory operand is the scaled-index
// address [reg * mul + disp]; 'ireg' is optional (REG_NA means none).
void emitter::emitIns_AX_R(instruction ins, emitAttr attr, regNumber ireg, regNumber reg, unsigned mul, int disp)
{
    UNATIVE_OFFSET sz;
    instrDesc*     id = emitNewInstrAmd(attr, disp);
    insFormat      fmt;

    if (ireg == REG_NA)
    {
        fmt = emitInsModeFormat(ins, IF_ARD);
    }
    else
    {
        fmt = emitInsModeFormat(ins, IF_ARD_RRD);

        noway_assert(emitVerifyEncodable(ins, EA_SIZE(attr), ireg));
        assert((CodeGen::instIsFP(ins) == false) && (EA_SIZE(attr) <= EA_8BYTE));

        id->idReg1(ireg);
    }

    id->idIns(ins);
    id->idInsFmt(fmt);

    id->idAddr()->iiaAddrMode.amBaseReg = REG_NA;
    id->idAddr()->iiaAddrMode.amIndxReg = reg;
    id->idAddr()->iiaAddrMode.amScale   = emitEncodeScale(mul);

    assert(emitGetInsAmdAny(id) == disp); // make sure "disp" is stored properly

    sz = emitInsSizeAM(id, insCodeMR(ins));
    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;

    // push/pop alters the tracked stack depth.
    emitAdjustStackDepthPushPop(ins);
}

//------------------------------------------------------------------------
// emitIns_SIMD_R_R_I: emits the code for an instruction that takes a register operand, an immediate operand
// and that returns a value in register
//
// Arguments:
//    ins       -- The instruction being emitted
//    attr      -- The emit attribute
//    targetReg -- The target register
//    op1Reg    -- The register of the first operand
//    ival      -- The immediate value
//
// Notes:
//    This will handle the required register copy if 'op1Reg' and 'targetReg' are not the same, and
//    the 3-operand format is not available.
//    This is not really SIMD-specific, but is currently only used in that context, as that's
//    where we frequently need to handle the case of generating 3-operand or 2-operand forms
//    depending on what target ISA is supported.
//
void emitter::emitIns_SIMD_R_R_I(instruction ins, emitAttr attr, regNumber targetReg, regNumber op1Reg, int ival)
{
    if (UseVEXEncoding() || IsDstSrcImmAvxInstruction(ins))
    {
        emitIns_R_R_I(ins, attr, targetReg, op1Reg, ival);
    }
    else
    {
        // No 3-operand form: copy op1 into the target first (skipped if same reg).
        emitIns_Mov(INS_movaps, attr, targetReg, op1Reg, /* canSkip */ true);
        emitIns_R_I(ins, attr, targetReg, ival);
    }
}

//------------------------------------------------------------------------
// emitIns_SIMD_R_R_A: emits the code for a SIMD instruction that takes a register operand, a GenTreeIndir address,
// and that returns a value in register
//
// Arguments:
//    ins       -- The instruction being emitted
//    attr      -- The emit attribute
//    targetReg -- The target register
//    op1Reg    -- The register of the first operand
//    indir     -- The GenTreeIndir used for the memory address
//
void emitter::emitIns_SIMD_R_R_A(
    instruction ins, emitAttr attr, regNumber targetReg, regNumber op1Reg, GenTreeIndir* indir)
{
    if (UseVEXEncoding())
    {
        emitIns_R_R_A(ins, attr, targetReg, op1Reg, indir);
    }
    else
    {
        // No 3-operand form: copy op1 into the target first (skipped if same reg).
        emitIns_Mov(INS_movaps, attr, targetReg, op1Reg, /* canSkip */ true);
        emitIns_R_A(ins, attr, targetReg, indir);
    }
}

//------------------------------------------------------------------------
// emitIns_SIMD_R_R_AR: emits the code for a SIMD instruction that takes a register operand, a base memory register,
// and that returns a value in register
//
// Arguments:
//    ins       -- The instruction being emitted
//    attr      -- The emit attribute
//    targetReg -- The target register
//    op1Reg    -- The register of the first operand
//    base      -- The base register used for the memory address
//    offset    -- The memory offset
//
void emitter::emitIns_SIMD_R_R_AR(
    instruction ins, emitAttr attr, regNumber targetReg, regNumber op1Reg, regNumber base, int offset)
{
    if (UseVEXEncoding())
    {
        emitIns_R_R_AR(ins, attr, targetReg, op1Reg, base, offset);
    }
    else
    {
        // No 3-operand form: copy op1 into the target first (skipped if same reg).
        emitIns_Mov(INS_movaps, attr, targetReg, op1Reg, /* canSkip */ true);
        emitIns_R_AR(ins, attr, targetReg, base, offset);
    }
}
//------------------------------------------------------------------------ // emitIns_SIMD_R_R_C: emits the code for a SIMD instruction that takes a register operand, a field handle + offset, // and that returns a value in register // // Arguments: // ins -- The instruction being emitted // attr -- The emit attribute // targetReg -- The target register // op1Reg -- The register of the first operand // fldHnd -- The CORINFO_FIELD_HANDLE used for the memory address // offs -- The offset added to the memory address from fldHnd // void emitter::emitIns_SIMD_R_R_C( instruction ins, emitAttr attr, regNumber targetReg, regNumber op1Reg, CORINFO_FIELD_HANDLE fldHnd, int offs) { if (UseVEXEncoding()) { emitIns_R_R_C(ins, attr, targetReg, op1Reg, fldHnd, offs); } else { emitIns_Mov(INS_movaps, attr, targetReg, op1Reg, /* canSkip */ true); emitIns_R_C(ins, attr, targetReg, fldHnd, offs); } } //------------------------------------------------------------------------ // emitIns_SIMD_R_R_R: emits the code for a SIMD instruction that takes two register operands, and that returns a // value in register // // Arguments: // ins -- The instruction being emitted // attr -- The emit attribute // targetReg -- The target register // op1Reg -- The register of the first operand // op2Reg -- The register of the second operand // void emitter::emitIns_SIMD_R_R_R( instruction ins, emitAttr attr, regNumber targetReg, regNumber op1Reg, regNumber op2Reg) { if (UseVEXEncoding()) { emitIns_R_R_R(ins, attr, targetReg, op1Reg, op2Reg); } else { // Ensure we aren't overwriting op2 assert((op2Reg != targetReg) || (op1Reg == targetReg)); emitIns_Mov(INS_movaps, attr, targetReg, op1Reg, /* canSkip */ true); if (IsMovInstruction(ins)) { emitIns_Mov(ins, attr, targetReg, op2Reg, /* canSkip */ false); } else { emitIns_R_R(ins, attr, targetReg, op2Reg); } } } //------------------------------------------------------------------------ // emitIns_SIMD_R_R_S: emits the code for a SIMD instruction that takes a 
// register operand, a variable index + offset,
// and that returns a value in register
//
// Arguments:
//    ins       -- The instruction being emitted
//    attr      -- The emit attribute
//    targetReg -- The target register
//    op1Reg    -- The register of the first operand
//    varx      -- The variable index used for the memory address
//    offs      -- The offset added to the memory address from varx
//
void emitter::emitIns_SIMD_R_R_S(
    instruction ins, emitAttr attr, regNumber targetReg, regNumber op1Reg, int varx, int offs)
{
    if (UseVEXEncoding())
    {
        emitIns_R_R_S(ins, attr, targetReg, op1Reg, varx, offs);
    }
    else
    {
        // Non-VEX encodings are destructive: copy op1 into the target first
        // (elided when the registers already match), then use the 2-operand form.
        emitIns_Mov(INS_movaps, attr, targetReg, op1Reg, /* canSkip */ true);
        emitIns_R_S(ins, attr, targetReg, varx, offs);
    }
}

#ifdef FEATURE_HW_INTRINSICS
//------------------------------------------------------------------------
// emitIns_SIMD_R_R_A_I: emits the code for a SIMD instruction that takes a register operand, a GenTreeIndir address,
// an immediate operand, and that returns a value in register
//
// Arguments:
//    ins       -- The instruction being emitted
//    attr      -- The emit attribute
//    targetReg -- The target register
//    op1Reg    -- The register of the first operand
//    indir     -- The GenTreeIndir used for the memory address
//    ival      -- The immediate value
//
void emitter::emitIns_SIMD_R_R_A_I(
    instruction ins, emitAttr attr, regNumber targetReg, regNumber op1Reg, GenTreeIndir* indir, int ival)
{
    if (UseVEXEncoding())
    {
        emitIns_R_R_A_I(ins, attr, targetReg, op1Reg, indir, ival, IF_RWR_RRD_ARD_CNS);
    }
    else
    {
        emitIns_Mov(INS_movaps, attr, targetReg, op1Reg, /* canSkip */ true);
        emitIns_R_A_I(ins, attr, targetReg, indir, ival);
    }
}

//------------------------------------------------------------------------
// emitIns_SIMD_R_R_AR_I: emits the code for a SIMD instruction that takes a register operand, a base memory register,
// an immediate operand, and that returns a value in register
//
// Arguments:
//    ins       -- The instruction being emitted
//    attr      -- The emit attribute
//    targetReg -- The
// target register
//    op1Reg    -- The register of the first operand
//    base      -- The base register used for the memory address
//    ival      -- The immediate value
//
void emitter::emitIns_SIMD_R_R_AR_I(
    instruction ins, emitAttr attr, regNumber targetReg, regNumber op1Reg, regNumber base, int ival)
{
    if (UseVEXEncoding())
    {
        emitIns_R_R_AR_I(ins, attr, targetReg, op1Reg, base, 0, ival);
    }
    else
    {
        // Non-VEX encodings are destructive: copy op1 into the target first
        // (elided when the registers already match), then use the 2-operand form.
        emitIns_Mov(INS_movaps, attr, targetReg, op1Reg, /* canSkip */ true);
        emitIns_R_AR_I(ins, attr, targetReg, base, 0, ival);
    }
}

//------------------------------------------------------------------------
// emitIns_SIMD_R_R_C_I: emits the code for a SIMD instruction that takes a register operand, a field handle + offset,
// an immediate operand, and that returns a value in register
//
// Arguments:
//    ins       -- The instruction being emitted
//    attr      -- The emit attribute
//    targetReg -- The target register
//    op1Reg    -- The register of the first operand
//    fldHnd    -- The CORINFO_FIELD_HANDLE used for the memory address
//    offs      -- The offset added to the memory address from fldHnd
//    ival      -- The immediate value
//
void emitter::emitIns_SIMD_R_R_C_I(instruction          ins,
                                   emitAttr             attr,
                                   regNumber            targetReg,
                                   regNumber            op1Reg,
                                   CORINFO_FIELD_HANDLE fldHnd,
                                   int                  offs,
                                   int                  ival)
{
    if (UseVEXEncoding())
    {
        emitIns_R_R_C_I(ins, attr, targetReg, op1Reg, fldHnd, offs, ival);
    }
    else
    {
        emitIns_Mov(INS_movaps, attr, targetReg, op1Reg, /* canSkip */ true);
        emitIns_R_C_I(ins, attr, targetReg, fldHnd, offs, ival);
    }
}

//------------------------------------------------------------------------
// emitIns_SIMD_R_R_R_I: emits the code for a SIMD instruction that takes two register operands, an immediate operand,
// and that returns a value in register
//
// Arguments:
//    ins       -- The instruction being emitted
//    attr      -- The emit attribute
//    targetReg -- The target register
//    op1Reg    -- The register of the first operand
//    op2Reg    -- The register of the second operand
//    ival      -- The immediate value
//
void emitter::emitIns_SIMD_R_R_R_I(
    instruction ins, emitAttr attr, regNumber targetReg, regNumber op1Reg, regNumber op2Reg, int ival)
{
    if (UseVEXEncoding())
    {
        emitIns_R_R_R_I(ins, attr, targetReg, op1Reg, op2Reg, ival);
    }
    else
    {
        // Ensure we aren't overwriting op2
        assert((op2Reg != targetReg) || (op1Reg == targetReg));

        emitIns_Mov(INS_movaps, attr, targetReg, op1Reg, /* canSkip */ true);
        emitIns_R_R_I(ins, attr, targetReg, op2Reg, ival);
    }
}

//------------------------------------------------------------------------
// emitIns_SIMD_R_R_S_I: emits the code for a SIMD instruction that takes a register operand, a variable index + offset,
// an immediate operand, and that returns a value in register
//
// Arguments:
//    ins       -- The instruction being emitted
//    attr      -- The emit attribute
//    targetReg -- The target register
//    op1Reg    -- The register of the first operand
//    varx      -- The variable index used for the memory address
//    offs      -- The offset added to the memory address from varx
//    ival      -- The immediate value
//
void emitter::emitIns_SIMD_R_R_S_I(
    instruction ins, emitAttr attr, regNumber targetReg, regNumber op1Reg, int varx, int offs, int ival)
{
    if (UseVEXEncoding())
    {
        emitIns_R_R_S_I(ins, attr, targetReg, op1Reg, varx, offs, ival);
    }
    else
    {
        emitIns_Mov(INS_movaps, attr, targetReg, op1Reg, /* canSkip */ true);
        emitIns_R_S_I(ins, attr, targetReg, varx, offs, ival);
    }
}

//------------------------------------------------------------------------
// emitIns_SIMD_R_R_R_A: emits the code for a SIMD instruction that takes two register operands, a GenTreeIndir address,
// and that returns a value in register
//
// Arguments:
//    ins       -- The instruction being emitted
//    attr      -- The emit attribute
//    targetReg -- The target register
//    op1Reg    -- The register of the first operand
//    op2Reg    -- The register of the second operand
//    indir     -- The GenTreeIndir used for the memory address
//
void emitter::emitIns_SIMD_R_R_R_A(
    instruction ins, emitAttr attr, regNumber targetReg, regNumber op1Reg, regNumber op2Reg, GenTreeIndir* indir)
{
    // Only FMA/AVX-VNNI instructions come through here; both require VEX.
    assert(IsFMAInstruction(ins) || IsAVXVNNIInstruction(ins));
    assert(UseVEXEncoding());

    // Ensure we aren't overwriting op2
    assert((op2Reg != targetReg) || (op1Reg == targetReg));

    emitIns_Mov(INS_movaps, attr, targetReg, op1Reg, /* canSkip */ true);
    emitIns_R_R_A(ins, attr, targetReg, op2Reg, indir);
}

//------------------------------------------------------------------------
// emitIns_SIMD_R_R_R_AR: emits the code for a SIMD instruction that takes two register operands, a base memory
// register, and that returns a value in register
//
// Arguments:
//    ins       -- The instruction being emitted
//    attr      -- The emit attribute
//    targetReg -- The target register
//    op1Reg    -- The register of the first operand
//    op2Reg    -- The register of the second operand
//    base      -- The base register used for the memory address
//
void emitter::emitIns_SIMD_R_R_R_AR(
    instruction ins, emitAttr attr, regNumber targetReg, regNumber op1Reg, regNumber op2Reg, regNumber base)
{
    assert(IsFMAInstruction(ins));
    assert(UseVEXEncoding());

    // Ensure we aren't overwriting op2
    assert((op2Reg != targetReg) || (op1Reg == targetReg));

    emitIns_Mov(INS_movaps, attr, targetReg, op1Reg, /* canSkip */ true);
    emitIns_R_R_AR(ins, attr, targetReg, op2Reg, base, 0);
}

//------------------------------------------------------------------------
// emitIns_SIMD_R_R_R_C: emits the code for a SIMD instruction that takes two register operands, a field handle +
// offset, and that returns a value in register
//
// Arguments:
//    ins       -- The instruction being emitted
//    attr      -- The emit attribute
//    targetReg -- The target register
//    op1Reg    -- The register of the first operand
//    op2Reg    -- The register of the second operand
//    fldHnd    -- The CORINFO_FIELD_HANDLE used for the memory address
//    offs      -- The offset added to the memory address from fldHnd
//
void emitter::emitIns_SIMD_R_R_R_C(instruction          ins,
                                   emitAttr             attr,
                                   regNumber            targetReg,
                                   regNumber            op1Reg,
                                   regNumber            op2Reg,
                                   CORINFO_FIELD_HANDLE fldHnd,
                                   int                  offs)
{
    assert(IsFMAInstruction(ins));
    assert(UseVEXEncoding());

    // Ensure we aren't overwriting op2
    assert((op2Reg != targetReg) || (op1Reg == targetReg));

    emitIns_Mov(INS_movaps, attr, targetReg, op1Reg, /* canSkip */ true);
    emitIns_R_R_C(ins, attr, targetReg, op2Reg, fldHnd, offs);
}

//------------------------------------------------------------------------
// emitIns_SIMD_R_R_R_R: emits the code for a SIMD instruction that takes three register operands, and that returns a
// value in register
//
// Arguments:
//    ins       -- The instruction being emitted
//    attr      -- The emit attribute
//    targetReg -- The target register
//    op1Reg    -- The register of the first operand
//    op2Reg    -- The register of the second operand
//    op3Reg    -- The register of the third operand
//
void emitter::emitIns_SIMD_R_R_R_R(
    instruction ins, emitAttr attr, regNumber targetReg, regNumber op1Reg, regNumber op2Reg, regNumber op3Reg)
{
    if (IsFMAInstruction(ins) || IsAVXVNNIInstruction(ins))
    {
        assert(UseVEXEncoding());

        // Ensure we aren't overwriting op2 or op3
        assert((op2Reg != targetReg) || (op1Reg == targetReg));
        assert((op3Reg != targetReg) || (op1Reg == targetReg));

        emitIns_Mov(INS_movaps, attr, targetReg, op1Reg, /* canSkip */ true);
        emitIns_R_R_R(ins, attr, targetReg, op2Reg, op3Reg);
    }
    else if (UseVEXEncoding())
    {
        assert(isAvxBlendv(ins) || isSse41Blendv(ins));

        // convert SSE encoding of SSE4.1 instructions to VEX encoding
        switch (ins)
        {
            case INS_blendvps:
                ins = INS_vblendvps;
                break;
            case INS_blendvpd:
                ins = INS_vblendvpd;
                break;
            case INS_pblendvb:
                ins = INS_vpblendvb;
                break;
            default:
                break;
        }
        emitIns_R_R_R_R(ins, attr, targetReg, op1Reg, op2Reg, op3Reg);
    }
    else
    {
        assert(isSse41Blendv(ins));

        // Ensure we aren't overwriting op1 or op2
        assert((op1Reg != REG_XMM0) || (op3Reg == REG_XMM0));
        assert((op2Reg != REG_XMM0) || (op3Reg == REG_XMM0));

        // SSE4.1 blendv* hardcode the mask vector (op3) in XMM0
        emitIns_Mov(INS_movaps, attr, REG_XMM0, op3Reg, /* canSkip */ true);

        // Ensure we aren't overwriting op2 or op3 (which should be REG_XMM0)
        assert((op2Reg != targetReg) || (op1Reg == targetReg));
        assert(targetReg != REG_XMM0);

        emitIns_Mov(INS_movaps, attr, targetReg, op1Reg, /* canSkip */ true);
        emitIns_R_R(ins, attr, targetReg, op2Reg);
    }
}

//------------------------------------------------------------------------
// emitIns_SIMD_R_R_R_S: emits the code for a SIMD instruction that takes two register operands, a variable index +
// offset, and that returns a value in register
//
// Arguments:
//    ins       -- The instruction being emitted
//    attr      -- The emit attribute
//    targetReg -- The target register
//    op1Reg    -- The register of the first operand
//    op2Reg    -- The register of the second operand
//    varx      -- The variable index used for the memory address
//    offs      -- The offset added to the memory address from varx
//
void emitter::emitIns_SIMD_R_R_R_S(
    instruction ins, emitAttr attr, regNumber targetReg, regNumber op1Reg, regNumber op2Reg, int varx, int offs)
{
    assert(IsFMAInstruction(ins) || IsAVXVNNIInstruction(ins));
    assert(UseVEXEncoding());

    // Ensure we aren't overwriting op2
    assert((op2Reg != targetReg) || (op1Reg == targetReg));

    emitIns_Mov(INS_movaps, attr, targetReg, op1Reg, /* canSkip */ true);
    emitIns_R_R_S(ins, attr, targetReg, op2Reg, varx, offs);
}

//------------------------------------------------------------------------
// emitIns_SIMD_R_R_A_R: emits the code for a SIMD instruction that takes a register operand, a GenTreeIndir address,
// another register operand, and that returns a value in register
//
// Arguments:
//    ins       -- The instruction being emitted
//    attr      -- The emit attribute
//    targetReg -- The target register
//    op1Reg    -- The register of the first operand
//    op3Reg    -- The register of the third operand
//    indir     -- The GenTreeIndir used for the memory address
//
void emitter::emitIns_SIMD_R_R_A_R(
    instruction ins, emitAttr attr, regNumber targetReg, regNumber op1Reg, regNumber op3Reg, GenTreeIndir* indir)
{
    if (UseVEXEncoding())
    {
        assert(isAvxBlendv(ins) || isSse41Blendv(ins));

        // convert SSE encoding of SSE4.1 instructions to VEX encoding
        switch (ins)
        {
            case INS_blendvps:
            {
                ins = INS_vblendvps;
                break;
            }

            case INS_blendvpd:
            {
                ins = INS_vblendvpd;
                break;
            }

            case INS_pblendvb:
            {
                ins = INS_vpblendvb;
                break;
            }

            default:
            {
                break;
            }
        }

        emitIns_R_R_A_R(ins, attr, targetReg, op1Reg, op3Reg, indir);
    }
    else
    {
        assert(isSse41Blendv(ins));

        // Ensure we aren't overwriting op1
        assert(op1Reg != REG_XMM0);

        // SSE4.1 blendv* hardcode the mask vector (op3) in XMM0
        emitIns_Mov(INS_movaps, attr, REG_XMM0, op3Reg, /* canSkip */ true);

        // Ensure we aren't overwriting op3 (which should be REG_XMM0)
        assert(targetReg != REG_XMM0);

        emitIns_Mov(INS_movaps, attr, targetReg, op1Reg, /* canSkip */ true);
        emitIns_R_A(ins, attr, targetReg, indir);
    }
}

//------------------------------------------------------------------------
// emitIns_SIMD_R_R_AR_R: emits the code for a SIMD instruction that takes a register operand, a base memory
// register, another register operand, and that returns a value in register
//
// Arguments:
//    ins       -- The instruction being emitted
//    attr      -- The emit attribute
//    targetReg -- The target register
//    op1Reg    -- The register of the first operand
//    op3Reg    -- The register of the third operand
//    base      -- The base register used for the memory address
//
void emitter::emitIns_SIMD_R_R_AR_R(
    instruction ins, emitAttr attr, regNumber targetReg, regNumber op1Reg, regNumber op3Reg, regNumber base)
{
    if (UseVEXEncoding())
    {
        assert(isAvxBlendv(ins) || isSse41Blendv(ins));

        // convert SSE encoding of SSE4.1 instructions to VEX encoding
        switch (ins)
        {
            case INS_blendvps:
            {
                ins = INS_vblendvps;
                break;
            }

            case INS_blendvpd:
            {
                ins = INS_vblendvpd;
                break;
            }

            case INS_pblendvb:
            {
                ins = INS_vpblendvb;
                break;
            }

            default:
            {
                break;
            }
        }

        emitIns_R_R_AR_R(ins, attr, targetReg, op1Reg, op3Reg, base, 0);
    }
    else
    {
        assert(isSse41Blendv(ins));

        // Ensure we aren't overwriting op1
        assert(op1Reg != REG_XMM0);

        // SSE4.1 blendv* hardcode the mask vector (op3) in XMM0
        emitIns_Mov(INS_movaps, attr, REG_XMM0, op3Reg, /* canSkip */ true);

        // Ensure we aren't overwriting op3 (which should be REG_XMM0)
        assert(targetReg != REG_XMM0);

        emitIns_Mov(INS_movaps, attr, targetReg, op1Reg, /* canSkip */ true);
        emitIns_R_AR(ins, attr, targetReg, base, 0);
    }
}

//------------------------------------------------------------------------
// emitIns_SIMD_R_R_C_R: emits the code for a SIMD instruction that takes a register operand, a field handle +
// offset, another register operand, and that returns a value in register
//
// Arguments:
//    ins       -- The instruction being emitted
//    attr      -- The emit attribute
//    targetReg -- The target register
//    op1Reg    -- The register of the first operand
//    op3Reg    -- The register of the third operand
//    fldHnd    -- The CORINFO_FIELD_HANDLE used for the memory address
//    offs      -- The offset added to the memory address from fldHnd
//
void emitter::emitIns_SIMD_R_R_C_R(instruction          ins,
                                   emitAttr             attr,
                                   regNumber            targetReg,
                                   regNumber            op1Reg,
                                   regNumber            op3Reg,
                                   CORINFO_FIELD_HANDLE fldHnd,
                                   int                  offs)
{
    if (UseVEXEncoding())
    {
        assert(isAvxBlendv(ins) || isSse41Blendv(ins));

        // convert SSE encoding of SSE4.1 instructions to VEX encoding
        switch (ins)
        {
            case INS_blendvps:
            {
                ins = INS_vblendvps;
                break;
            }

            case INS_blendvpd:
            {
                ins = INS_vblendvpd;
                break;
            }

            case INS_pblendvb:
            {
                ins = INS_vpblendvb;
                break;
            }

            default:
            {
                break;
            }
        }

        emitIns_R_R_C_R(ins, attr, targetReg, op1Reg, op3Reg, fldHnd, offs);
    }
    else
    {
        assert(isSse41Blendv(ins));

        // Ensure we aren't overwriting op1
        assert(op1Reg != REG_XMM0);

        // SSE4.1 blendv* hardcode the mask vector (op3) in XMM0
        emitIns_Mov(INS_movaps, attr, REG_XMM0, op3Reg, /* canSkip */ true);

        // Ensure we aren't overwriting op3 (which should be REG_XMM0)
        assert(targetReg != REG_XMM0);

        emitIns_Mov(INS_movaps, attr, targetReg, op1Reg, /* canSkip */ true);
        emitIns_R_C(ins, attr, targetReg, fldHnd, offs);
    }
}
//------------------------------------------------------------------------ // emitIns_SIMD_R_R_S_R: emits the code for a SIMD instruction that takes a register operand, a variable index + // offset, another register operand, and that returns a value in register // // Arguments: // ins -- The instruction being emitted // attr -- The emit attribute // targetReg -- The target register // op1Reg -- The register of the first operand // op3Reg -- The register of the third operand // varx -- The variable index used for the memory address // offs -- The offset added to the memory address from varx // void emitter::emitIns_SIMD_R_R_S_R( instruction ins, emitAttr attr, regNumber targetReg, regNumber op1Reg, regNumber op3Reg, int varx, int offs) { if (UseVEXEncoding()) { assert(isAvxBlendv(ins) || isSse41Blendv(ins)); // convert SSE encoding of SSE4.1 instructions to VEX encoding switch (ins) { case INS_blendvps: { ins = INS_vblendvps; break; } case INS_blendvpd: { ins = INS_vblendvpd; break; } case INS_pblendvb: { ins = INS_vpblendvb; break; } default: { break; } } emitIns_R_R_S_R(ins, attr, targetReg, op1Reg, op3Reg, varx, offs); } else { assert(isSse41Blendv(ins)); // Ensure we aren't overwriting op1 assert(op1Reg != REG_XMM0); // SSE4.1 blendv* hardcode the mask vector (op3) in XMM0 emitIns_Mov(INS_movaps, attr, REG_XMM0, op3Reg, /* canSkip */ true); // Ensure we aren't overwriting op3 (which should be REG_XMM0) assert(targetReg != REG_XMM0); emitIns_Mov(INS_movaps, attr, targetReg, op1Reg, /* canSkip */ true); emitIns_R_S(ins, attr, targetReg, varx, offs); } } #endif // FEATURE_HW_INTRINSICS /***************************************************************************** * * The following add instructions referencing stack-based local variables. 
 */

//------------------------------------------------------------------------
// emitIns_S: emits an instruction whose only operand is a stack-based local
// variable (varx at offset offs).
//
void emitter::emitIns_S(instruction ins, emitAttr attr, int varx, int offs)
{
    UNATIVE_OFFSET sz;
    instrDesc*     id  = emitNewInstr(attr);
    insFormat      fmt = emitInsModeFormat(ins, IF_SRD);

    id->idIns(ins);
    id->idInsFmt(fmt);
    id->idAddr()->iiaLclVar.initLclVarAddr(varx, offs);

    sz = emitInsSizeSV(id, insCodeMR(ins), varx, offs);
    id->idCodeSize(sz);

#ifdef DEBUG
    id->idDebugOnlyInfo()->idVarRefOffs = emitVarRefOffs;
#endif

    dispIns(id);
    emitCurIGsize += sz;

    // push/pop forms change the emitter's tracked stack depth.
    emitAdjustStackDepthPushPop(ins);
}

//----------------------------------------------------------------------------------------
// IsRedundantStackMov:
//    Check if the current `mov` instruction is redundant and can be omitted when dealing with Load/Store from stack.
//    A `mov` is redundant in following 2 cases:
//
//    1. Move that is identical to last instruction emitted.
//
//         vmovapd  xmmword ptr [V01 rbp-20H], xmm0  # <-- last instruction
//         vmovapd  xmmword ptr [V01 rbp-20H], xmm0  # <-- current instruction can be omitted.
//
//    2. Opposite Move as that of last instruction emitted.
//
//         vmovupd  ymmword ptr[V01 rbp-50H], ymm0  # <-- last instruction
//         vmovupd  ymm0, ymmword ptr[V01 rbp-50H]  # <-- current instruction can be omitted.
//
// Arguments:
//    ins  - The current instruction
//    fmt  - The current format
//    size - Operand size of current instruction
//    ireg - The current source/destination register
//    varx - The variable index used for the memory address
//    offs - The offset added to the memory address from varx
//
// Return Value:
//    true if the move instruction is redundant; otherwise, false.
//
bool emitter::IsRedundantStackMov(instruction ins, insFormat fmt, emitAttr size, regNumber ireg, int varx, int offs)
{
    assert(IsMovInstruction(ins));
    assert((fmt == IF_SWR_RRD) || (fmt == IF_RWR_SRD));

    if (!emitComp->opts.OptimizationEnabled())
    {
        // The remaining move elisions should only happen if optimizations are enabled
        return false;
    }

    // Skip optimization if current instruction creates a GC live value.
    if (EA_IS_GCREF_OR_BYREF(size))
    {
        return false;
    }

    bool hasSideEffect = HasSideEffect(ins, size);

    bool isFirstInstrInBlock = (emitCurIGinsCnt == 0) && ((emitCurIG->igFlags & IGF_EXTEND) == 0);

    // TODO-XArch-CQ: Certain instructions, such as movaps vs movups, are equivalent in
    // functionality even if their actual identifier differs and we should optimize these
    if (isFirstInstrInBlock ||               // Don't optimize if instruction is the first instruction in IG.
        (emitLastIns == nullptr) ||          // or if a last instruction doesn't exist
        (emitLastIns->idIns() != ins) ||     // or if the instruction is different from the last instruction
        (emitLastIns->idOpSize() != size))   // or if the operand size is different from the last instruction
    {
        return false;
    }

    // Don't optimize if the last instruction is also not a Load/Store.
    if (!((emitLastIns->idInsFmt() == IF_SWR_RRD) || (emitLastIns->idInsFmt() == IF_RWR_SRD)))
    {
        return false;
    }

    regNumber lastReg1 = emitLastIns->idReg1();
    int       varNum   = emitLastIns->idAddr()->iiaLclVar.lvaVarNum();
    int       lastOffs = emitLastIns->idAddr()->iiaLclVar.lvaOffset();

    // Check if the last instruction and current instructions use the same register and local memory.
    if (varNum == varx && lastReg1 == ireg && lastOffs == offs)
    {
        // Check if we did a switched mov in the last instruction and don't have a side effect
        if ((((emitLastIns->idInsFmt() == IF_RWR_SRD) && (fmt == IF_SWR_RRD)) ||
             ((emitLastIns->idInsFmt() == IF_SWR_RRD) && (fmt == IF_RWR_SRD))) &&
            !hasSideEffect) // or if the format is different from the last instruction
        {
            JITDUMP("\n -- suppressing mov because last instruction already moved from dst to src and the mov has "
                    "no side-effects.\n");
            return true;
        }
        // Check if we did same move in last instruction, side effects don't matter since they already happened
        if (emitLastIns->idInsFmt() == fmt)
        {
            JITDUMP("\n -- suppressing mov because last instruction already moved from src to dst.\n");
            return true;
        }
    }
    return false;
}

//------------------------------------------------------------------------
// emitIns_S_R: emits an instruction storing register ireg to the stack-based
// local (varx, offs); redundant stack movs may be elided.
//
void emitter::emitIns_S_R(instruction ins, emitAttr attr, regNumber ireg, int varx, int offs)
{
    insFormat fmt = emitInsModeFormat(ins, IF_SRD_RRD);
    if (IsMovInstruction(ins) && IsRedundantStackMov(ins, fmt, attr, ireg, varx, offs))
    {
        // Elide the mov entirely: nothing is emitted.
        return;
    }

    UNATIVE_OFFSET sz;
    instrDesc*     id = emitNewInstr(attr);

    id->idIns(ins);
    id->idInsFmt(fmt);
    id->idReg1(ireg);
    id->idAddr()->iiaLclVar.initLclVarAddr(varx, offs);

    sz = emitInsSizeSV(id, insCodeMR(ins), varx, offs);

#ifdef TARGET_X86
    if (attr == EA_1BYTE)
    {
        // x86 byte-sized operations require a byte-addressable register.
        assert(isByteReg(ireg));
    }
#endif

    id->idCodeSize(sz);
#ifdef DEBUG
    id->idDebugOnlyInfo()->idVarRefOffs = emitVarRefOffs;
#endif
    dispIns(id);
    emitCurIGsize += sz;
}

//------------------------------------------------------------------------
// emitIns_R_S: emits an instruction loading the stack-based local (varx, offs)
// into register ireg; redundant stack movs may be elided.
//
void emitter::emitIns_R_S(instruction ins, emitAttr attr, regNumber ireg, int varx, int offs)
{
    emitAttr size = EA_SIZE(attr);
    noway_assert(emitVerifyEncodable(ins, size, ireg));

    insFormat fmt = emitInsModeFormat(ins, IF_RRD_SRD);
    if (IsMovInstruction(ins) && IsRedundantStackMov(ins, fmt, attr, ireg, varx, offs))
    {
        // Elide the mov entirely: nothing is emitted.
        return;
    }

    instrDesc*     id = emitNewInstr(attr);
    UNATIVE_OFFSET sz;

    id->idIns(ins);
    id->idInsFmt(fmt);
    id->idReg1(ireg);
    id->idAddr()->iiaLclVar.initLclVarAddr(varx, offs);

    sz = emitInsSizeSV(id, insCodeRM(ins), varx, offs);
    id->idCodeSize(sz);
#ifdef DEBUG
    id->idDebugOnlyInfo()->idVarRefOffs = emitVarRefOffs;
#endif
    dispIns(id);
    emitCurIGsize += sz;
}

//------------------------------------------------------------------------
// emitIns_S_I: emits an instruction with a stack-based local operand and an
// immediate value.
//
void emitter::emitIns_S_I(instruction ins, emitAttr attr, int varx, int offs, int val)
{
#ifdef TARGET_AMD64
    // mov reg, imm64 is the only opcode which takes a full 8 byte immediate
    // all other opcodes take a sign-extended 4-byte immediate
    noway_assert(EA_SIZE(attr) < EA_8BYTE || !EA_IS_CNS_RELOC(attr));
#endif

    insFormat fmt;

    switch (ins)
    {
        case INS_rcl_N:
        case INS_rcr_N:
        case INS_rol_N:
        case INS_ror_N:
        case INS_shl_N:
        case INS_shr_N:
        case INS_sar_N:
            // Shift-by-constant forms; a shift count of 1 has a dedicated encoding
            // and is not expected here.
            assert(val != 1);
            fmt = IF_SRW_SHF;
            val &= 0x7F;
            break;

        default:
            fmt = emitInsModeFormat(ins, IF_SRD_CNS);
            break;
    }

    instrDesc* id = emitNewInstrCns(attr, val);
    id->idIns(ins);
    id->idInsFmt(fmt);
    id->idAddr()->iiaLclVar.initLclVarAddr(varx, offs);

    UNATIVE_OFFSET sz = emitInsSizeSV(id, insCodeMI(ins), varx, offs, val);
    id->idCodeSize(sz);

#ifdef DEBUG
    id->idDebugOnlyInfo()->idVarRefOffs = emitVarRefOffs;
#endif

    dispIns(id);
    emitCurIGsize += sz;
}

/*****************************************************************************
 *
 *  Record that a jump instruction uses the short encoding
 *
 */
void emitter::emitSetShortJump(instrDescJmp* id)
{
    if (id->idjKeepLong)
    {
        // Jumps marked keep-long must never be shortened.
        return;
    }

    id->idjShort = true;
}

/*****************************************************************************
 *
 *  Add a jmp instruction.
 *  When dst is NULL, instrCount specifies number of instructions
 *  to jump: positive is forward, negative is backward.
*/
void emitter::emitIns_J(instruction ins, BasicBlock* dst, int instrCount /* = 0 */)
{
    UNATIVE_OFFSET sz;
    instrDescJmp*  id = emitNewInstrJmp();

    if (dst != nullptr)
    {
        assert(dst->bbFlags & BBF_HAS_LABEL);
        assert(instrCount == 0);
    }
    else
    {
        /* Only allow non-label jmps in prolog */
        assert(emitPrologIG);
        assert(emitPrologIG == emitCurIG);
        assert(instrCount != 0);
    }

    id->idIns(ins);
    id->idInsFmt(IF_LABEL);

#ifdef DEBUG
    // Mark the finally call
    if (ins == INS_call && emitComp->compCurBB->bbJumpKind == BBJ_CALLFINALLY)
    {
        id->idDebugOnlyInfo()->idFinallyCall = true;
    }
#endif // DEBUG

    id->idjShort = 0;
    if (dst != nullptr)
    {
        /* Assume the jump will be long */

        id->idAddr()->iiaBBlabel = dst;
        // Jumps between hot and cold regions can never use the short rel8 form.
        id->idjKeepLong          = emitComp->fgInDifferentRegions(emitComp->compCurBB, dst);
    }
    else
    {
        id->idAddr()->iiaSetInstrCount(instrCount);
        id->idjKeepLong = false;
        /* This jump must be short */
        emitSetShortJump(id);
        id->idSetIsBound();
    }

    /* Record the jump's IG and offset within it */

    id->idjIG   = emitCurIG;
    id->idjOffs = emitCurIGsize;

    /* Append this jump to this IG's jump list */

    id->idjNext      = emitCurIGjmpList;
    emitCurIGjmpList = id;

#if EMITTER_STATS
    emitTotalIGjmps++;
#endif

    /* Figure out the max. size of the jump/call instruction */

    if (ins == INS_call)
    {
        sz = CALL_INST_SIZE;
    }
    else if (ins == INS_push || ins == INS_push_hide)
    {
        // Pushing the address of a basicBlock will need a reloc
        // as the instruction uses the absolute address,
        // not a relative address
        if (emitComp->opts.compReloc)
        {
            id->idSetIsDspReloc();
        }
        sz = PUSH_INST_SIZE;
    }
    else
    {
        insGroup* tgt = nullptr;

        if (dst != nullptr)
        {
            /* This is a jump - assume the worst */
            sz = (ins == INS_jmp) ? JMP_SIZE_LARGE : JCC_SIZE_LARGE;

            /* Can we guess at the jump distance? */
            // A non-null cookie means the target IG was already emitted, i.e. this is a backward jump.
            tgt = (insGroup*)emitCodeGetCookie(dst);
        }
        else
        {
            sz = JMP_SIZE_SMALL;
        }

        if (tgt)
        {
            int            extra;
            UNATIVE_OFFSET srcOffs;
            int            jmpDist;

            assert(JMP_SIZE_SMALL == JCC_SIZE_SMALL);

            /* This is a backward jump - figure out the distance */

            // Distance is measured from the end of the (assumed short) jump instruction.
            srcOffs = emitCurCodeOffset + emitCurIGsize + JMP_SIZE_SMALL;

            /* Compute the distance estimate */

            jmpDist = srcOffs - tgt->igOffs;
            assert((int)jmpDist > 0);

            /* How much beyond the max. short distance does the jump go? */

            extra = jmpDist + JMP_DIST_SMALL_MAX_NEG;

#if DEBUG_EMIT
            if (id->idDebugOnlyInfo()->idNum == (unsigned)INTERESTING_JUMP_NUM || INTERESTING_JUMP_NUM == 0)
            {
                if (INTERESTING_JUMP_NUM == 0)
                {
                    printf("[0] Jump %u:\n", id->idDebugOnlyInfo()->idNum);
                }
                printf("[0] Jump source is at %08X\n", srcOffs);
                printf("[0] Label block is at %08X\n", tgt->igOffs);
                printf("[0] Jump distance - %04X\n", jmpDist);
                if (extra > 0)
                {
                    printf("[0] Distance excess = %d \n", extra);
                }
            }
#endif

            if (extra <= 0 && !id->idjKeepLong)
            {
                /* Wonderful - this jump surely will be short */

                emitSetShortJump(id);
                sz = JMP_SIZE_SMALL;
            }
        }
#if DEBUG_EMIT
        else
        {
            if (id->idDebugOnlyInfo()->idNum == (unsigned)INTERESTING_JUMP_NUM || INTERESTING_JUMP_NUM == 0)
            {
                if (INTERESTING_JUMP_NUM == 0)
                {
                    printf("[0] Jump %u:\n", id->idDebugOnlyInfo()->idNum);
                }
                printf("[0] Jump source is at %04X/%08X\n", emitCurIGsize,
                       emitCurCodeOffset + emitCurIGsize + JMP_SIZE_SMALL);
                printf("[0] Label block is unknown\n");
            }
        }
#endif
    }

    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;

    emitAdjustStackDepthPushPop(ins);
}

#if !FEATURE_FIXED_OUT_ARGS

//------------------------------------------------------------------------
// emitAdjustStackDepthPushPop: Adjust the current and maximum stack depth.
//
// Arguments:
//    ins - the instruction. Only INS_push and INS_pop adjust the stack depth.
//
// Notes:
//    1. Alters emitCurStackLvl and possibly emitMaxStackDepth.
//    2. emitCntStackDepth must be set (0 in prolog/epilog, one DWORD elsewhere)
//
void emitter::emitAdjustStackDepthPushPop(instruction ins)
{
    if (ins == INS_push)
    {
        emitCurStackLvl += emitCntStackDepth;

        if (emitMaxStackDepth < emitCurStackLvl)
        {
            JITDUMP("Upping emitMaxStackDepth from %d to %d\n", emitMaxStackDepth, emitCurStackLvl);
            emitMaxStackDepth = emitCurStackLvl;
        }
    }
    else if (ins == INS_pop)
    {
        emitCurStackLvl -= emitCntStackDepth;
        assert((int)emitCurStackLvl >= 0);
    }
}

//------------------------------------------------------------------------
// emitAdjustStackDepth: Adjust the current and maximum stack depth.
//
// Arguments:
//    ins - the instruction. Only INS_add and INS_sub adjust the stack depth.
//          It is assumed that the add/sub is on the stack pointer.
//    val - the number of bytes to add to or subtract from the stack pointer.
//
// Notes:
//    1. Alters emitCurStackLvl and possibly emitMaxStackDepth.
//    2. emitCntStackDepth must be set (0 in prolog/epilog, one DWORD elsewhere)
//
void emitter::emitAdjustStackDepth(instruction ins, ssize_t val)
{
    // If we're in the prolog or epilog, or otherwise not tracking the stack depth, just return.
    if (emitCntStackDepth == 0)
        return;

    if (ins == INS_sub)
    {
        // "sub esp, val" grows the tracked depth; use overflow-checked arithmetic.
        S_UINT32 newStackLvl(emitCurStackLvl);
        newStackLvl += S_UINT32(val);
        noway_assert(!newStackLvl.IsOverflow());

        emitCurStackLvl = newStackLvl.Value();

        if (emitMaxStackDepth < emitCurStackLvl)
        {
            JITDUMP("Upping emitMaxStackDepth from %d to %d\n", emitMaxStackDepth, emitCurStackLvl);
            emitMaxStackDepth = emitCurStackLvl;
        }
    }
    else if (ins == INS_add)
    {
        // "add esp, val" shrinks the tracked depth.
        S_UINT32 newStackLvl = S_UINT32(emitCurStackLvl) - S_UINT32(val);
        noway_assert(!newStackLvl.IsOverflow());

        emitCurStackLvl = newStackLvl.Value();
    }
}

#endif // !FEATURE_FIXED_OUT_ARGS

/*****************************************************************************
 *
 *  Add a call instruction (direct or indirect).
 *  argSize<0 means that the caller will pop the arguments
 *
 * The other arguments are interpreted depending on callType as shown:
 * Unless otherwise specified, ireg,xreg,xmul,disp should have default values.
 *
 * EC_FUNC_TOKEN       : addr is the method address
 * EC_FUNC_TOKEN_INDIR : addr is the indirect method address
 * EC_FUNC_ADDR        : addr is the absolute address of the function
 * EC_FUNC_VIRTUAL     : "call [ireg+disp]"
 *
 * If callType is one of these emitCallTypes, addr has to be NULL.
 * EC_INDIR_R          : "call ireg".
 * EC_INDIR_SR         : "call lcl<disp>" (eg. call [ebp-8]).
 * EC_INDIR_C          : "call clsVar<disp>" (eg. call [clsVarAddr])
 * EC_INDIR_ARD        : "call [ireg+xreg*xmul+disp]"
 *
 */

// clang-format off
void emitter::emitIns_Call(EmitCallType          callType,
                           CORINFO_METHOD_HANDLE methHnd,
                           INDEBUG_LDISASM_COMMA(CORINFO_SIG_INFO* sigInfo) // used to report call sites to the EE
                           void*                 addr,
                           ssize_t               argSize,
                           emitAttr              retSize
                           MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(emitAttr secondRetSize),
                           VARSET_VALARG_TP      ptrVars,
                           regMaskTP             gcrefRegs,
                           regMaskTP             byrefRegs,
                           const DebugInfo&      di,
                           regNumber             ireg,
                           regNumber             xreg,
                           unsigned              xmul,
                           ssize_t               disp,
                           bool                  isJump)
// clang-format on
{
    /* Sanity check the arguments depending on callType */

    assert(callType < EC_COUNT);
    assert((callType != EC_FUNC_TOKEN && callType != EC_FUNC_TOKEN_INDIR) ||
           (addr != nullptr && ireg == REG_NA && xreg == REG_NA && xmul == 0 && disp == 0));
    assert(callType != EC_INDIR_R || (addr == nullptr && ireg < REG_COUNT && xreg == REG_NA && xmul == 0 && disp == 0));
    assert(callType != EC_INDIR_ARD || (addr == nullptr));

    // Our stack level should be always greater than the bytes of arguments we push. Just
    // a sanity test.
    assert((unsigned)abs((signed)argSize) <= codeGen->genStackLevel);

    // Trim out any callee-trashed registers from the live set.
    regMaskTP savedSet = emitGetGCRegsSavedOrModified(methHnd);
    gcrefRegs &= savedSet;
    byrefRegs &= savedSet;

#ifdef DEBUG
    if (EMIT_GC_VERBOSE)
    {
        printf("\t\t\t\t\t\t\tCall: GCvars=%s ", VarSetOps::ToString(emitComp, ptrVars));
        dumpConvertedVarSet(emitComp, ptrVars);
        printf(", gcrefRegs=");
        printRegMaskInt(gcrefRegs);
        emitDispRegSet(gcrefRegs);
        printf(", byrefRegs=");
        printRegMaskInt(byrefRegs);
        emitDispRegSet(byrefRegs);
        printf("\n");
    }
#endif

    /* Managed RetVal: emit sequence point for the call */
    if (emitComp->opts.compDbgInfo && di.IsValid())
    {
        codeGen->genIPmappingAdd(IPmappingDscKind::Normal, di, false);
    }

    /*
        We need to allocate the appropriate instruction descriptor based
        on whether this is a direct/indirect call, and whether we need to
        record an updated set of live GC variables.

        The stats for a ton of classes is as follows:

            Direct call w/o  GC vars        220,216
            Indir. call w/o  GC vars        144,781

            Direct call with GC vars          9,440
            Indir. call with GC vars          5,768
    */
    instrDesc* id;

    assert(argSize % REGSIZE_BYTES == 0);
    int argCnt = (int)(argSize / (int)REGSIZE_BYTES); // we need a signed-divide

    if ((callType == EC_INDIR_R) || (callType == EC_INDIR_ARD))
    {
        /* Indirect call, virtual calls */

        id = emitNewInstrCallInd(argCnt, disp, ptrVars, gcrefRegs, byrefRegs,
                                 retSize MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize));
    }
    else
    {
        // Helper/static/nonvirtual/function calls (direct or through handle),
        // and calls to an absolute addr.

        assert(callType == EC_FUNC_TOKEN || callType == EC_FUNC_TOKEN_INDIR);

        id = emitNewInstrCallDir(argCnt, ptrVars, gcrefRegs, byrefRegs,
                                 retSize MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize));
    }

    /* Update the emitter's live GC ref sets */

    // NOTE: this happens after descriptor allocation above, which records the pre-call sets.
    VarSetOps::Assign(emitComp, emitThisGCrefVars, ptrVars);
    emitThisGCrefRegs = gcrefRegs;
    emitThisByrefRegs = byrefRegs;

    /* Set the instruction - special case jumping a function (tail call) */
    instruction ins = INS_call;

    if (isJump)
    {
        if (callType == EC_FUNC_TOKEN)
        {
            ins = INS_l_jmp;
        }
        else
        {
            ins = INS_tail_i_jmp;
        }
    }
    id->idIns(ins);

    id->idSetIsNoGC(emitNoGChelper(methHnd));

    UNATIVE_OFFSET sz;

    // Record the address: method, indirection, or funcptr
    if ((callType == EC_INDIR_R) || (callType == EC_INDIR_ARD))
    {
        // This is an indirect call/jmp (either a virtual call or func ptr call)

        if (callType == EC_INDIR_R) // call reg
        {
            id->idSetIsCallRegPtr();
        }

        // The function is "ireg" if id->idIsCallRegPtr(),
        // else [ireg+xmul*xreg+disp]

        id->idInsFmt(IF_ARD);

        id->idAddr()->iiaAddrMode.amBaseReg = ireg;
        id->idAddr()->iiaAddrMode.amIndxReg = xreg;
        id->idAddr()->iiaAddrMode.amScale   = xmul ? emitEncodeScale(xmul) : emitter::OPSZ1;

        code_t code = insCodeMR(ins);
        if (ins == INS_tail_i_jmp)
        {
            // Tailcall with addressing mode/register needs to be rex.w
            // prefixed to be recognized as part of epilog by unwinder.
            code = AddRexWPrefix(ins, code);
        }

        sz = emitInsSizeAM(id, code);

        if (ireg == REG_NA && xreg == REG_NA)
        {
            if (codeGen->genCodeIndirAddrNeedsReloc(disp))
            {
                id->idSetIsDspReloc();
            }
#ifdef TARGET_AMD64
            else
            {
                // An absolute indir address that doesn't need reloc should fit within 32-bits
                // to be encoded as offset relative to zero. This addr mode requires an extra
                // SIB byte
                noway_assert((size_t) static_cast<int>(reinterpret_cast<intptr_t>(addr)) == (size_t)addr);
                sz++;
            }
#endif // TARGET_AMD64
        }
    }
    else if (callType == EC_FUNC_TOKEN_INDIR)
    {
        // call/jmp [method_addr]

        assert(addr != nullptr);

        id->idInsFmt(IF_METHPTR);
        id->idAddr()->iiaAddr = (BYTE*)addr;
        sz                    = 6;

        // Since this is an indirect call through a pointer and we don't
        // currently pass in emitAttr into this function, we query codegen
        // whether addr needs a reloc.
        if (codeGen->genCodeIndirAddrNeedsReloc((size_t)addr))
        {
            id->idSetIsDspReloc();
        }
#ifdef TARGET_AMD64
        else
        {
            // An absolute indir address that doesn't need reloc should fit within 32-bits
            // to be encoded as offset relative to zero. This addr mode requires an extra
            // SIB byte
            noway_assert((size_t) static_cast<int>(reinterpret_cast<intptr_t>(addr)) == (size_t)addr);
            sz++;
        }
#endif // TARGET_AMD64
    }
    else
    {
        // This is a simple direct call/jmp: call/jmp helper/method/addr

        assert(callType == EC_FUNC_TOKEN);

        assert(addr != nullptr);

        id->idInsFmt(IF_METHOD);
        sz = 5;

        id->idAddr()->iiaAddr = (BYTE*)addr;

        // Direct call to a method and no addr indirection is needed.
        if (codeGen->genCodeAddrNeedsReloc((size_t)addr))
        {
            id->idSetIsDspReloc();
        }
    }

#ifdef DEBUG
    if (emitComp->verbose && 0)
    {
        if (id->idIsLargeCall())
        {
            printf("[%02u] Rec call GC vars = %s\n", id->idDebugOnlyInfo()->idNum,
                   VarSetOps::ToString(emitComp, ((instrDescCGCA*)id)->idcGCvars));
        }
    }

    id->idDebugOnlyInfo()->idMemCookie = (size_t)methHnd; // method token
    id->idDebugOnlyInfo()->idCallSig   = sigInfo;
#endif // DEBUG

#ifdef LATE_DISASM
    if (addr != nullptr)
    {
        codeGen->getDisAssembler().disSetMethod((size_t)addr, methHnd);
    }
#endif // LATE_DISASM

    id->idCodeSize(sz);

    dispIns(id);
    emitCurIGsize += sz;

#if !FEATURE_FIXED_OUT_ARGS

    /* The call will pop the arguments */

    if (emitCntStackDepth && argSize > 0)
    {
        noway_assert((ssize_t)emitCurStackLvl >= argSize);
        emitCurStackLvl -= (int)argSize;
        assert((int)emitCurStackLvl >= 0);
    }

#endif // !FEATURE_FIXED_OUT_ARGS
}

#ifdef DEBUG
/*****************************************************************************
 *
 *  The following called for each recorded instruction -- use for debugging.
 */
void emitter::emitInsSanityCheck(instrDesc* id)
{
    // make certain you only try to put relocs on things that can have them.
    ID_OPS idOp = (ID_OPS)emitFmtToOps[id->idInsFmt()];
    if ((idOp == ID_OP_SCNS) && id->idIsLargeCns())
    {
        idOp = ID_OP_CNS;
    }

    if (id->idIsDspReloc())
    {
        assert(idOp == ID_OP_NONE || idOp == ID_OP_AMD || idOp == ID_OP_DSP || idOp == ID_OP_DSP_CNS ||
               idOp == ID_OP_AMD_CNS || idOp == ID_OP_SPEC || idOp == ID_OP_CALL || idOp == ID_OP_JMP ||
               idOp == ID_OP_LBL);
    }

    if (id->idIsCnsReloc())
    {
        assert(idOp == ID_OP_CNS || idOp == ID_OP_AMD_CNS || idOp == ID_OP_DSP_CNS || idOp == ID_OP_SPEC ||
               idOp == ID_OP_CALL || idOp == ID_OP_JMP);
    }
}
#endif

/*****************************************************************************
 *
 *  Return the allocated size (in bytes) of the given instruction descriptor.
*/ size_t emitter::emitSizeOfInsDsc(instrDesc* id) { if (emitIsScnsInsDsc(id)) { return SMALL_IDSC_SIZE; } assert((unsigned)id->idInsFmt() < emitFmtCount); ID_OPS idOp = (ID_OPS)emitFmtToOps[id->idInsFmt()]; // An INS_call instruction may use a "fat" direct/indirect call descriptor // except for a local call to a label (i.e. call to a finally) // Only ID_OP_CALL and ID_OP_SPEC check for this, so we enforce that the // INS_call instruction always uses one of these idOps if (id->idIns() == INS_call) { assert(idOp == ID_OP_CALL || // is a direct call idOp == ID_OP_SPEC || // is a indirect call idOp == ID_OP_JMP); // is a local call to finally clause } switch (idOp) { case ID_OP_NONE: #if FEATURE_LOOP_ALIGN if (id->idIns() == INS_align) { return sizeof(instrDescAlign); } #endif break; case ID_OP_LBL: return sizeof(instrDescLbl); case ID_OP_JMP: return sizeof(instrDescJmp); case ID_OP_CALL: case ID_OP_SPEC: if (id->idIsLargeCall()) { /* Must be a "fat" indirect call descriptor */ return sizeof(instrDescCGCA); } FALLTHROUGH; case ID_OP_SCNS: case ID_OP_CNS: case ID_OP_DSP: case ID_OP_DSP_CNS: if (id->idIsLargeCns()) { if (id->idIsLargeDsp()) { return sizeof(instrDescCnsDsp); } else { return sizeof(instrDescCns); } } else { if (id->idIsLargeDsp()) { return sizeof(instrDescDsp); } else { return sizeof(instrDesc); } } case ID_OP_AMD: case ID_OP_AMD_CNS: if (id->idIsLargeCns()) { if (id->idIsLargeDsp()) { return sizeof(instrDescCnsAmd); } else { return sizeof(instrDescCns); } } else { if (id->idIsLargeDsp()) { return sizeof(instrDescAmd); } else { return sizeof(instrDesc); } } default: NO_WAY("unexpected instruction descriptor format"); break; } return sizeof(instrDesc); } /*****************************************************************************/ #ifdef DEBUG /***************************************************************************** * * Return a string that represents the given register. 
*/

// emitRegName: Return a printable name for 'reg' sized per 'attr'
// (e.g. rax/eax/ax/al), using a pair of rotating static buffers so two
// results can be alive in one printf call. Not thread-safe (DEBUG-only).
const char* emitter::emitRegName(regNumber reg, emitAttr attr, bool varName)
{
    // Two rotating scratch buffers; 'rbc' selects which one the next munged name goes into.
    static char          rb[2][128];
    static unsigned char rbc = 0;

    const char* rn = emitComp->compRegVarName(reg, varName);

#ifdef TARGET_AMD64
    char suffix = '\0';

    switch (EA_SIZE(attr))
    {
        case EA_32BYTE:
            return emitYMMregName(reg);

        case EA_16BYTE:
            return emitXMMregName(reg);

        case EA_8BYTE:
            if ((REG_XMM0 <= reg) && (reg <= REG_XMM15))
            {
                return emitXMMregName(reg);
            }
            break;

        case EA_4BYTE:
            if ((REG_XMM0 <= reg) && (reg <= REG_XMM15))
            {
                return emitXMMregName(reg);
            }

            if (reg > REG_R15)
            {
                break;
            }

            if (reg > REG_RDI)
            {
                // r8..r15 get a 'd' suffix (e.g. r8d) rather than an 'e' prefix.
                suffix = 'd';
                goto APPEND_SUFFIX;
            }
            // Legacy registers: replace the leading 'r' with 'e' (rax -> eax).
            rbc        = (rbc + 1) % 2;
            rb[rbc][0] = 'e';
            rb[rbc][1] = rn[1];
            rb[rbc][2] = rn[2];
            rb[rbc][3] = 0;
            rn         = rb[rbc];
            break;

        case EA_2BYTE:
            if (reg > REG_RDI)
            {
                suffix = 'w';
                goto APPEND_SUFFIX;
            }
            // Legacy registers: drop the leading 'r' (rax -> ax).
            rn++;
            break;

        case EA_1BYTE:
            if (reg > REG_RDI)
            {
                suffix = 'b';
            APPEND_SUFFIX:
                // Copy the base name and append the size suffix (d/w/b).
                rbc        = (rbc + 1) % 2;
                rb[rbc][0] = rn[0];
                rb[rbc][1] = rn[1];
                if (rn[2])
                {
                    assert(rn[3] == 0);
                    rb[rbc][2] = rn[2];
                    rb[rbc][3] = suffix;
                    rb[rbc][4] = 0;
                }
                else
                {
                    rb[rbc][2] = suffix;
                    rb[rbc][3] = 0;
                }
            }
            else
            {
                // Legacy byte registers: rax -> al, rsi -> sil, etc.
                rbc        = (rbc + 1) % 2;
                rb[rbc][0] = rn[1];
                if (reg < 4)
                {
                    rb[rbc][1] = 'l';
                    rb[rbc][2] = 0;
                }
                else
                {
                    rb[rbc][1] = rn[2];
                    rb[rbc][2] = 'l';
                    rb[rbc][3] = 0;
                }
            }

            rn = rb[rbc];
            break;

        default:
            break;
    }
#endif // TARGET_AMD64

#ifdef TARGET_X86
    assert(strlen(rn) >= 3);

    switch (EA_SIZE(attr))
    {
        case EA_32BYTE:
            return emitYMMregName(reg);

        case EA_16BYTE:
            return emitXMMregName(reg);

        case EA_8BYTE:
            if ((REG_XMM0 <= reg) && (reg <= REG_XMM7))
            {
                return emitXMMregName(reg);
            }
            break;

        case EA_4BYTE:
            if ((REG_XMM0 <= reg) && (reg <= REG_XMM7))
            {
                return emitXMMregName(reg);
            }
            break;

        case EA_2BYTE:
            // Drop the leading 'e' (eax -> ax).
            rn++;
            break;

        case EA_1BYTE:
            // eax -> al (copy any trailing decoration after the register name too).
            rbc        = (rbc + 1) % 2;
            rb[rbc][0] = rn[1];
            rb[rbc][1] = 'l';
            strcpy_s(&rb[rbc][2], sizeof(rb[0]) - 2, rn + 3);

            rn = rb[rbc];
            break;

        default:
            break;
    }
#endif // TARGET_X86

#if 0
    // The following is useful if you want register names to be tagged with * or ^ representing
    // gcref or byref, respectively, however it's possibly not interesting most of the time.
    if (EA_IS_GCREF(attr) || EA_IS_BYREF(attr))
    {
        if (rn != rb[rbc])
        {
            rbc = (rbc+1)%2;
            strcpy_s(rb[rbc], sizeof(rb[rbc]), rn);
            rn = rb[rbc];
        }

        if (EA_IS_GCREF(attr))
        {
            strcat_s(rb[rbc], sizeof(rb[rbc]), "*");
        }
        else if (EA_IS_BYREF(attr))
        {
            strcat_s(rb[rbc], sizeof(rb[rbc]), "^");
        }
    }
#endif // 0

    return rn;
}

/*****************************************************************************
 *
 *  Return a string that represents the given XMM register.
 */
const char* emitter::emitXMMregName(unsigned reg)
{
    // Table of "x"-prefixed register names generated from register.h.
    static const char* const regNames[] = {
#define REGDEF(name, rnum, mask, sname) "x" sname,
#include "register.h"
    };

    assert(reg < REG_COUNT);
    assert(reg < ArrLen(regNames));

    return regNames[reg];
}

/*****************************************************************************
 *
 *  Return a string that represents the given YMM register.
 */
const char* emitter::emitYMMregName(unsigned reg)
{
    // Table of "y"-prefixed register names generated from register.h.
    static const char* const regNames[] = {
#define REGDEF(name, rnum, mask, sname) "y" sname,
#include "register.h"
    };

    assert(reg < REG_COUNT);
    assert(reg < ArrLen(regNames));

    return regNames[reg];
}

/*****************************************************************************
 *
 *  Display a static data member reference.
*/

// emitDispClsVar: Print a static data member / data section reference,
// e.g. "[@RWD04]", "[classVar[0x...]+8]", or the FS:/DS: special cases.
void emitter::emitDispClsVar(CORINFO_FIELD_HANDLE fldHnd, ssize_t offs, bool reloc /* = false */)
{
    int doffs;

    /* Filter out the special case of fs:[offs] */

    // Munge any pointers if we want diff-able disassembly
    if (emitComp->opts.disDiffable)
    {
        ssize_t top12bits = (offs >> 20);
        if ((top12bits != 0) && (top12bits != -1))
        {
            offs = 0xD1FFAB1E;
        }
    }

    if (fldHnd == FLD_GLOBAL_FS)
    {
        printf("FS:[0x%04X]", offs);
        return;
    }

    if (fldHnd == FLD_GLOBAL_DS)
    {
        printf("[0x%04X]", offs);
        return;
    }

    printf("[");

    // A non-negative result means the handle actually encodes a JIT data-section offset.
    doffs = Compiler::eeGetJitDataOffs(fldHnd);

    if (reloc)
    {
        printf("reloc ");
    }

    if (doffs >= 0)
    {
        // Low bit set tags a constant-section entry; otherwise it's read-write data.
        if (doffs & 1)
        {
            printf("@CNS%02u", doffs - 1);
        }
        else
        {
            printf("@RWD%02u", doffs);
        }

        if (offs)
        {
            printf("%+Id", offs);
        }
    }
    else
    {
        printf("classVar[%#x]", emitComp->dspPtr(fldHnd));

        if (offs)
        {
            printf("%+Id", offs);
        }
    }

    printf("]");

    if (emitComp->opts.varNames && offs < 0)
    {
        printf("'%s", emitComp->eeGetFieldName(fldHnd));
        if (offs)
        {
            printf("%+Id", offs);
        }
        printf("'");
    }
}

/*****************************************************************************
 *
 *  Display a stack frame reference.
*/

// emitDispFrameRef: Print a stack frame reference, as "[V02+0x8]" style and/or
// the final frame-pointer/stack-pointer relative form once frame layout is final.
//
// Arguments:
//    varx  - variable number (negative for temps)
//    disp  - extra displacement within the variable
//    offs  - variable reference offset (used for variable-name lookup)
//    asmfm - if true, print only the assembly-format (frame-relative) form
//
void emitter::emitDispFrameRef(int varx, int disp, int offs, bool asmfm)
{
    int  addr;
    bool bEBP;

    printf("[");

    if (!asmfm || emitComp->lvaDoneFrameLayout == Compiler::NO_FRAME_LAYOUT)
    {
        if (varx < 0)
        {
            printf("TEMP_%02u", -varx);
        }
        else
        {
            printf("V%02u", +varx);
        }

        if (disp < 0)
        {
            printf("-0x%X", -disp);
        }
        else if (disp > 0)
        {
            printf("+0x%X", +disp);
        }
    }

    if (emitComp->lvaDoneFrameLayout == Compiler::FINAL_FRAME_LAYOUT)
    {
        if (!asmfm)
        {
            printf(" ");
        }

        addr = emitComp->lvaFrameAddress(varx, &bEBP) + disp;

        if (bEBP)
        {
            printf(STR_FPBASE);

            if (addr < 0)
            {
                printf("-%02XH", -addr);
            }
            else if (addr > 0)
            {
                printf("+%02XH", addr);
            }
        }
        else
        {
            /* Adjust the offset by amount currently pushed on the stack */

            printf(STR_SPBASE);

            if (addr < 0)
            {
                printf("-%02XH", -addr);
            }
            else if (addr > 0)
            {
                printf("+%02XH", addr);
            }

#if !FEATURE_FIXED_OUT_ARGS
            if (emitCurStackLvl)
                printf("+%02XH", emitCurStackLvl);
#endif // !FEATURE_FIXED_OUT_ARGS
        }
    }

    printf("]");

    if (varx >= 0 && emitComp->opts.varNames)
    {
        const char* varName = emitComp->compLocalVarName(varx, offs);

        if (varName)
        {
            printf("'%s", varName);

            if (disp < 0)
            {
                printf("-%d", -disp);
            }
            else if (disp > 0)
            {
                printf("+%d", +disp);
            }

            printf("'");
        }
    }
}

/*****************************************************************************
 *
 *  Display a reloc value
 *  If we are formatting for a diffable assembly listing don't print the hex value
 *  since it will prevent us from doing assembly diffs
 */
void emitter::emitDispReloc(ssize_t value)
{
    if (emitComp->opts.disAsm && emitComp->opts.disDiffable)
    {
        printf("(reloc)");
    }
    else
    {
        printf("(reloc 0x%Ix)", emitComp->dspPtr(value));
    }
}

/*****************************************************************************
 *
 *  Display an address mode.
*/

// emitDispAddrMode: Print an address mode "[base + scale*index + disp]",
// including jump-table labels for switch table jumps and (optionally) the
// jump table contents themselves.
//
// Arguments:
//    id       - the instruction whose address mode to display
//    noDetail - if true, suppress dumping the jump-table label list
//
void emitter::emitDispAddrMode(instrDesc* id, bool noDetail)
{
    bool    nsep = false; // has something been printed yet, so a '+' separator is needed?
    ssize_t disp;

    unsigned     jtno = 0;
    dataSection* jdsc = nullptr;

    /* The displacement field is in an unusual place for calls */

    disp = (id->idIns() == INS_call) || (id->idIns() == INS_tail_i_jmp) ? emitGetInsCIdisp(id) : emitGetInsAmdAny(id);

    /* Display a jump table label if this is a switch table jump */

    if (id->idIns() == INS_i_jmp)
    {
        UNATIVE_OFFSET offs = 0;

        /* Find the appropriate entry in the data section list */

        for (jdsc = emitConsDsc.dsdList, jtno = 0; jdsc; jdsc = jdsc->dsNext)
        {
            UNATIVE_OFFSET size = jdsc->dsSize;

            /* Is this a label table? */

            // Odd size tags a label-table section; the low bit is not part of the size.
            if (size & 1)
            {
                size--;
                jtno++;

                if (offs == id->idDebugOnlyInfo()->idMemCookie)
                {
                    break;
                }
            }

            offs += size;
        }

        /* If we've found a matching entry then is a table jump */

        if (jdsc)
        {
            if (id->idIsDspReloc())
            {
                printf("reloc ");
            }
            printf("J_M%03u_DS%02u", emitComp->compMethodID, id->idDebugOnlyInfo()->idMemCookie);

            disp -= id->idDebugOnlyInfo()->idMemCookie;
        }
    }

    bool frameRef = false;

    printf("[");

    if (id->idAddr()->iiaAddrMode.amBaseReg != REG_NA)
    {
        printf("%s", emitRegName(id->idAddr()->iiaAddrMode.amBaseReg));
        nsep = true;
        if (id->idAddr()->iiaAddrMode.amBaseReg == REG_ESP)
        {
            frameRef = true;
        }
        else if (emitComp->isFramePointerUsed() && id->idAddr()->iiaAddrMode.amBaseReg == REG_EBP)
        {
            frameRef = true;
        }
    }

    if (id->idAddr()->iiaAddrMode.amIndxReg != REG_NA)
    {
        size_t scale = emitDecodeScale(id->idAddr()->iiaAddrMode.amScale);

        if (nsep)
        {
            printf("+");
        }
        if (scale > 1)
        {
            printf("%u*", scale);
        }
        printf("%s", emitRegName(id->idAddr()->iiaAddrMode.amIndxReg));
        nsep = true;
    }

    if ((id->idIsDspReloc()) && (id->idIns() != INS_i_jmp))
    {
        if (nsep)
        {
            printf("+");
        }
        emitDispReloc(disp);
    }
    else
    {
        // Munge any pointers if we want diff-able disassembly
        // It's assumed to be a pointer when disp is outside of the range (-1M, +1M); top bits are not 0 or -1
        if (!frameRef && emitComp->opts.disDiffable && (static_cast<size_t>((disp >> 20) + 1) > 1))
        {
            if (nsep)
            {
                printf("+");
            }
            printf("D1FFAB1EH");
        }
        else if (disp > 0)
        {
            if (nsep)
            {
                printf("+");
            }
            if (frameRef)
            {
                printf("%02XH", disp);
            }
            else if (disp < 1000)
            {
                printf("%d", disp);
            }
            else if (disp <= 0xFFFF)
            {
                printf("%04XH", disp);
            }
            else
            {
                printf("%08XH", disp);
            }
        }
        else if (disp < 0)
        {
            if (frameRef)
            {
                printf("-%02XH", -disp);
            }
            else if (disp > -1000)
            {
                printf("-%d", -disp);
            }
            else if (disp >= -0xFFFF)
            {
                printf("-%04XH", -disp);
            }
            else if (disp < -0xFFFFFF)
            {
                if (nsep)
                {
                    printf("+");
                }
                printf("%08XH", disp);
            }
            else
            {
                printf("-%08XH", -disp);
            }
        }
        else if (!nsep)
        {
            printf("%04XH", disp);
        }
    }

    printf("]");

    // pretty print string if it looks like one
    if ((id->idGCref() == GCT_GCREF) && (id->idIns() == INS_mov) && (id->idAddr()->iiaAddrMode.amBaseReg == REG_NA))
    {
        const WCHAR* str = emitComp->eeGetCPString(disp);
        if (str != nullptr)
        {
            printf(" '%S'", str);
        }
    }

    if (jdsc && !noDetail)
    {
        unsigned     cnt = (jdsc->dsSize - 1) / TARGET_POINTER_SIZE;
        BasicBlock** bbp = (BasicBlock**)jdsc->dsCont;

#ifdef TARGET_AMD64
#define SIZE_LETTER "Q"
#else
#define SIZE_LETTER "D"
#endif
        printf("\n\n J_M%03u_DS%02u LABEL " SIZE_LETTER "WORD", emitComp->compMethodID, jtno);

        /* Display the label table (it's stored as "BasicBlock*" values) */

        do
        {
            insGroup* lab;

            /* Convert the BasicBlock* value to an IG address */

            lab = (insGroup*)emitCodeGetCookie(*bbp++);
            assert(lab);

            printf("\n D" SIZE_LETTER " %s", emitLabelString(lab));
        } while (--cnt);
    }
}

/*****************************************************************************
 *
 *  If the given instruction is a shift, display the 2nd operand.
*/ void emitter::emitDispShift(instruction ins, int cnt) { switch (ins) { case INS_rcl_1: case INS_rcr_1: case INS_rol_1: case INS_ror_1: case INS_shl_1: case INS_shr_1: case INS_sar_1: printf(", 1"); break; case INS_rcl: case INS_rcr: case INS_rol: case INS_ror: case INS_shl: case INS_shr: case INS_sar: printf(", cl"); break; case INS_rcl_N: case INS_rcr_N: case INS_rol_N: case INS_ror_N: case INS_shl_N: case INS_shr_N: case INS_sar_N: printf(", %d", cnt); break; default: break; } } /***************************************************************************** * * Display (optionally) the bytes for the instruction encoding in hex */ void emitter::emitDispInsHex(instrDesc* id, BYTE* code, size_t sz) { // We do not display the instruction hex if we want diff-able disassembly if (!emitComp->opts.disDiffable) { #ifdef TARGET_AMD64 // how many bytes per instruction we format for const size_t digits = 10; #else // TARGET_X86 const size_t digits = 6; #endif printf(" "); for (unsigned i = 0; i < sz; i++) { printf("%02X", (*((BYTE*)(code + i)))); } if (sz < digits) { printf("%.*s", 2 * (digits - sz), " "); } } } //-------------------------------------------------------------------- // emitDispIns: Dump the given instruction to jitstdout. // // Arguments: // id - The instruction // isNew - Whether the instruction is newly generated (before encoding). // doffs - If true, always display the passed-in offset. // asmfm - Whether the instruction should be displayed in assembly format. // If false some additional information may be printed for the instruction. // offset - The offset of the instruction. Only displayed if doffs is true or if // !isNew && !asmfm. // code - Pointer to the actual code, used for displaying the address and encoded bytes // if turned on. // sz - The size of the instruction, used to display the encoded bytes. // ig - The instruction group containing the instruction. Not used on xarch. 
// void emitter::emitDispIns( instrDesc* id, bool isNew, bool doffs, bool asmfm, unsigned offset, BYTE* code, size_t sz, insGroup* ig) { emitAttr attr; const char* sstr; instruction ins = id->idIns(); if (emitComp->verbose) { unsigned idNum = id->idDebugOnlyInfo()->idNum; printf("IN%04x: ", idNum); } #define ID_INFO_DSP_RELOC ((bool)(id->idIsDspReloc())) /* Display a constant value if the instruction references one */ if (!isNew) { switch (id->idInsFmt()) { int offs; case IF_MRD_RRD: case IF_MWR_RRD: case IF_MRW_RRD: case IF_RRD_MRD: case IF_RWR_MRD: case IF_RRW_MRD: case IF_MRD_CNS: case IF_MWR_CNS: case IF_MRW_CNS: case IF_MRW_SHF: case IF_MRD: case IF_MWR: case IF_MRW: case IF_MRD_OFF: /* Is this actually a reference to a data section? */ offs = Compiler::eeGetJitDataOffs(id->idAddr()->iiaFieldHnd); if (offs >= 0) { void* addr; /* Display a data section reference */ assert((unsigned)offs < emitConsDsc.dsdOffs); addr = emitConsBlock ? emitConsBlock + offs : nullptr; #if 0 // TODO-XArch-Cleanup: Fix or remove this code. /* Is the operand an integer or floating-point value? */ bool isFP = false; if (CodeGen::instIsFP(id->idIns())) { switch (id->idIns()) { case INS_fild: case INS_fildl: break; default: isFP = true; break; } } if (offs & 1) printf("@CNS%02u", offs); else printf("@RWD%02u", offs); printf(" "); if (addr) { addr = 0; // TODO-XArch-Bug?: // This was busted by switching the order // in which we output the code block vs. // the data blocks -- when we get here, // the data block has not been filled in // yet, so we'll display garbage. if (isFP) { if (id->idOpSize() == EA_4BYTE) printf("DF %f \n", addr ? *(float *)addr : 0); else printf("DQ %lf\n", addr ? *(double *)addr : 0); } else { if (id->idOpSize() <= EA_4BYTE) printf("DD %d \n", addr ? *(int *)addr : 0); else printf("DQ %D \n", addr ? 
*(__int64 *)addr : 0); } } #endif } break; default: break; } } // printf("[F=%s] " , emitIfName(id->idInsFmt())); // printf("INS#%03u: ", id->idDebugOnlyInfo()->idNum); // printf("[S=%02u] " , emitCurStackLvl); if (isNew) printf("[M=%02u] ", emitMaxStackDepth); // printf("[S=%02u] " , emitCurStackLvl/sizeof(INT32)); // printf("[A=%08X] " , emitSimpleStkMask); // printf("[A=%08X] " , emitSimpleByrefStkMask); // printf("[L=%02u] " , id->idCodeSize()); if (!isNew && !asmfm) { doffs = true; } /* Display the instruction address */ emitDispInsAddr(code); /* Display the instruction offset */ emitDispInsOffs(offset, doffs); if (code != nullptr) { /* Display the instruction hex code */ assert(((code >= emitCodeBlock) && (code < emitCodeBlock + emitTotalHotCodeSize)) || ((code >= emitColdCodeBlock) && (code < emitColdCodeBlock + emitTotalColdCodeSize))); emitDispInsHex(id, code + writeableOffset, sz); } /* Display the instruction name */ sstr = codeGen->genInsDisplayName(id); printf(" %-9s", sstr); #ifndef HOST_UNIX if (strnlen_s(sstr, 10) >= 9) #else // HOST_UNIX if (strnlen(sstr, 10) >= 9) #endif // HOST_UNIX { // Make sure there's at least one space after the instruction name, for very long instruction names. 
printf(" "); } /* By now the size better be set to something */ assert(id->idCodeSize() || emitInstHasNoCode(ins)); /* Figure out the operand size */ if (id->idGCref() == GCT_GCREF) { attr = EA_GCREF; sstr = "gword ptr "; } else if (id->idGCref() == GCT_BYREF) { attr = EA_BYREF; sstr = "bword ptr "; } else { attr = id->idOpSize(); sstr = codeGen->genSizeStr(emitGetMemOpSize(id)); if (ins == INS_lea) { #ifdef TARGET_AMD64 assert((attr == EA_4BYTE) || (attr == EA_8BYTE)); #else assert(attr == EA_4BYTE); #endif sstr = ""; } } /* Now see what instruction format we've got */ // First print the implicit register usage if (instrHasImplicitRegPairDest(ins)) { printf("%s:%s, ", emitRegName(REG_EDX, id->idOpSize()), emitRegName(REG_EAX, id->idOpSize())); } else if (instrIs3opImul(ins)) { regNumber tgtReg = inst3opImulReg(ins); printf("%s, ", emitRegName(tgtReg, id->idOpSize())); } switch (id->idInsFmt()) { ssize_t val; ssize_t offs; CnsVal cnsVal; const char* methodName; case IF_CNS: val = emitGetInsSC(id); #ifdef TARGET_AMD64 // no 8-byte immediates allowed here! 
assert((val >= (ssize_t)0xFFFFFFFF80000000LL) && (val <= 0x000000007FFFFFFFLL)); #endif if (id->idIsCnsReloc()) { emitDispReloc(val); } else { PRINT_CONSTANT: ssize_t srcVal = val; // Munge any pointers if we want diff-able disassembly if (emitComp->opts.disDiffable) { ssize_t top14bits = (val >> 18); if ((top14bits != 0) && (top14bits != -1)) { val = 0xD1FFAB1E; } } if ((val > -1000) && (val < 1000)) { printf("%d", val); } else if ((val > 0) || (val < -0xFFFFFF)) { printf("0x%IX", val); } else { // (val < 0) printf("-0x%IX", -val); } emitDispCommentForHandle(srcVal, id->idDebugOnlyInfo()->idFlags); } break; case IF_ARD: case IF_AWR: case IF_ARW: if (id->idIsCallRegPtr()) { printf("%s", emitRegName(id->idAddr()->iiaAddrMode.amBaseReg)); } else { // GC ref bit is for the return value for calls, do not print it before the address mode if ((ins != INS_call) && (ins != INS_tail_i_jmp)) { printf("%s", sstr); } emitDispAddrMode(id, isNew); emitDispShift(ins); } if ((ins == INS_call) || (ins == INS_tail_i_jmp)) { assert(id->idInsFmt() == IF_ARD); /* Ignore indirect calls */ if (id->idDebugOnlyInfo()->idMemCookie == 0) { break; } assert(id->idDebugOnlyInfo()->idMemCookie); if (id->idIsCallRegPtr()) { printf(" ; "); } /* This is a virtual call */ methodName = emitComp->eeGetMethodFullName((CORINFO_METHOD_HANDLE)id->idDebugOnlyInfo()->idMemCookie); printf("%s", methodName); } break; case IF_RRD_ARD: case IF_RWR_ARD: case IF_RRW_ARD: #ifdef TARGET_AMD64 if (ins == INS_movsxd) { attr = EA_8BYTE; } else #endif if (ins == INS_movsx || ins == INS_movzx) { attr = EA_PTRSIZE; } else if ((ins == INS_crc32) && (attr != EA_8BYTE)) { // The idReg1 is always 4 bytes, but the size of idReg2 can vary. 
// This logic ensures that we print `crc32 eax, bx` instead of `crc32 ax, bx` attr = EA_4BYTE; } printf("%s, %s", emitRegName(id->idReg1(), attr), sstr); emitDispAddrMode(id); break; case IF_RRW_ARD_CNS: case IF_RWR_ARD_CNS: { printf("%s, %s", emitRegName(id->idReg1(), attr), sstr); emitDispAddrMode(id); emitGetInsAmdCns(id, &cnsVal); val = cnsVal.cnsVal; printf(", "); if (cnsVal.cnsReloc) { emitDispReloc(val); } else { goto PRINT_CONSTANT; } break; } case IF_AWR_RRD_CNS: { assert(ins == INS_vextracti128 || ins == INS_vextractf128); // vextracti/f128 extracts 128-bit data, so we fix sstr as "xmm ptr" sstr = codeGen->genSizeStr(EA_ATTR(16)); printf(sstr); emitDispAddrMode(id); printf(", %s", emitRegName(id->idReg1(), attr)); emitGetInsAmdCns(id, &cnsVal); val = cnsVal.cnsVal; printf(", "); if (cnsVal.cnsReloc) { emitDispReloc(val); } else { goto PRINT_CONSTANT; } break; } case IF_RWR_RRD_ARD: printf("%s, %s, %s", emitRegName(id->idReg1(), attr), emitRegName(id->idReg2(), attr), sstr); emitDispAddrMode(id); break; case IF_RWR_ARD_RRD: if (ins == INS_vpgatherqd || ins == INS_vgatherqps) { attr = EA_16BYTE; } sstr = codeGen->genSizeStr(EA_ATTR(4)); printf("%s, %s", emitRegName(id->idReg1(), attr), sstr); emitDispAddrMode(id); printf(", %s", emitRegName(id->idReg2(), attr)); break; case IF_RWR_RRD_ARD_CNS: { printf("%s, %s, %s", emitRegName(id->idReg1(), attr), emitRegName(id->idReg2(), attr), sstr); emitDispAddrMode(id); emitGetInsAmdCns(id, &cnsVal); val = cnsVal.cnsVal; printf(", "); if (cnsVal.cnsReloc) { emitDispReloc(val); } else { goto PRINT_CONSTANT; } break; } case IF_RWR_RRD_ARD_RRD: { printf("%s, ", emitRegName(id->idReg1(), attr)); printf("%s, ", emitRegName(id->idReg2(), attr)); emitDispAddrMode(id); emitGetInsAmdCns(id, &cnsVal); val = (cnsVal.cnsVal >> 4) + XMMBASE; printf(", %s", emitRegName((regNumber)val, attr)); break; } case IF_ARD_RRD: case IF_AWR_RRD: case IF_ARW_RRD: printf("%s", sstr); emitDispAddrMode(id); printf(", %s", 
emitRegName(id->idReg1(), attr)); break; case IF_AWR_RRD_RRD: { printf("%s", sstr); emitDispAddrMode(id); printf(", %s", emitRegName(id->idReg1(), attr)); printf(", %s", emitRegName(id->idReg2(), attr)); break; } case IF_ARD_CNS: case IF_AWR_CNS: case IF_ARW_CNS: case IF_ARW_SHF: printf("%s", sstr); emitDispAddrMode(id); emitGetInsAmdCns(id, &cnsVal); val = cnsVal.cnsVal; #ifdef TARGET_AMD64 // no 8-byte immediates allowed here! assert((val >= (ssize_t)0xFFFFFFFF80000000LL) && (val <= 0x000000007FFFFFFFLL)); #endif if (id->idInsFmt() == IF_ARW_SHF) { emitDispShift(ins, (BYTE)val); } else { printf(", "); if (cnsVal.cnsReloc) { emitDispReloc(val); } else { goto PRINT_CONSTANT; } } break; case IF_SRD: case IF_SWR: case IF_SRW: printf("%s", sstr); #if !FEATURE_FIXED_OUT_ARGS if (ins == INS_pop) emitCurStackLvl -= sizeof(int); #endif emitDispFrameRef(id->idAddr()->iiaLclVar.lvaVarNum(), id->idAddr()->iiaLclVar.lvaOffset(), id->idDebugOnlyInfo()->idVarRefOffs, asmfm); #if !FEATURE_FIXED_OUT_ARGS if (ins == INS_pop) emitCurStackLvl += sizeof(int); #endif emitDispShift(ins); break; case IF_SRD_RRD: case IF_SWR_RRD: case IF_SRW_RRD: printf("%s", sstr); emitDispFrameRef(id->idAddr()->iiaLclVar.lvaVarNum(), id->idAddr()->iiaLclVar.lvaOffset(), id->idDebugOnlyInfo()->idVarRefOffs, asmfm); printf(", %s", emitRegName(id->idReg1(), attr)); break; case IF_SRD_CNS: case IF_SWR_CNS: case IF_SRW_CNS: case IF_SRW_SHF: printf("%s", sstr); emitDispFrameRef(id->idAddr()->iiaLclVar.lvaVarNum(), id->idAddr()->iiaLclVar.lvaOffset(), id->idDebugOnlyInfo()->idVarRefOffs, asmfm); emitGetInsCns(id, &cnsVal); val = cnsVal.cnsVal; #ifdef TARGET_AMD64 // no 8-byte immediates allowed here! 
assert((val >= (ssize_t)0xFFFFFFFF80000000LL) && (val <= 0x000000007FFFFFFFLL)); #endif if (id->idInsFmt() == IF_SRW_SHF) { emitDispShift(ins, (BYTE)val); } else { printf(", "); if (cnsVal.cnsReloc) { emitDispReloc(val); } else { goto PRINT_CONSTANT; } } break; case IF_SWR_RRD_CNS: assert(ins == INS_vextracti128 || ins == INS_vextractf128); assert(UseVEXEncoding()); emitGetInsAmdCns(id, &cnsVal); printf("%s", sstr); emitDispFrameRef(id->idAddr()->iiaLclVar.lvaVarNum(), id->idAddr()->iiaLclVar.lvaOffset(), id->idDebugOnlyInfo()->idVarRefOffs, asmfm); printf(", %s", emitRegName(id->idReg1(), attr)); val = cnsVal.cnsVal; printf(", "); if (cnsVal.cnsReloc) { emitDispReloc(val); } else { goto PRINT_CONSTANT; } break; case IF_RRD_SRD: case IF_RWR_SRD: case IF_RRW_SRD: #ifdef TARGET_AMD64 if (ins == INS_movsxd) { attr = EA_8BYTE; } else #endif if (ins == INS_movsx || ins == INS_movzx) { attr = EA_PTRSIZE; } else if ((ins == INS_crc32) && (attr != EA_8BYTE)) { // The idReg1 is always 4 bytes, but the size of idReg2 can vary. 
// This logic ensures that we print `crc32 eax, bx` instead of `crc32 ax, bx` attr = EA_4BYTE; } printf("%s, %s", emitRegName(id->idReg1(), attr), sstr); emitDispFrameRef(id->idAddr()->iiaLclVar.lvaVarNum(), id->idAddr()->iiaLclVar.lvaOffset(), id->idDebugOnlyInfo()->idVarRefOffs, asmfm); break; case IF_RRW_SRD_CNS: case IF_RWR_SRD_CNS: { printf("%s, %s", emitRegName(id->idReg1(), attr), sstr); emitDispFrameRef(id->idAddr()->iiaLclVar.lvaVarNum(), id->idAddr()->iiaLclVar.lvaOffset(), id->idDebugOnlyInfo()->idVarRefOffs, asmfm); emitGetInsCns(id, &cnsVal); val = cnsVal.cnsVal; printf(", "); if (cnsVal.cnsReloc) { emitDispReloc(val); } else { goto PRINT_CONSTANT; } break; } case IF_RWR_RRD_SRD: printf("%s, %s, %s", emitRegName(id->idReg1(), attr), emitRegName(id->idReg2(), attr), sstr); emitDispFrameRef(id->idAddr()->iiaLclVar.lvaVarNum(), id->idAddr()->iiaLclVar.lvaOffset(), id->idDebugOnlyInfo()->idVarRefOffs, asmfm); break; case IF_RWR_RRD_SRD_CNS: { printf("%s, %s, %s", emitRegName(id->idReg1(), attr), emitRegName(id->idReg2(), attr), sstr); emitDispFrameRef(id->idAddr()->iiaLclVar.lvaVarNum(), id->idAddr()->iiaLclVar.lvaOffset(), id->idDebugOnlyInfo()->idVarRefOffs, asmfm); emitGetInsCns(id, &cnsVal); val = cnsVal.cnsVal; printf(", "); if (cnsVal.cnsReloc) { emitDispReloc(val); } else { goto PRINT_CONSTANT; } break; } case IF_RWR_RRD_SRD_RRD: { printf("%s, ", emitRegName(id->idReg1(), attr)); printf("%s, ", emitRegName(id->idReg2(), attr)); emitDispFrameRef(id->idAddr()->iiaLclVar.lvaVarNum(), id->idAddr()->iiaLclVar.lvaOffset(), id->idDebugOnlyInfo()->idVarRefOffs, asmfm); emitGetInsCns(id, &cnsVal); val = (cnsVal.cnsVal >> 4) + XMMBASE; printf(", %s", emitRegName((regNumber)val, attr)); break; } case IF_RRD_RRD: case IF_RWR_RRD: case IF_RRW_RRD: if (ins == INS_pmovmskb) { printf("%s, %s", emitRegName(id->idReg1(), EA_4BYTE), emitRegName(id->idReg2(), attr)); } else if ((ins == INS_cvtsi2ss) || (ins == INS_cvtsi2sd)) { printf(" %s, %s", 
emitRegName(id->idReg1(), EA_16BYTE), emitRegName(id->idReg2(), attr)); } else if ((ins == INS_cvttsd2si) || (ins == INS_cvtss2si) || (ins == INS_cvtsd2si) || (ins == INS_cvttss2si)) { printf(" %s, %s", emitRegName(id->idReg1(), attr), emitRegName(id->idReg2(), EA_16BYTE)); } #ifdef TARGET_AMD64 else if (ins == INS_movsxd) { printf("%s, %s", emitRegName(id->idReg1(), EA_8BYTE), emitRegName(id->idReg2(), EA_4BYTE)); } #endif // TARGET_AMD64 else if (ins == INS_movsx || ins == INS_movzx) { printf("%s, %s", emitRegName(id->idReg1(), EA_PTRSIZE), emitRegName(id->idReg2(), attr)); } else if (ins == INS_bt) { // INS_bt operands are reversed. Display them in the normal order. printf("%s, %s", emitRegName(id->idReg2(), attr), emitRegName(id->idReg1(), attr)); } #ifdef FEATURE_HW_INTRINSICS else if (ins == INS_crc32 && attr != EA_8BYTE) { // The idReg1 is always 4 bytes, but the size of idReg2 can vary. // This logic ensures that we print `crc32 eax, bx` instead of `crc32 ax, bx` printf("%s, %s", emitRegName(id->idReg1(), EA_4BYTE), emitRegName(id->idReg2(), attr)); } #endif // FEATURE_HW_INTRINSICS else { printf("%s, %s", emitRegName(id->idReg1(), attr), emitRegName(id->idReg2(), attr)); } break; case IF_RRW_RRW: assert(ins == INS_xchg); printf("%s,", emitRegName(id->idReg1(), attr)); printf(" %s", emitRegName(id->idReg2(), attr)); break; case IF_RWR_RRD_RRD: { assert(IsAVXInstruction(ins)); assert(IsThreeOperandAVXInstruction(ins)); regNumber reg2 = id->idReg2(); regNumber reg3 = id->idReg3(); if (ins == INS_bextr || ins == INS_bzhi) { // BMI bextr and bzhi encodes the reg2 in VEX.vvvv and reg3 in modRM, // which is different from most of other instructions regNumber tmp = reg2; reg2 = reg3; reg3 = tmp; } printf("%s, ", emitRegName(id->idReg1(), attr)); printf("%s, ", emitRegName(reg2, attr)); printf("%s", emitRegName(reg3, attr)); break; } case IF_RWR_RRD_RRD_CNS: assert(IsAVXInstruction(ins)); assert(IsThreeOperandAVXInstruction(ins)); printf("%s, ", 
emitRegName(id->idReg1(), attr)); printf("%s, ", emitRegName(id->idReg2(), attr)); switch (ins) { case INS_vinsertf128: case INS_vinserti128: { attr = EA_16BYTE; break; } case INS_pinsrb: case INS_pinsrw: case INS_pinsrd: { attr = EA_4BYTE; break; } case INS_pinsrq: { attr = EA_8BYTE; break; } default: { break; } } printf("%s, ", emitRegName(id->idReg3(), attr)); val = emitGetInsSC(id); goto PRINT_CONSTANT; break; case IF_RWR_RRD_RRD_RRD: assert(IsAVXOnlyInstruction(ins)); assert(UseVEXEncoding()); printf("%s, ", emitRegName(id->idReg1(), attr)); printf("%s, ", emitRegName(id->idReg2(), attr)); printf("%s, ", emitRegName(id->idReg3(), attr)); printf("%s", emitRegName(id->idReg4(), attr)); break; case IF_RRW_RRW_CNS: { emitAttr tgtAttr = attr; switch (ins) { case INS_vextractf128: case INS_vextracti128: { tgtAttr = EA_16BYTE; break; } case INS_extractps: case INS_pextrb: case INS_pextrw: case INS_pextrw_sse41: case INS_pextrd: { tgtAttr = EA_4BYTE; break; } case INS_pextrq: { tgtAttr = EA_8BYTE; break; } case INS_pinsrb: case INS_pinsrw: case INS_pinsrd: { attr = EA_4BYTE; break; } case INS_pinsrq: { attr = EA_8BYTE; break; } default: { break; } } printf("%s,", emitRegName(id->idReg1(), tgtAttr)); printf(" %s", emitRegName(id->idReg2(), attr)); val = emitGetInsSC(id); #ifdef TARGET_AMD64 // no 8-byte immediates allowed here! 
assert((val >= (ssize_t)0xFFFFFFFF80000000LL) && (val <= 0x000000007FFFFFFFLL)); #endif printf(", "); if (id->idIsCnsReloc()) { emitDispReloc(val); } else { goto PRINT_CONSTANT; } break; } case IF_RRD: case IF_RWR: case IF_RRW: printf("%s", emitRegName(id->idReg1(), attr)); emitDispShift(ins); break; case IF_RRW_SHF: printf("%s", emitRegName(id->idReg1(), attr)); emitDispShift(ins, (BYTE)emitGetInsSC(id)); break; case IF_RRD_MRD: case IF_RWR_MRD: case IF_RRW_MRD: if (ins == INS_movsx || ins == INS_movzx) { attr = EA_PTRSIZE; } #ifdef TARGET_AMD64 else if (ins == INS_movsxd) { attr = EA_PTRSIZE; } #endif else if ((ins == INS_crc32) && (attr != EA_8BYTE)) { // The idReg1 is always 4 bytes, but the size of idReg2 can vary. // This logic ensures that we print `crc32 eax, bx` instead of `crc32 ax, bx` attr = EA_4BYTE; } printf("%s, %s", emitRegName(id->idReg1(), attr), sstr); offs = emitGetInsDsp(id); emitDispClsVar(id->idAddr()->iiaFieldHnd, offs, ID_INFO_DSP_RELOC); break; case IF_RRW_MRD_CNS: case IF_RWR_MRD_CNS: { printf("%s, %s", emitRegName(id->idReg1(), attr), sstr); offs = emitGetInsDsp(id); emitDispClsVar(id->idAddr()->iiaFieldHnd, offs, ID_INFO_DSP_RELOC); emitGetInsDcmCns(id, &cnsVal); val = cnsVal.cnsVal; printf(", "); if (cnsVal.cnsReloc) { emitDispReloc(val); } else { goto PRINT_CONSTANT; } break; } case IF_MWR_RRD_CNS: { assert(ins == INS_vextracti128 || ins == INS_vextractf128); // vextracti/f128 extracts 128-bit data, so we fix sstr as "xmm ptr" sstr = codeGen->genSizeStr(EA_ATTR(16)); printf(sstr); offs = emitGetInsDsp(id); emitDispClsVar(id->idAddr()->iiaFieldHnd, offs, ID_INFO_DSP_RELOC); printf(", %s", emitRegName(id->idReg1(), attr)); emitGetInsDcmCns(id, &cnsVal); val = cnsVal.cnsVal; printf(", "); if (cnsVal.cnsReloc) { emitDispReloc(val); } else { goto PRINT_CONSTANT; } break; } case IF_RWR_RRD_MRD: printf("%s, %s, %s", emitRegName(id->idReg1(), attr), emitRegName(id->idReg2(), attr), sstr); offs = emitGetInsDsp(id); 
emitDispClsVar(id->idAddr()->iiaFieldHnd, offs, ID_INFO_DSP_RELOC); break; case IF_RWR_RRD_MRD_CNS: { printf("%s, %s, %s", emitRegName(id->idReg1(), attr), emitRegName(id->idReg2(), attr), sstr); offs = emitGetInsDsp(id); emitDispClsVar(id->idAddr()->iiaFieldHnd, offs, ID_INFO_DSP_RELOC); emitGetInsDcmCns(id, &cnsVal); val = cnsVal.cnsVal; printf(", "); if (cnsVal.cnsReloc) { emitDispReloc(val); } else { goto PRINT_CONSTANT; } break; } case IF_RWR_RRD_MRD_RRD: { printf("%s, ", emitRegName(id->idReg1(), attr)); printf("%s, ", emitRegName(id->idReg2(), attr)); offs = emitGetInsDsp(id); emitDispClsVar(id->idAddr()->iiaFieldHnd, offs, ID_INFO_DSP_RELOC); emitGetInsDcmCns(id, &cnsVal); val = (cnsVal.cnsVal >> 4) + XMMBASE; printf(", %s", emitRegName((regNumber)val, attr)); break; } case IF_RWR_MRD_OFF: printf("%s, %s", emitRegName(id->idReg1(), attr), "offset"); offs = emitGetInsDsp(id); emitDispClsVar(id->idAddr()->iiaFieldHnd, offs, ID_INFO_DSP_RELOC); break; case IF_MRD_RRD: case IF_MWR_RRD: case IF_MRW_RRD: printf("%s", sstr); offs = emitGetInsDsp(id); emitDispClsVar(id->idAddr()->iiaFieldHnd, offs, ID_INFO_DSP_RELOC); printf(", %s", emitRegName(id->idReg1(), attr)); break; case IF_MRD_CNS: case IF_MWR_CNS: case IF_MRW_CNS: case IF_MRW_SHF: printf("%s", sstr); offs = emitGetInsDsp(id); emitDispClsVar(id->idAddr()->iiaFieldHnd, offs, ID_INFO_DSP_RELOC); emitGetInsDcmCns(id, &cnsVal); val = cnsVal.cnsVal; #ifdef TARGET_AMD64 // no 8-byte immediates allowed here! 
assert((val >= (ssize_t)0xFFFFFFFF80000000LL) && (val <= 0x000000007FFFFFFFLL)); #endif if (cnsVal.cnsReloc) { emitDispReloc(val); } else if (id->idInsFmt() == IF_MRW_SHF) { emitDispShift(ins, (BYTE)val); } else { printf(", "); goto PRINT_CONSTANT; } break; case IF_MRD: case IF_MWR: case IF_MRW: printf("%s", sstr); offs = emitGetInsDsp(id); emitDispClsVar(id->idAddr()->iiaFieldHnd, offs, ID_INFO_DSP_RELOC); emitDispShift(ins); break; case IF_MRD_OFF: printf("offset "); offs = emitGetInsDsp(id); emitDispClsVar(id->idAddr()->iiaFieldHnd, offs, ID_INFO_DSP_RELOC); break; case IF_RRD_CNS: case IF_RWR_CNS: case IF_RRW_CNS: printf("%s, ", emitRegName(id->idReg1(), attr)); val = emitGetInsSC(id); if (id->idIsCnsReloc()) { emitDispReloc(val); } else { goto PRINT_CONSTANT; } break; case IF_LABEL: case IF_RWR_LABEL: case IF_SWR_LABEL: if (ins == INS_lea) { printf("%s, ", emitRegName(id->idReg1(), attr)); } else if (ins == INS_mov) { /* mov dword ptr [frame.callSiteReturnAddress], label */ assert(id->idInsFmt() == IF_SWR_LABEL); instrDescLbl* idlbl = (instrDescLbl*)id; emitDispFrameRef(idlbl->dstLclVar.lvaVarNum(), idlbl->dstLclVar.lvaOffset(), 0, asmfm); printf(", "); } if (((instrDescJmp*)id)->idjShort) { printf("SHORT "); } if (id->idIsBound()) { if (id->idAddr()->iiaHasInstrCount()) { printf("%3d instr", id->idAddr()->iiaGetInstrCount()); } else { emitPrintLabel(id->idAddr()->iiaIGlabel); } } else { printf("L_M%03u_" FMT_BB, emitComp->compMethodID, id->idAddr()->iiaBBlabel->bbNum); } break; case IF_METHOD: case IF_METHPTR: methodName = emitComp->eeGetMethodFullName((CORINFO_METHOD_HANDLE)id->idDebugOnlyInfo()->idMemCookie); if (id->idInsFmt() == IF_METHPTR) { printf("["); } printf("%s", methodName); if (id->idInsFmt() == IF_METHPTR) { printf("]"); } break; case IF_NONE: #if FEATURE_LOOP_ALIGN if (ins == INS_align) { instrDescAlign* alignInstrId = (instrDescAlign*)id; printf("[%d bytes", alignInstrId->idCodeSize()); // targetIG is only set for 1st of the series of align 
// instruction
            if ((alignInstrId->idaLoopHeadPredIG != nullptr) && (alignInstrId->loopHeadIG() != nullptr))
            {
                printf(" for IG%02u", alignInstrId->loopHeadIG()->igNum);
            }
            printf("]");
            }
#endif
            break;

        default:
            printf("unexpected format %s", emitIfName(id->idInsFmt()));
            assert(!"unexpectedFormat");
            break;
    }

    if (sz != 0 && sz != id->idCodeSize() && (!asmfm || emitComp->verbose))
    {
        // Code size in the instrDesc is different from the actual code size we've been given!
        printf(" (ECS:%d, ACS:%d)", id->idCodeSize(), sz);
    }

    printf("\n");
}

/*****************************************************************************/
#endif

/*****************************************************************************
 *
 *  Output nBytes bytes of NOP instructions
 *
 *  Writes the NOP bytes at dstRW and returns the advanced write pointer.
 *  nBytes must be in [0, 15]; on AMD64 the longer paddings are composed
 *  recursively from two shorter multi-byte NOPs (see the case 12-15 arms).
 */

static BYTE* emitOutputNOP(BYTE* dstRW, size_t nBytes)
{
    assert(nBytes <= 15);

#ifndef TARGET_AMD64
    // TODO-X86-CQ: when VIA C3 CPU's are out of circulation, switch to the
    // more efficient real NOP: 0x0F 0x1F +modR/M
    // Also can't use AMD recommended, multiple size prefixes (i.e. 0x66 0x66 0x90 for 3 byte NOP)
    // because debugger and msdis don't like it, so maybe VIA doesn't either
    // So instead just stick to repeating single byte nops
    //
    // The cases deliberately fall through so that exactly nBytes 0x90 bytes are emitted.

    switch (nBytes)
    {
        case 15:
            *dstRW++ = 0x90;
            FALLTHROUGH;
        case 14:
            *dstRW++ = 0x90;
            FALLTHROUGH;
        case 13:
            *dstRW++ = 0x90;
            FALLTHROUGH;
        case 12:
            *dstRW++ = 0x90;
            FALLTHROUGH;
        case 11:
            *dstRW++ = 0x90;
            FALLTHROUGH;
        case 10:
            *dstRW++ = 0x90;
            FALLTHROUGH;
        case 9:
            *dstRW++ = 0x90;
            FALLTHROUGH;
        case 8:
            *dstRW++ = 0x90;
            FALLTHROUGH;
        case 7:
            *dstRW++ = 0x90;
            FALLTHROUGH;
        case 6:
            *dstRW++ = 0x90;
            FALLTHROUGH;
        case 5:
            *dstRW++ = 0x90;
            FALLTHROUGH;
        case 4:
            *dstRW++ = 0x90;
            FALLTHROUGH;
        case 3:
            *dstRW++ = 0x90;
            FALLTHROUGH;
        case 2:
            *dstRW++ = 0x90;
            FALLTHROUGH;
        case 1:
            *dstRW++ = 0x90;
            break;
        case 0:
            break;
    }
#else  // TARGET_AMD64
    // Multi-byte NOP encodings (0x0F 0x1F /0 with varying ModRM/SIB/disp),
    // optionally widened with 0x66 operand-size prefixes; paddings of 12-15
    // bytes are split into two NOPs because more than 3 prefixes is slow.
    switch (nBytes)
    {
        case 2:
            *dstRW++ = 0x66;
            FALLTHROUGH;
        case 1:
            *dstRW++ = 0x90;
            break;
        case 0:
            break;
        case 3:
            *dstRW++ = 0x0F;
            *dstRW++ = 0x1F;
            *dstRW++ = 0x00;
            break;
        case 4:
            *dstRW++ = 0x0F;
            *dstRW++ = 0x1F;
            *dstRW++ = 0x40;
            *dstRW++ = 0x00;
            break;
        case 6:
            *dstRW++ = 0x66;
            FALLTHROUGH;
        case 5:
            *dstRW++ = 0x0F;
            *dstRW++ = 0x1F;
            *dstRW++ = 0x44;
            *dstRW++ = 0x00;
            *dstRW++ = 0x00;
            break;
        case 7:
            *dstRW++ = 0x0F;
            *dstRW++ = 0x1F;
            *dstRW++ = 0x80;
            *dstRW++ = 0x00;
            *dstRW++ = 0x00;
            *dstRW++ = 0x00;
            *dstRW++ = 0x00;
            break;
        case 15:
            // More than 3 prefixes is slower than just 2 NOPs
            dstRW = emitOutputNOP(emitOutputNOP(dstRW, 7), 8);
            break;
        case 14:
            // More than 3 prefixes is slower than just 2 NOPs
            dstRW = emitOutputNOP(emitOutputNOP(dstRW, 7), 7);
            break;
        case 13:
            // More than 3 prefixes is slower than just 2 NOPs
            dstRW = emitOutputNOP(emitOutputNOP(dstRW, 5), 8);
            break;
        case 12:
            // More than 3 prefixes is slower than just 2 NOPs
            dstRW = emitOutputNOP(emitOutputNOP(dstRW, 4), 8);
            break;
        case 11:
            *dstRW++ = 0x66;
            FALLTHROUGH;
        case 10:
            *dstRW++ = 0x66;
            FALLTHROUGH;
        case 9:
            *dstRW++ = 0x66;
            FALLTHROUGH;
        case 8:
            *dstRW++ = 0x0F;
            *dstRW++ = 0x1F;
            *dstRW++ = 0x84;
            *dstRW++ = 0x00;
            *dstRW++ = 0x00;
            *dstRW++ = 0x00;
            *dstRW++ = 0x00;
            *dstRW++ = 0x00;
            break;
    }
#endif // TARGET_AMD64

    return dstRW;
}

//--------------------------------------------------------------------
// emitOutputAlign: Outputs NOP to align the loop
//
// Arguments:
//    ig  - Current instruction group
//    id  - align instruction that holds amount of padding (NOPs) to add
//    dst - Destination buffer
//
// Return Value:
//    The advanced destination pointer (execution-address view, not the
//    writeable view).
//
// Notes:
//    Amount of padding needed to align the loop is already calculated. This
//    method extracts that information and inserts suitable NOP instructions.
//
BYTE* emitter::emitOutputAlign(insGroup* ig, instrDesc* id, BYTE* dst)
{
    instrDescAlign* alignInstr = (instrDescAlign*)id;

#ifdef DEBUG
    // For cases where 'align' was placed behind a 'jmp' in an IG that does not
    // immediately precede the loop IG, we do not know in advance the offset of
    // IG having loop. For such cases, skip the padding calculation validation.
    // NOTE: validatePadding exists only in DEBUG builds; the asserts below that
    // reference it compile away in release.
    bool validatePadding = !alignInstr->isPlacedAfterJmp;
#endif

    // Candidate for loop alignment
    assert(codeGen->ShouldAlignLoops());
    assert(ig->endsWithAlignInstr());

    unsigned paddingToAdd = id->idCodeSize();

    // Either things are already aligned or align them here.
    assert(!validatePadding || (paddingToAdd == 0) ||
           (((size_t)dst & (emitComp->opts.compJitAlignLoopBoundary - 1)) != 0));

    // Padding amount should not exceed the alignment boundary
    // (NOTE(review): paddingToAdd is unsigned, so the `0 <=` half is trivially true.)
    assert(0 <= paddingToAdd && paddingToAdd < emitComp->opts.compJitAlignLoopBoundary);

#ifdef DEBUG
    if (validatePadding)
    {
        unsigned paddingNeeded =
            emitCalculatePaddingForLoopAlignment(((instrDescAlign*)id)->idaIG->igNext, (size_t)dst, true);

        // For non-adaptive, padding size is spread in multiple instructions, so don't bother checking
        if (emitComp->opts.compJitAlignLoopAdaptive)
        {
            assert(paddingToAdd == paddingNeeded);
        }
    }

    emitComp->loopsAligned++;
#endif

    // All byte emission below goes through the writeable mapping of the buffer.
    BYTE* dstRW = dst + writeableOffset;

#ifdef DEBUG
    // Under STRESS_EMITTER, if this is the 'align' before the 'jmp' instruction,
    // then add "int3" instruction. Since int3 takes 1 byte, we would only add
    // it if paddingToAdd >= 1 byte.

    if (emitComp->compStressCompile(Compiler::STRESS_EMITTER, 50) && alignInstr->isPlacedAfterJmp && paddingToAdd >= 1)
    {
        size_t int3Code = insCodeMR(INS_BREAKPOINT);
        // There is no good way to squeeze in "int3" as well as display it
        // in the disassembly because there is no corresponding instrDesc for
        // it. As such, leave it as is, the "0xCC" bytecode will be seen next
        // to the nop instruction in disasm.
        // e.g. CC   align [1 bytes for IG29]
        //
        // if (emitComp->opts.disAsm)
        //{
        //    emitDispInsAddr(dstRW);
        //    emitDispInsOffs(0, false);
        //    printf("                      %-9s  ; stress-mode injected interrupt\n", "int3");
        //}
        dstRW += emitOutputByte(dstRW, int3Code);
        paddingToAdd -= 1;
    }
#endif

    dstRW = emitOutputNOP(dstRW, paddingToAdd);
    return dstRW - writeableOffset;
}

/*****************************************************************************
 *
 *  Output an instruction involving an address mode.
 */

BYTE* emitter::emitOutputAM(BYTE* dst, instrDesc* id, code_t code, CnsVal* addc)
{
    regNumber   reg;
    regNumber   rgx;
    ssize_t     dsp;
    bool        dspInByte;
    bool        dspIsZero;
    bool        isMoffset = false;

    instruction ins  = id->idIns();
    emitAttr    size = id->idOpSize();
    size_t      opsz = EA_SIZE_IN_BYTES(size);

    // Get the base/index registers
    reg = id->idAddr()->iiaAddrMode.amBaseReg;
    rgx = id->idAddr()->iiaAddrMode.amIndxReg;

    // For INS_call the instruction size is actually the return value size
    if ((ins == INS_call) || (ins == INS_tail_i_jmp))
    {
        if (ins == INS_tail_i_jmp)
        {
            // tail call with addressing mode (or through register) needs rex.w
            // prefix to be recognized by unwinder as part of epilog.
code = AddRexWPrefix(ins, code); } // Special case: call via a register if (id->idIsCallRegPtr()) { code = insEncodeMRreg(ins, reg, EA_PTRSIZE, code); dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code); dst += emitOutputWord(dst, code); goto DONE; } // The displacement field is in an unusual place for calls dsp = emitGetInsCIdisp(id); #ifdef TARGET_AMD64 // Compute the REX prefix if it exists if (IsExtendedReg(reg, EA_PTRSIZE)) { insEncodeReg012(ins, reg, EA_PTRSIZE, &code); // TODO-Cleanup: stop casting RegEncoding() back to a regNumber. reg = (regNumber)RegEncoding(reg); } if (IsExtendedReg(rgx, EA_PTRSIZE)) { insEncodeRegSIB(ins, rgx, &code); // TODO-Cleanup: stop casting RegEncoding() back to a regNumber. rgx = (regNumber)RegEncoding(rgx); } // And emit the REX prefix dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code); #endif // TARGET_AMD64 goto GOT_DSP; } // `addc` is used for two kinds if instructions // 1. ins like ADD that can have reg/mem and const versions both and const version needs to modify the opcode for // large constant operand (e.g., imm32) // 2. certain SSE/AVX ins have const operand as control bits that is always 1-Byte (imm8) even if `size` > 1-Byte if (addc && (size > EA_1BYTE)) { ssize_t cval = addc->cnsVal; // Does the constant fit in a byte? // SSE/AVX do not need to modify opcode if ((signed char)cval == cval && addc->cnsReloc == false && ins != INS_mov && ins != INS_test) { if (id->idInsFmt() != IF_ARW_SHF && !IsSSEOrAVXInstruction(ins)) { code |= 2; } opsz = 1; } } #ifdef TARGET_X86 else { // Special case: "mov eax, [addr]" and "mov [addr], eax" // Amd64: this is one case where addr can be 64-bit in size. This is // currently unused or not enabled on amd64 as it always uses RIP // relative addressing which results in smaller instruction size. 
if ((ins == INS_mov) && (id->idReg1() == REG_EAX) && (reg == REG_NA) && (rgx == REG_NA)) { switch (id->idInsFmt()) { case IF_RWR_ARD: assert(code == (insCodeRM(ins) | (insEncodeReg345(ins, REG_EAX, EA_PTRSIZE, NULL) << 8))); code &= ~((code_t)0xFFFFFFFF); code |= 0xA0; isMoffset = true; break; case IF_AWR_RRD: assert(code == (insCodeMR(ins) | (insEncodeReg345(ins, REG_EAX, EA_PTRSIZE, NULL) << 8))); code &= ~((code_t)0xFFFFFFFF); code |= 0xA2; isMoffset = true; break; default: break; } } } #endif // TARGET_X86 // Emit VEX prefix if required // There are some callers who already add VEX prefix and call this routine. // Therefore, add VEX prefix is one is not already present. code = AddVexPrefixIfNeededAndNotPresent(ins, code, size); // For this format, moves do not support a third operand, so we only need to handle the binary ops. if (TakesVexPrefix(ins)) { if (IsDstDstSrcAVXInstruction(ins)) { regNumber src1 = REG_NA; switch (id->idInsFmt()) { case IF_RWR_RRD_ARD: case IF_RWR_ARD_RRD: case IF_RWR_RRD_ARD_CNS: case IF_RWR_RRD_ARD_RRD: { src1 = id->idReg2(); break; } default: { src1 = id->idReg1(); break; } } // encode source operand reg in 'vvvv' bits in 1's complement form code = insEncodeReg3456(ins, src1, size, code); } else if (IsDstSrcSrcAVXInstruction(ins)) { code = insEncodeReg3456(ins, id->idReg2(), size, code); } } // Emit the REX prefix if required if (TakesRexWPrefix(ins, size)) { code = AddRexWPrefix(ins, code); } if (IsExtendedReg(reg, EA_PTRSIZE)) { insEncodeReg012(ins, reg, EA_PTRSIZE, &code); // TODO-Cleanup: stop casting RegEncoding() back to a regNumber. reg = (regNumber)RegEncoding(reg); } if (IsExtendedReg(rgx, EA_PTRSIZE)) { insEncodeRegSIB(ins, rgx, &code); // TODO-Cleanup: stop casting RegEncoding() back to a regNumber. 
rgx = (regNumber)RegEncoding(rgx); } // Special case emitting AVX instructions if (EncodedBySSE38orSSE3A(ins) || (ins == INS_crc32)) { if ((ins == INS_crc32) && (size > EA_1BYTE)) { code |= 0x0100; if (size == EA_2BYTE) { dst += emitOutputByte(dst, 0x66); } } regNumber reg345 = REG_NA; if (IsBMIInstruction(ins)) { reg345 = getBmiRegNumber(ins); } if (reg345 == REG_NA) { switch (id->idInsFmt()) { case IF_AWR_RRD_RRD: { reg345 = id->idReg2(); break; } default: { reg345 = id->idReg1(); break; } } } unsigned regcode = insEncodeReg345(ins, reg345, size, &code); dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code); if (UseVEXEncoding() && (ins != INS_crc32)) { // Emit last opcode byte // TODO-XArch-CQ: Right now support 4-byte opcode instructions only assert((code & 0xFF) == 0); dst += emitOutputByte(dst, (code >> 8) & 0xFF); } else { dst += emitOutputWord(dst, code >> 16); dst += emitOutputWord(dst, code & 0xFFFF); } code = regcode; } // Is this a 'big' opcode? else if (code & 0xFF000000) { // Output the REX prefix dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code); // Output the highest word of the opcode // We need to check again as in case of AVX instructions leading opcode bytes are stripped off // and encoded as part of VEX prefix. if (code & 0xFF000000) { dst += emitOutputWord(dst, code >> 16); code &= 0x0000FFFF; } } else if (code & 0x00FF0000) { // BT supports 16 bit operands and this code doesn't handle the necessary 66 prefix. assert(ins != INS_bt); // Output the REX prefix dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code); // Output the highest byte of the opcode if (code & 0x00FF0000) { dst += emitOutputByte(dst, code >> 16); code &= 0x0000FFFF; } // Use the large version if this is not a byte. This trick will not // work in case of SSE2 and AVX instructions. 
if ((size != EA_1BYTE) && HasRegularWideForm(ins)) { code |= 0x1; } } else if (CodeGen::instIsFP(ins)) { assert(size == EA_4BYTE || size == EA_8BYTE); if (size == EA_8BYTE) { code += 4; } } else if (!IsSSEInstruction(ins) && !IsAVXInstruction(ins)) { /* Is the operand size larger than a byte? */ switch (size) { case EA_1BYTE: break; case EA_2BYTE: /* Output a size prefix for a 16-bit operand */ dst += emitOutputByte(dst, 0x66); FALLTHROUGH; case EA_4BYTE: #ifdef TARGET_AMD64 case EA_8BYTE: #endif /* Set the 'w' bit to get the large version */ code |= 0x1; break; #ifdef TARGET_X86 case EA_8BYTE: /* Double operand - set the appropriate bit */ code |= 0x04; break; #endif // TARGET_X86 default: NO_WAY("unexpected size"); break; } } // Output the REX prefix dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code); // Get the displacement value dsp = emitGetInsAmdAny(id); GOT_DSP: dspInByte = ((signed char)dsp == (ssize_t)dsp); dspIsZero = (dsp == 0); if (id->idIsDspReloc()) { dspInByte = false; // relocs can't be placed in a byte } if (isMoffset) { #ifdef TARGET_AMD64 // This code path should never be hit on amd64 since it always uses RIP relative addressing. // In future if ever there is a need to enable this special case, also enable the logic // that sets isMoffset to true on amd64. unreached(); #else // TARGET_X86 dst += emitOutputByte(dst, code); dst += emitOutputSizeT(dst, dsp); if (id->idIsDspReloc()) { emitRecordRelocation((void*)(dst - TARGET_POINTER_SIZE), (void*)dsp, IMAGE_REL_BASED_MOFFSET); } #endif // TARGET_X86 } // Is there a [scaled] index component? 
else if (rgx == REG_NA) { // The address is of the form "[reg+disp]" switch (reg) { case REG_NA: { if (id->idIsDspReloc()) { INT32 addlDelta = 0; // The address is of the form "[disp]" // On x86 - disp is relative to zero // On Amd64 - disp is relative to RIP if (EncodedBySSE38orSSE3A(ins) || (ins == INS_crc32)) { dst += emitOutputByte(dst, code | 0x05); } else { dst += emitOutputWord(dst, code | 0x0500); } if (addc) { // It is of the form "ins [disp], imm" or "ins reg, [disp], imm" // For emitting relocation, we also need to take into account of the // additional bytes of code emitted for immed val. ssize_t cval = addc->cnsVal; #ifdef TARGET_AMD64 // all these opcodes only take a sign-extended 4-byte immediate noway_assert(opsz < 8 || ((int)cval == cval && !addc->cnsReloc)); #else // TARGET_X86 noway_assert(opsz <= 4); #endif // TARGET_X86 switch (opsz) { case 0: case 4: case 8: addlDelta = -4; break; case 2: addlDelta = -2; break; case 1: addlDelta = -1; break; default: assert(!"unexpected operand size"); unreached(); } } #ifdef TARGET_AMD64 // We emit zero on Amd64, to avoid the assert in emitOutputLong() dst += emitOutputLong(dst, 0); #else dst += emitOutputLong(dst, dsp); #endif emitRecordRelocation((void*)(dst - sizeof(INT32)), (void*)dsp, IMAGE_REL_BASED_DISP32, 0, addlDelta); } else { #ifdef TARGET_X86 if (EncodedBySSE38orSSE3A(ins) || (ins == INS_crc32)) { dst += emitOutputByte(dst, code | 0x05); } else { dst += emitOutputWord(dst, code | 0x0500); } #else // TARGET_AMD64 // Amd64: addr fits within 32-bits and can be encoded as a displacement relative to zero. // This addr mode should never be used while generating relocatable ngen code nor if // the addr can be encoded as pc-relative address. noway_assert(!emitComp->opts.compReloc); noway_assert(codeGen->genAddrRelocTypeHint((size_t)dsp) != IMAGE_REL_BASED_REL32); noway_assert((int)dsp == dsp); // This requires, specifying a SIB byte after ModRM byte. 
if (EncodedBySSE38orSSE3A(ins) || (ins == INS_crc32)) { dst += emitOutputByte(dst, code | 0x04); } else { dst += emitOutputWord(dst, code | 0x0400); } dst += emitOutputByte(dst, 0x25); #endif // TARGET_AMD64 dst += emitOutputLong(dst, dsp); } break; } case REG_EBP: { if (EncodedBySSE38orSSE3A(ins) || (ins == INS_crc32)) { // Does the offset fit in a byte? if (dspInByte) { dst += emitOutputByte(dst, code | 0x45); dst += emitOutputByte(dst, dsp); } else { dst += emitOutputByte(dst, code | 0x85); dst += emitOutputLong(dst, dsp); if (id->idIsDspReloc()) { emitRecordRelocation((void*)(dst - sizeof(INT32)), (void*)dsp, IMAGE_REL_BASED_HIGHLOW); } } } else { // Does the offset fit in a byte? if (dspInByte) { dst += emitOutputWord(dst, code | 0x4500); dst += emitOutputByte(dst, dsp); } else { dst += emitOutputWord(dst, code | 0x8500); dst += emitOutputLong(dst, dsp); if (id->idIsDspReloc()) { emitRecordRelocation((void*)(dst - sizeof(INT32)), (void*)dsp, IMAGE_REL_BASED_HIGHLOW); } } } break; } case REG_ESP: { if (EncodedBySSE38orSSE3A(ins) || (ins == INS_crc32)) { // Is the offset 0 or does it at least fit in a byte? if (dspIsZero) { dst += emitOutputByte(dst, code | 0x04); dst += emitOutputByte(dst, 0x24); } else if (dspInByte) { dst += emitOutputByte(dst, code | 0x44); dst += emitOutputByte(dst, 0x24); dst += emitOutputByte(dst, dsp); } else { dst += emitOutputByte(dst, code | 0x84); dst += emitOutputByte(dst, 0x24); dst += emitOutputLong(dst, dsp); if (id->idIsDspReloc()) { emitRecordRelocation((void*)(dst - sizeof(INT32)), (void*)dsp, IMAGE_REL_BASED_HIGHLOW); } } } else { // Is the offset 0 or does it at least fit in a byte? 
if (dspIsZero) { dst += emitOutputWord(dst, code | 0x0400); dst += emitOutputByte(dst, 0x24); } else if (dspInByte) { dst += emitOutputWord(dst, code | 0x4400); dst += emitOutputByte(dst, 0x24); dst += emitOutputByte(dst, dsp); } else { dst += emitOutputWord(dst, code | 0x8400); dst += emitOutputByte(dst, 0x24); dst += emitOutputLong(dst, dsp); if (id->idIsDspReloc()) { emitRecordRelocation((void*)(dst - sizeof(INT32)), (void*)dsp, IMAGE_REL_BASED_HIGHLOW); } } } break; } default: { if (EncodedBySSE38orSSE3A(ins) || (ins == INS_crc32)) { // Put the register in the opcode code |= insEncodeReg012(ins, reg, EA_PTRSIZE, nullptr); // Is there a displacement? if (dspIsZero) { // This is simply "[reg]" dst += emitOutputByte(dst, code); } else { // This is [reg + dsp]" -- does the offset fit in a byte? if (dspInByte) { dst += emitOutputByte(dst, code | 0x40); dst += emitOutputByte(dst, dsp); } else { dst += emitOutputByte(dst, code | 0x80); dst += emitOutputLong(dst, dsp); if (id->idIsDspReloc()) { emitRecordRelocation((void*)(dst - sizeof(INT32)), (void*)dsp, IMAGE_REL_BASED_HIGHLOW); } } } } else { // Put the register in the opcode code |= insEncodeReg012(ins, reg, EA_PTRSIZE, nullptr) << 8; // Is there a displacement? if (dspIsZero) { // This is simply "[reg]" dst += emitOutputWord(dst, code); } else { // This is [reg + dsp]" -- does the offset fit in a byte? if (dspInByte) { dst += emitOutputWord(dst, code | 0x4000); dst += emitOutputByte(dst, dsp); } else { dst += emitOutputWord(dst, code | 0x8000); dst += emitOutputLong(dst, dsp); if (id->idIsDspReloc()) { emitRecordRelocation((void*)(dst - sizeof(INT32)), (void*)dsp, IMAGE_REL_BASED_HIGHLOW); } } } } break; } } } else { unsigned regByte; // We have a scaled index operand unsigned mul = emitDecodeScale(id->idAddr()->iiaAddrMode.amScale); // Is the index operand scaled? if (mul > 1) { // Is there a base register? 
if (reg != REG_NA) { // The address is "[reg + {2/4/8} * rgx + icon]" regByte = insEncodeReg012(ins, reg, EA_PTRSIZE, nullptr) | insEncodeReg345(ins, rgx, EA_PTRSIZE, nullptr) | insSSval(mul); if (EncodedBySSE38orSSE3A(ins) || (ins == INS_crc32)) { // Emit [ebp + {2/4/8} * rgz] as [ebp + {2/4/8} * rgx + 0] if (dspIsZero && reg != REG_EBP) { // The address is "[reg + {2/4/8} * rgx]" dst += emitOutputByte(dst, code | 0x04); dst += emitOutputByte(dst, regByte); } else { // The address is "[reg + {2/4/8} * rgx + disp]" if (dspInByte) { dst += emitOutputByte(dst, code | 0x44); dst += emitOutputByte(dst, regByte); dst += emitOutputByte(dst, dsp); } else { dst += emitOutputByte(dst, code | 0x84); dst += emitOutputByte(dst, regByte); dst += emitOutputLong(dst, dsp); if (id->idIsDspReloc()) { emitRecordRelocation((void*)(dst - sizeof(INT32)), (void*)dsp, IMAGE_REL_BASED_HIGHLOW); } } } } else { // Emit [ebp + {2/4/8} * rgz] as [ebp + {2/4/8} * rgx + 0] if (dspIsZero && reg != REG_EBP) { // The address is "[reg + {2/4/8} * rgx]" dst += emitOutputWord(dst, code | 0x0400); dst += emitOutputByte(dst, regByte); } else { // The address is "[reg + {2/4/8} * rgx + disp]" if (dspInByte) { dst += emitOutputWord(dst, code | 0x4400); dst += emitOutputByte(dst, regByte); dst += emitOutputByte(dst, dsp); } else { dst += emitOutputWord(dst, code | 0x8400); dst += emitOutputByte(dst, regByte); dst += emitOutputLong(dst, dsp); if (id->idIsDspReloc()) { emitRecordRelocation((void*)(dst - sizeof(INT32)), (void*)dsp, IMAGE_REL_BASED_HIGHLOW); } } } } } else { // The address is "[{2/4/8} * rgx + icon]" regByte = insEncodeReg012(ins, REG_EBP, EA_PTRSIZE, nullptr) | insEncodeReg345(ins, rgx, EA_PTRSIZE, nullptr) | insSSval(mul); if (EncodedBySSE38orSSE3A(ins) || (ins == INS_crc32)) { dst += emitOutputByte(dst, code | 0x04); } else { dst += emitOutputWord(dst, code | 0x0400); } dst += emitOutputByte(dst, regByte); // Special case: jump through a jump table if (ins == INS_i_jmp) { dsp += 
(size_t)emitConsBlock; } dst += emitOutputLong(dst, dsp); if (id->idIsDspReloc()) { emitRecordRelocation((void*)(dst - sizeof(INT32)), (void*)dsp, IMAGE_REL_BASED_HIGHLOW); } } } else { // The address is "[reg+rgx+dsp]" regByte = insEncodeReg012(ins, reg, EA_PTRSIZE, nullptr) | insEncodeReg345(ins, rgx, EA_PTRSIZE, nullptr); if (EncodedBySSE38orSSE3A(ins) || (ins == INS_crc32)) { if (dspIsZero && reg != REG_EBP) { // This is [reg+rgx]" dst += emitOutputByte(dst, code | 0x04); dst += emitOutputByte(dst, regByte); } else { // This is [reg+rgx+dsp]" -- does the offset fit in a byte? if (dspInByte) { dst += emitOutputByte(dst, code | 0x44); dst += emitOutputByte(dst, regByte); dst += emitOutputByte(dst, dsp); } else { dst += emitOutputByte(dst, code | 0x84); dst += emitOutputByte(dst, regByte); dst += emitOutputLong(dst, dsp); if (id->idIsDspReloc()) { emitRecordRelocation((void*)(dst - sizeof(INT32)), (void*)dsp, IMAGE_REL_BASED_HIGHLOW); } } } } else { if (dspIsZero && reg != REG_EBP) { // This is [reg+rgx]" dst += emitOutputWord(dst, code | 0x0400); dst += emitOutputByte(dst, regByte); } else { // This is [reg+rgx+dsp]" -- does the offset fit in a byte? 
if (dspInByte) { dst += emitOutputWord(dst, code | 0x4400); dst += emitOutputByte(dst, regByte); dst += emitOutputByte(dst, dsp); } else { dst += emitOutputWord(dst, code | 0x8400); dst += emitOutputByte(dst, regByte); dst += emitOutputLong(dst, dsp); if (id->idIsDspReloc()) { emitRecordRelocation((void*)(dst - sizeof(INT32)), (void*)dsp, IMAGE_REL_BASED_HIGHLOW); } } } } } } // Now generate the constant value, if present if (addc) { ssize_t cval = addc->cnsVal; #ifdef TARGET_AMD64 // all these opcodes only take a sign-extended 4-byte immediate noway_assert(opsz < 8 || ((int)cval == cval && !addc->cnsReloc)); #endif switch (opsz) { case 0: case 4: case 8: dst += emitOutputLong(dst, cval); break; case 2: dst += emitOutputWord(dst, cval); break; case 1: dst += emitOutputByte(dst, cval); break; default: assert(!"unexpected operand size"); } if (addc->cnsReloc) { emitRecordRelocation((void*)(dst - sizeof(INT32)), (void*)(size_t)cval, IMAGE_REL_BASED_HIGHLOW); assert(opsz == 4); } } DONE: // Does this instruction operate on a GC ref value? 
    // (Tail of emitOutputAM:) update GC tracking to reflect the instruction just emitted.
    // Does this instruction operate on a GC ref value?
    if (id->idGCref())
    {
        switch (id->idInsFmt())
        {
            case IF_ARD:
            case IF_AWR:
            case IF_ARW:
                break;

            case IF_RRD_ARD:
                break;

            case IF_RWR_ARD:
                emitGCregLiveUpd(id->idGCref(), id->idReg1(), dst);
                break;

            case IF_RRW_ARD:
                // Mark the destination register as holding a GCT_BYREF
                assert(id->idGCref() == GCT_BYREF && (ins == INS_add || ins == INS_sub));
                emitGCregLiveUpd(GCT_BYREF, id->idReg1(), dst);
                break;

            case IF_ARD_RRD:
            case IF_AWR_RRD:
                break;

            case IF_AWR_RRD_RRD:
                break;

            case IF_ARD_CNS:
            case IF_AWR_CNS:
                break;

            case IF_ARW_RRD:
            case IF_ARW_CNS:
                assert(id->idGCref() == GCT_BYREF && (ins == INS_add || ins == INS_sub));
                break;

            default:
#ifdef DEBUG
                emitDispIns(id, false, false, false);
#endif
                assert(!"unexpected GC ref instruction format");
        }

        // mul can never produce a GC ref
        assert(!instrIs3opImul(ins));
        assert(ins != INS_mulEAX && ins != INS_imulEAX);
    }
    else
    {
        if (!emitInsCanOnlyWriteSSE2OrAVXReg(id))
        {
            // The destination register no longer holds a live GC reference.
            switch (id->idInsFmt())
            {
                case IF_RWR_ARD:
                case IF_RRW_ARD:
                case IF_RWR_RRD_ARD:
                    emitGCregDeadUpd(id->idReg1(), dst);
                    break;
                default:
                    break;
            }

            if (ins == INS_mulEAX || ins == INS_imulEAX)
            {
                // Wide multiply implicitly writes EAX:EDX.
                emitGCregDeadUpd(REG_EAX, dst);
                emitGCregDeadUpd(REG_EDX, dst);
            }

            // For the three operand imul instruction the target register
            // is encoded in the opcode
            if (instrIs3opImul(ins))
            {
                regNumber tgtReg = inst3opImulReg(ins);
                emitGCregDeadUpd(tgtReg, dst);
            }
        }
    }

    return dst;
}

/*****************************************************************************
 *
 *  Output an instruction involving a stack frame value.
 *
 *  Arguments:
 *     dst  - output buffer to receive the encoded instruction bytes
 *     id   - the instruction descriptor to emit (addressing a stack local)
 *     code - the (partially assembled) opcode bits for the instruction
 *     addc - optional immediate constant operand; nullptr if none
 *
 *  Returns the advanced output pointer (just past the bytes written).
 */

BYTE* emitter::emitOutputSV(BYTE* dst, instrDesc* id, code_t code, CnsVal* addc)
{
    int  adr;
    int  dsp;
    bool EBPbased;
    bool dspInByte;
    bool dspIsZero;

    instruction ins  = id->idIns();
    emitAttr    size = id->idOpSize();
    size_t      opsz = EA_SIZE_IN_BYTES(size);

    assert(ins != INS_imul || id->idReg1() == REG_EAX || size == EA_4BYTE || size == EA_8BYTE);

    // `addc` is used for two kinds of instructions
    // 1. ins like ADD that can have reg/mem and const versions both and const version needs to modify the opcode for
    //    large constant operand (e.g., imm32)
    // 2. certain SSE/AVX ins have const operand as control bits that is always 1-Byte (imm8) even if `size` > 1-Byte
    if (addc && (size > EA_1BYTE))
    {
        ssize_t cval = addc->cnsVal;

        // Does the constant fit in a byte?
        // SSE/AVX do not need to modify opcode
        if ((signed char)cval == cval && addc->cnsReloc == false && ins != INS_mov && ins != INS_test)
        {
            if ((id->idInsFmt() != IF_SRW_SHF) && (id->idInsFmt() != IF_RRW_SRD_CNS) &&
                (id->idInsFmt() != IF_RWR_RRD_SRD_CNS) && !IsSSEOrAVXInstruction(ins))
            {
                // Set the 's' bit: the immediate is a sign-extended byte.
                code |= 2;
            }

            opsz = 1;
        }
    }

    // Add VEX prefix if required.
    // There are some callers who already add VEX prefix and call this routine.
    // Therefore, add VEX prefix if one is not already present.
    code = AddVexPrefixIfNeededAndNotPresent(ins, code, size);

    // Compute the REX prefix
    if (TakesRexWPrefix(ins, size))
    {
        code = AddRexWPrefix(ins, code);
    }

    // Special case emitting AVX instructions
    if (EncodedBySSE38orSSE3A(ins) || (ins == INS_crc32))
    {
        if ((ins == INS_crc32) && (size > EA_1BYTE))
        {
            code |= 0x0100;

            if (size == EA_2BYTE)
            {
                // Operand-size override prefix for the 16-bit form.
                dst += emitOutputByte(dst, 0x66);
            }
        }

        regNumber reg345 = REG_NA;
        if (IsBMIInstruction(ins))
        {
            reg345 = getBmiRegNumber(ins);
        }
        if (reg345 == REG_NA)
        {
            reg345 = id->idReg1();
        }
        else
        {
            code = insEncodeReg3456(ins, id->idReg1(), size, code);
        }
        unsigned regcode = insEncodeReg345(ins, reg345, size, &code);

        dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code);

        if (UseVEXEncoding() && (ins != INS_crc32))
        {
            // Emit last opcode byte
            // TODO-XArch-CQ: Right now support 4-byte opcode instructions only
            assert((code & 0xFF) == 0);
            dst += emitOutputByte(dst, (code >> 8) & 0xFF);
        }
        else
        {
            dst += emitOutputWord(dst, code >> 16);
            dst += emitOutputWord(dst, code & 0xFFFF);
        }

        // Only the register bits remain; emitted below with the ModRM byte.
        code = regcode;
    }
    // Is this a 'big' opcode?
    else if (code & 0xFF000000)
    {
        // Output the REX prefix
        dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code);

        // Output the highest word of the opcode
        // We need to check again because in case of AVX instructions the leading
        // escape byte(s) (e.g. 0x0F) will be encoded as part of VEX prefix.
        if (code & 0xFF000000)
        {
            dst += emitOutputWord(dst, code >> 16);
            code &= 0x0000FFFF;
        }
    }
    else if (code & 0x00FF0000)
    {
        // BT supports 16 bit operands and this code doesn't add the necessary 66 prefix.
        assert(ins != INS_bt);

        // Output the REX prefix
        dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code);

        // Output the highest byte of the opcode.
        // We need to check again because in case of AVX instructions the leading
        // escape byte(s) (e.g. 0x0F) will be encoded as part of VEX prefix.
        if (code & 0x00FF0000)
        {
            dst += emitOutputByte(dst, code >> 16);
            code &= 0x0000FFFF;
        }

        // Use the large version if this is not a byte
        if ((size != EA_1BYTE) && HasRegularWideForm(ins))
        {
            code |= 0x1;
        }
    }
    else if (CodeGen::instIsFP(ins))
    {
        assert(size == EA_4BYTE || size == EA_8BYTE);

        if (size == EA_8BYTE)
        {
            code += 4;
        }
    }
    else if (!IsSSEInstruction(ins) && !IsAVXInstruction(ins))
    {
        // Is the operand size larger than a byte?
        switch (size)
        {
            case EA_1BYTE:
                break;

            case EA_2BYTE:
                // Output a size prefix for a 16-bit operand
                dst += emitOutputByte(dst, 0x66);
                FALLTHROUGH;

            case EA_4BYTE:
#ifdef TARGET_AMD64
            case EA_8BYTE:
#endif // TARGET_AMD64

                /* Set the 'w' size bit to indicate 32-bit operation
                 * Note that incrementing "code" for INS_call (0xFF) would
                 * overflow, whereas setting the lower bit to 1 just works out
                 */

                code |= 0x01;
                break;

#ifdef TARGET_X86
            case EA_8BYTE:

                // Double operand - set the appropriate bit.
                // I don't know what a legitimate reason to end up in this case would be
                // considering that FP is taken care of above...
                // what is an instruction that takes a double which is not covered by the
                // above instIsFP? Of the list in instrsxarch, only INS_fprem
                code |= 0x04;
                NO_WAY("bad 8 byte op");
                break;
#endif // TARGET_X86

            default:
                NO_WAY("unexpected size");
                break;
        }
    }

    // Output the REX prefix
    dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code);

    // Figure out the variable's frame position
    int varNum = id->idAddr()->iiaLclVar.lvaVarNum();

    adr = emitComp->lvaFrameAddress(varNum, &EBPbased);
    dsp = adr + id->idAddr()->iiaLclVar.lvaOffset();

    dspInByte = ((signed char)dsp == (int)dsp);
    dspIsZero = (dsp == 0);

    // for stack variables the dsp should never be a reloc
    assert(id->idIsDspReloc() == 0);

    if (EBPbased)
    {
        // EBP-based variable: does the offset fit in a byte?
        if (EncodedBySSE38orSSE3A(ins) || (ins == INS_crc32))
        {
            if (dspInByte)
            {
                // ModRM = [EBP + disp8]
                dst += emitOutputByte(dst, code | 0x45);
                dst += emitOutputByte(dst, dsp);
            }
            else
            {
                // ModRM = [EBP + disp32]
                dst += emitOutputByte(dst, code | 0x85);
                dst += emitOutputLong(dst, dsp);

                if (id->idIsDspReloc())
                {
                    emitRecordRelocation((void*)(dst - sizeof(INT32)), (void*)dsp, IMAGE_REL_BASED_HIGHLOW);
                }
            }
        }
        else
        {
            // Does the offset fit in a byte?
            if (dspInByte)
            {
                dst += emitOutputWord(dst, code | 0x4500);
                dst += emitOutputByte(dst, dsp);
            }
            else
            {
                dst += emitOutputWord(dst, code | 0x8500);
                dst += emitOutputLong(dst, dsp);

                if (id->idIsDspReloc())
                {
                    emitRecordRelocation((void*)(dst - sizeof(INT32)), (void*)dsp, IMAGE_REL_BASED_HIGHLOW);
                }
            }
        }
    }
    else
    {
#if !FEATURE_FIXED_OUT_ARGS
        // Adjust the offset by the amount currently pushed on the CPU stack
        dsp += emitCurStackLvl;
#endif

        dspInByte = ((signed char)dsp == (int)dsp);
        dspIsZero = (dsp == 0);

        // Does the offset fit in a byte?
        if (EncodedBySSE38orSSE3A(ins) || (ins == INS_crc32))
        {
            if (dspInByte)
            {
                if (dspIsZero)
                {
                    // ModRM+SIB = [ESP]  (SIB byte 0x24 required for ESP base)
                    dst += emitOutputByte(dst, code | 0x04);
                    dst += emitOutputByte(dst, 0x24);
                }
                else
                {
                    // ModRM+SIB = [ESP + disp8]
                    dst += emitOutputByte(dst, code | 0x44);
                    dst += emitOutputByte(dst, 0x24);
                    dst += emitOutputByte(dst, dsp);
                }
            }
            else
            {
                // ModRM+SIB = [ESP + disp32]
                dst += emitOutputByte(dst, code | 0x84);
                dst += emitOutputByte(dst, 0x24);
                dst += emitOutputLong(dst, dsp);
            }
        }
        else
        {
            if (dspInByte)
            {
                if (dspIsZero)
                {
                    dst += emitOutputWord(dst, code | 0x0400);
                    dst += emitOutputByte(dst, 0x24);
                }
                else
                {
                    dst += emitOutputWord(dst, code | 0x4400);
                    dst += emitOutputByte(dst, 0x24);
                    dst += emitOutputByte(dst, dsp);
                }
            }
            else
            {
                dst += emitOutputWord(dst, code | 0x8400);
                dst += emitOutputByte(dst, 0x24);
                dst += emitOutputLong(dst, dsp);
            }
        }
    }

    // Now generate the constant value, if present
    if (addc)
    {
        ssize_t cval = addc->cnsVal;

#ifdef TARGET_AMD64
        // all these opcodes only take a sign-extended 4-byte immediate
        noway_assert(opsz < 8 || ((int)cval == cval && !addc->cnsReloc));
#endif

        switch (opsz)
        {
            case 0:
            case 4:
            case 8:
                dst += emitOutputLong(dst, cval);
                break;
            case 2:
                dst += emitOutputWord(dst, cval);
                break;
            case 1:
                dst += emitOutputByte(dst, cval);
                break;

            default:
                assert(!"unexpected operand size");
        }

        if (addc->cnsReloc)
        {
            emitRecordRelocation((void*)(dst - sizeof(INT32)), (void*)(size_t)cval, IMAGE_REL_BASED_HIGHLOW);
            assert(opsz == 4);
        }
    }

    // Does this instruction operate on a GC ref value?
    if (id->idGCref())
    {
        // Factor in the sub-variable offset
        adr += AlignDown(id->idAddr()->iiaLclVar.lvaOffset(), TARGET_POINTER_SIZE);

        switch (id->idInsFmt())
        {
            case IF_SRD:
                // Read  stack                    -- no change
                break;

            case IF_SWR: // Stack Write (So we need to update GC live for stack var)
                // Write stack                    -- GC var may be born
                emitGCvarLiveUpd(adr, varNum, id->idGCref(), dst DEBUG_ARG(varNum));
                break;

            case IF_SRD_CNS:
                // Read  stack                    -- no change
                break;

            case IF_SWR_CNS:
                // Write stack                    -- no change
                break;

            case IF_SRD_RRD:
            case IF_RRD_SRD:
                // Read  stack   , read  register -- no change
                break;

            case IF_RWR_SRD: // Register Write, Stack Read (So we need to update GC live for register)

                // Read  stack   , write register -- GC reg may be born
                emitGCregLiveUpd(id->idGCref(), id->idReg1(), dst);
                break;

            case IF_SWR_RRD: // Stack Write, Register Read (So we need to update GC live for stack var)
                // Read  register, write stack    -- GC var may be born
                emitGCvarLiveUpd(adr, varNum, id->idGCref(), dst DEBUG_ARG(varNum));
                break;

            case IF_RRW_SRD: // Register Read/Write, Stack Read (So we need to update GC live for register)

                // reg could have been a GCREF as GCREF + int=BYREF
                //                             or BYREF+/-int=BYREF
                assert(id->idGCref() == GCT_BYREF && (ins == INS_add || ins == INS_sub));
                emitGCregLiveUpd(id->idGCref(), id->idReg1(), dst);
                break;

            case IF_SRW_CNS:
            case IF_SRW_RRD:
            // += -= of a byref, no change

            case IF_SRW:
                break;

            default:
#ifdef DEBUG
                emitDispIns(id, false, false, false);
#endif
                assert(!"unexpected GC ref instruction format");
        }
    }
    else
    {
        if (!emitInsCanOnlyWriteSSE2OrAVXReg(id))
        {
            switch (id->idInsFmt())
            {
                case IF_RWR_SRD: // Register Write, Stack Read
                case IF_RRW_SRD: // Register Read/Write, Stack Read
                case IF_RWR_RRD_SRD:
                    emitGCregDeadUpd(id->idReg1(), dst);
                    break;

                default:
                    break;
            }

            if (ins == INS_mulEAX || ins == INS_imulEAX)
            {
                // Wide multiply implicitly writes EAX:EDX.
                emitGCregDeadUpd(REG_EAX, dst);
                emitGCregDeadUpd(REG_EDX, dst);
            }

            // For the three operand imul instruction the target register
            // is encoded in the opcode
            if (instrIs3opImul(ins))
            {
                regNumber tgtReg = inst3opImulReg(ins);
                emitGCregDeadUpd(tgtReg, dst);
            }
        }
    }

    return dst;
}

/*****************************************************************************
 *
 *  Output an instruction with a static data member (class variable).
 *
 *  Arguments:
 *     dst  - output buffer to receive the encoded instruction bytes
 *     id   - the instruction descriptor to emit (addressing a static field
 *            or a data-section constant)
 *     code - the (partially assembled) opcode bits for the instruction
 *     addc - optional immediate constant operand; nullptr if none
 *
 *  Returns the advanced output pointer (just past the bytes written).
 */

BYTE* emitter::emitOutputCV(BYTE* dst, instrDesc* id, code_t code, CnsVal* addc)
{
    BYTE*                addr;
    CORINFO_FIELD_HANDLE fldh;
    ssize_t              offs;
    int                  doff;

    emitAttr    size      = id->idOpSize();
    size_t      opsz      = EA_SIZE_IN_BYTES(size);
    instruction ins       = id->idIns();
    bool        isMoffset = false;

    // Get hold of the field handle and offset
    fldh = id->idAddr()->iiaFieldHnd;
    offs = emitGetInsDsp(id);

    // Special case: mov reg, fs:[ddd]  (emit the FS segment-override prefix)
    if (fldh == FLD_GLOBAL_FS)
    {
        dst += emitOutputByte(dst, 0x64);
    }

    // Compute VEX prefix
    // Some of its callers already add VEX prefix and then call this routine.
    // Therefore add VEX prefix if not already present.
    code = AddVexPrefixIfNeededAndNotPresent(ins, code, size);

    // Compute the REX prefix
    if (TakesRexWPrefix(ins, size))
    {
        code = AddRexWPrefix(ins, code);
    }

    // `addc` is used for two kinds of instructions
    // 1. ins like ADD that can have reg/mem and const versions both and const version needs to modify the opcode for
    //    large constant operand (e.g., imm32)
    // 2. certain SSE/AVX ins have const operand as control bits that is always 1-Byte (imm8) even if `size` > 1-Byte
    if (addc && (size > EA_1BYTE))
    {
        ssize_t cval = addc->cnsVal;
        // Does the constant fit in a byte?
        if ((signed char)cval == cval && addc->cnsReloc == false && ins != INS_mov && ins != INS_test)
        {
            // SSE/AVX do not need to modify opcode
            if (id->idInsFmt() != IF_MRW_SHF && !IsSSEOrAVXInstruction(ins))
            {
                code |= 2;
            }

            opsz = 1;
        }
    }
#ifdef TARGET_X86
    else
    {
        // Special case: "mov eax, [addr]" and "mov [addr], eax"
        // Amd64: this is one case where addr can be 64-bit in size. This is
        // currently unused or not enabled on amd64 as it always uses RIP
        // relative addressing which results in smaller instruction size.
        if (ins == INS_mov && id->idReg1() == REG_EAX)
        {
            switch (id->idInsFmt())
            {
                case IF_RWR_MRD:

                    assert(code == (insCodeRM(ins) | (insEncodeReg345(ins, REG_EAX, EA_PTRSIZE, NULL) << 8) | 0x0500));

                    // Switch to the short moffset form (opcode 0xA0).
                    code &= ~((code_t)0xFFFFFFFF);
                    code |= 0xA0;
                    isMoffset = true;
                    break;

                case IF_MWR_RRD:

                    assert(code == (insCodeMR(ins) | (insEncodeReg345(ins, REG_EAX, EA_PTRSIZE, NULL) << 8) | 0x0500));

                    // Switch to the short moffset form (opcode 0xA2).
                    code &= ~((code_t)0xFFFFFFFF);
                    code |= 0xA2;
                    isMoffset = true;
                    break;

                default:
                    break;
            }
        }
    }
#endif // TARGET_X86

    // Special case emitting AVX instructions
    if (EncodedBySSE38orSSE3A(ins) || (ins == INS_crc32))
    {
        if ((ins == INS_crc32) && (size > EA_1BYTE))
        {
            code |= 0x0100;

            if (size == EA_2BYTE)
            {
                // Operand-size override prefix for the 16-bit form.
                dst += emitOutputByte(dst, 0x66);
            }
        }

        regNumber reg345 = REG_NA;
        if (IsBMIInstruction(ins))
        {
            reg345 = getBmiRegNumber(ins);
        }
        if (reg345 == REG_NA)
        {
            reg345 = id->idReg1();
        }
        else
        {
            code = insEncodeReg3456(ins, id->idReg1(), size, code);
        }
        unsigned regcode = insEncodeReg345(ins, reg345, size, &code);

        dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code);

        if (UseVEXEncoding() && (ins != INS_crc32))
        {
            // Emit last opcode byte
            // TODO-XArch-CQ: Right now support 4-byte opcode instructions only
            assert((code & 0xFF) == 0);
            dst += emitOutputByte(dst, (code >> 8) & 0xFF);
        }
        else
        {
            dst += emitOutputWord(dst, code >> 16);
            dst += emitOutputWord(dst, code & 0xFFFF);
        }

        // Emit Mod,R/M byte
        dst += emitOutputByte(dst, regcode | 0x05);
        code = 0;
    }
    // Is this a 'big' opcode?
    else if (code & 0xFF000000)
    {
        // Output the REX prefix
        dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code);

        // Output the highest word of the opcode.
        // Check again since AVX instructions encode leading opcode bytes as part of VEX prefix.
        if (code & 0xFF000000)
        {
            dst += emitOutputWord(dst, code >> 16);
        }
        code &= 0x0000FFFF;
    }
    else if (code & 0x00FF0000)
    {
        // Output the REX prefix
        dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code);

        // Check again as VEX prefix would have encoded leading opcode byte
        if (code & 0x00FF0000)
        {
            dst += emitOutputByte(dst, code >> 16);
            code &= 0x0000FFFF;
        }

        // Use the large version if this is not a byte.
        if (size != EA_1BYTE && HasRegularWideForm(ins))
        {
            code |= 0x1;
        }
    }
    else if (CodeGen::instIsFP(ins))
    {
        assert(size == EA_4BYTE || size == EA_8BYTE);

        if (size == EA_8BYTE)
        {
            code += 4;
        }
    }
    else
    {
        // Is the operand size larger than a byte?
        switch (size)
        {
            case EA_1BYTE:
                break;

            case EA_2BYTE:
                // Output a size prefix for a 16-bit operand
                dst += emitOutputByte(dst, 0x66);
                FALLTHROUGH;

            case EA_4BYTE:
#ifdef TARGET_AMD64
            case EA_8BYTE:
#endif
                // Set the 'w' bit to get the large version
                code |= 0x1;
                break;

#ifdef TARGET_X86
            case EA_8BYTE:
                // Double operand - set the appropriate bit
                code |= 0x04;
                break;
#endif // TARGET_X86

            default:
                assert(!"unexpected size");
        }
    }

    // Output the REX prefix
    dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code);

    if (code)
    {
        if (id->idInsFmt() == IF_MRD_OFF || id->idInsFmt() == IF_RWR_MRD_OFF || isMoffset)
        {
            dst += emitOutputByte(dst, code);
        }
        else
        {
            dst += emitOutputWord(dst, code);
        }
    }

    // Do we have a constant or a static data member?
    doff = Compiler::eeGetJitDataOffs(fldh);
    if (doff >= 0)
    {
        // Data-section constant: address is an offset into the constant block.
        addr = emitConsBlock + doff;

#ifdef DEBUG
        int byteSize = EA_SIZE_IN_BYTES(emitGetMemOpSize(id));

        // Check that the offset is properly aligned (i.e. the ddd in [ddd])
        // When SMALL_CODE is set, we only expect 4-byte alignment, otherwise
        // we expect the same alignment as the size of the constant.
        assert((emitChkAlign == false) || (ins == INS_lea) ||
               ((emitComp->compCodeOpt() == Compiler::SMALL_CODE) && (((size_t)addr & 3) == 0)) ||
               (((size_t)addr & (byteSize - 1)) == 0));
#endif // DEBUG
    }
    else
    {
        // Special case: mov reg, fs:[ddd] or mov reg, [ddd]
        if (jitStaticFldIsGlobAddr(fldh))
        {
            addr = nullptr;
        }
        else
        {
            addr = (BYTE*)emitComp->info.compCompHnd->getFieldAddress(fldh, nullptr);
            if (addr == nullptr)
            {
                NO_WAY("could not obtain address of static field");
            }
        }
    }

    BYTE* target = (addr + offs);

    if (!isMoffset)
    {
        INT32 addlDelta = 0;

        if (addc)
        {
            // It is of the form "ins [disp], imm" or "ins reg, [disp], imm"
            // For emitting relocation, we also need to take into account of the
            // additional bytes of code emitted for immed val.

            ssize_t cval = addc->cnsVal;

#ifdef TARGET_AMD64
            // all these opcodes only take a sign-extended 4-byte immediate
            noway_assert(opsz < 8 || ((int)cval == cval && !addc->cnsReloc));
#else  // TARGET_X86
            noway_assert(opsz <= 4);
#endif // TARGET_X86

            switch (opsz)
            {
                case 0:
                case 4:
                case 8:
                    addlDelta = -4;
                    break;
                case 2:
                    addlDelta = -2;
                    break;
                case 1:
                    addlDelta = -1;
                    break;

                default:
                    assert(!"unexpected operand size");
                    unreached();
            }
        }

#ifdef TARGET_AMD64
        // All static field and data section constant accesses should be marked as relocatable
        noway_assert(id->idIsDspReloc());
        dst += emitOutputLong(dst, 0);
#else // TARGET_X86
        dst += emitOutputLong(dst, (int)(ssize_t)target);
#endif // TARGET_X86

        if (id->idIsDspReloc())
        {
            emitRecordRelocation((void*)(dst - sizeof(int)), target, IMAGE_REL_BASED_DISP32, 0, addlDelta);
        }
    }
    else
    {
#ifdef TARGET_AMD64
        // This code path should never be hit on amd64 since it always uses RIP relative addressing.
        // In future if ever there is a need to enable this special case, also enable the logic
        // that sets isMoffset to true on amd64.
        unreached();
#else // TARGET_X86
        dst += emitOutputSizeT(dst, (ssize_t)target);

        if (id->idIsDspReloc())
        {
            emitRecordRelocation((void*)(dst - TARGET_POINTER_SIZE), target, IMAGE_REL_BASED_MOFFSET);
        }
#endif // TARGET_X86
    }

    // Now generate the constant value, if present
    if (addc)
    {
        ssize_t cval = addc->cnsVal;

#ifdef TARGET_AMD64
        // all these opcodes only take a sign-extended 4-byte immediate
        noway_assert(opsz < 8 || ((int)cval == cval && !addc->cnsReloc));
#endif

        switch (opsz)
        {
            case 0:
            case 4:
            case 8:
                dst += emitOutputLong(dst, cval);
                break;
            case 2:
                dst += emitOutputWord(dst, cval);
                break;
            case 1:
                dst += emitOutputByte(dst, cval);
                break;

            default:
                assert(!"unexpected operand size");
        }

        if (addc->cnsReloc)
        {
            emitRecordRelocation((void*)(dst - sizeof(INT32)), (void*)(size_t)cval, IMAGE_REL_BASED_HIGHLOW);
            assert(opsz == 4);
        }
    }

    // Does this instruction operate on a GC ref value?
    if (id->idGCref())
    {
        switch (id->idInsFmt())
        {
            case IF_MRD:
            case IF_MRW:
            case IF_MWR:
                break;

            case IF_RRD_MRD:
                break;

            case IF_RWR_MRD:
                emitGCregLiveUpd(id->idGCref(), id->idReg1(), dst);
                break;

            case IF_MRD_RRD:
            case IF_MWR_RRD:
            case IF_MRW_RRD:
                break;

            case IF_MRD_CNS:
            case IF_MWR_CNS:
            case IF_MRW_CNS:
                break;

            case IF_RRW_MRD:

                assert(id->idGCref() == GCT_BYREF);
                assert(ins == INS_add || ins == INS_sub);

                // Mark it as holding a GCT_BYREF
                emitGCregLiveUpd(GCT_BYREF, id->idReg1(), dst);
                break;

            default:
#ifdef DEBUG
                emitDispIns(id, false, false, false);
#endif
                assert(!"unexpected GC ref instruction format");
        }
    }
    else
    {
        if (!emitInsCanOnlyWriteSSE2OrAVXReg(id))
        {
            switch (id->idInsFmt())
            {
                case IF_RWR_MRD:
                case IF_RRW_MRD:
                case IF_RWR_RRD_MRD:
                    emitGCregDeadUpd(id->idReg1(), dst);
                    break;

                default:
                    break;
            }

            if (ins == INS_mulEAX || ins == INS_imulEAX)
            {
                // Wide multiply implicitly writes EAX:EDX.
                emitGCregDeadUpd(REG_EAX, dst);
                emitGCregDeadUpd(REG_EDX, dst);
            }

            // For the three operand imul instruction the target register
            // is encoded in the opcode
            if (instrIs3opImul(ins))
            {
                regNumber tgtReg = inst3opImulReg(ins);
                emitGCregDeadUpd(tgtReg, dst);
            }
        }
    }

    return dst;
}

/*****************************************************************************
 *
 *  Output an instruction with one register operand.
 *
 *  Arguments:
 *     dst - output buffer to receive the encoded instruction bytes
 *     id  - the instruction descriptor to emit
 *
 *  Returns the advanced output pointer (just past the bytes written).
 */

BYTE* emitter::emitOutputR(BYTE* dst, instrDesc* id)
{
    code_t code;

    instruction ins  = id->idIns();
    regNumber   reg  = id->idReg1();
    emitAttr    size = id->idOpSize();

    // We would to update GC info correctly
    assert(!IsSSEInstruction(ins));
    assert(!IsAVXInstruction(ins));

    // Get the 'base' opcode
    switch (ins)
    {
        case INS_inc:
        case INS_dec:

#ifdef TARGET_AMD64
            if (true)
#else
            if (size == EA_1BYTE)
#endif
            {
                assert(INS_inc_l == INS_inc + 1);
                assert(INS_dec_l == INS_dec + 1);

                // Can't use the compact form, use the long form
                ins = (instruction)(ins + 1);
                if (size == EA_2BYTE)
                {
                    // Output a size prefix for a 16-bit operand
                    dst += emitOutputByte(dst, 0x66);
                }

                code = insCodeRR(ins);
                if (size != EA_1BYTE)
                {
                    // Set the 'w' bit to get the large version
                    code |= 0x1;
                }

                if (TakesRexWPrefix(ins, size))
                {
                    code = AddRexWPrefix(ins, code);
                }

                // Register...
                unsigned regcode = insEncodeReg012(ins, reg, size, &code);

                // Output the REX prefix
                dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code);

                dst += emitOutputWord(dst, code | (regcode << 8));
            }
            else
            {
                // Compact one-byte inc/dec form (x86 only, non-byte sizes).
                if (size == EA_2BYTE)
                {
                    // Output a size prefix for a 16-bit operand
                    dst += emitOutputByte(dst, 0x66);
                }
                dst += emitOutputByte(dst, insCodeRR(ins) | insEncodeReg012(ins, reg, size, nullptr));
            }
            break;

        case INS_pop:
        case INS_pop_hide:
        case INS_push:
        case INS_push_hide:

            assert(size == EA_PTRSIZE);
            code = insEncodeOpreg(ins, reg, size);

            assert(!TakesVexPrefix(ins));
            assert(!TakesRexWPrefix(ins, size));

            // Output the REX prefix
            dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code);

            dst += emitOutputByte(dst, code);
            break;

        case INS_bswap:
        {
            assert(size >= EA_4BYTE && size <= EA_PTRSIZE); // 16-bit BSWAP is undefined

            // The Intel instruction set reference for BSWAP states that extended registers
            // should be enabled via REX.R, but per Vol. 2A, Sec. 2.2.1.2 (see also Figure 2-7),
            // REX.B should instead be used if the register is encoded in the opcode byte itself.
            // Therefore the default logic of insEncodeReg012 is correct for this case.

            code = insCodeRR(ins);

            if (TakesRexWPrefix(ins, size))
            {
                code = AddRexWPrefix(ins, code);
            }

            // Register...
            unsigned regcode = insEncodeReg012(ins, reg, size, &code);

            // Output the REX prefix
            dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code);

            dst += emitOutputWord(dst, code | (regcode << 8));
            break;
        }

        case INS_seto:
        case INS_setno:
        case INS_setb:
        case INS_setae:
        case INS_sete:
        case INS_setne:
        case INS_setbe:
        case INS_seta:
        case INS_sets:
        case INS_setns:
        case INS_setp:
        case INS_setnp:
        case INS_setl:
        case INS_setge:
        case INS_setle:
        case INS_setg:

            assert(id->idGCref() == GCT_NONE);
            assert(size == EA_1BYTE);

            code = insEncodeMRreg(ins, reg, EA_1BYTE, insCodeMR(ins));

            // Output the REX prefix
            dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code);

            // We expect this to always be a 'big' opcode
            assert(code & 0x00FF0000);

            dst += emitOutputByte(dst, code >> 16);
            dst += emitOutputWord(dst, code & 0x0000FFFF);

            break;

        case INS_mulEAX:
        case INS_imulEAX:

            // Kill off any GC refs in EAX or EDX
            emitGCregDeadUpd(REG_EAX, dst);
            emitGCregDeadUpd(REG_EDX, dst);

            FALLTHROUGH;

        default:

            assert(id->idGCref() == GCT_NONE);

            code = insEncodeMRreg(ins, reg, size, insCodeMR(ins));

            if (size != EA_1BYTE)
            {
                // Set the 'w' bit to get the large version
                code |= 0x1;

                if (size == EA_2BYTE)
                {
                    // Output a size prefix for a 16-bit operand
                    dst += emitOutputByte(dst, 0x66);
                }
            }

            code = AddVexPrefixIfNeeded(ins, code, size);

            if (TakesRexWPrefix(ins, size))
            {
                code = AddRexWPrefix(ins, code);
            }

            // Output the REX prefix
            dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code);

            dst += emitOutputWord(dst, code);
            break;
    }

    // Are we writing the register? if so then update the GC information
    switch (id->idInsFmt())
    {
        case IF_RRD:
            break;
        case IF_RWR:
            if (id->idGCref())
            {
                emitGCregLiveUpd(id->idGCref(), id->idReg1(), dst);
            }
            else
            {
                emitGCregDeadUpd(id->idReg1(), dst);
            }
            break;
        case IF_RRW:
        {
#ifdef DEBUG
            regMaskTP regMask = genRegMask(reg);
#endif
            if (id->idGCref())
            {
                assert(ins == INS_inc || ins == INS_dec || ins == INS_inc_l || ins == INS_dec_l);

                // We would like to assert that the reg must currently be holding either a gcref or a byref.
                // However, we can see cases where a LCLHEAP generates a non-gcref value into a register,
                // and the first instruction we generate after the LCLHEAP is an `inc` that is typed as
                // byref. We'll properly create the byref gcinfo when this happens.
                //     assert((emitThisGCrefRegs | emitThisByrefRegs) & regMask);

                assert(id->idGCref() == GCT_BYREF);

                // Mark it as holding a GCT_BYREF
                emitGCregLiveUpd(GCT_BYREF, id->idReg1(), dst);
            }
            else
            {
                // Can't use RRW to trash a GC ref.  It's OK for unverifiable code
                // to trash Byrefs.
                assert((emitThisGCrefRegs & regMask) == 0);
            }
        }
        break;

        default:
#ifdef DEBUG
            emitDispIns(id, false, false, false);
#endif
            assert(!"unexpected instruction format");
            break;
    }

    return dst;
}

/*****************************************************************************
 *
 *  Output an instruction with two register operands.
*/

// Emits a two-register-operand instruction: chooses the opcode form, adds any
// required prefixes (operand-size 0x66, REX, VEX), emits the opcode and ModRM
// byte, then updates the emitter's GC register liveness for GC/byref operands.
// Returns the updated destination code pointer.
BYTE* emitter::emitOutputRR(BYTE* dst, instrDesc* id)
{
    code_t code;

    instruction ins  = id->idIns();
    regNumber   reg1 = id->idReg1();
    regNumber   reg2 = id->idReg2();
    emitAttr    size = id->idOpSize();

    if (IsSSEOrAVXInstruction(ins))
    {
        // movd moves between a GPR and an XMM register; exactly one operand is a float reg.
        assert((ins != INS_movd) || (isFloatReg(reg1) != isFloatReg(reg2)));

        if ((ins != INS_movd) || isFloatReg(reg1))
        {
            code = insCodeRM(ins);
        }
        else
        {
            code = insCodeMR(ins);
        }
        code = AddVexPrefixIfNeeded(ins, code, size);
        code = insEncodeRMreg(ins, code);

        if (TakesRexWPrefix(ins, size))
        {
            code = AddRexWPrefix(ins, code);
        }
    }
    else if ((ins == INS_movsx) || (ins == INS_movzx) || (insIsCMOV(ins)))
    {
        assert(hasCodeRM(ins) && !hasCodeMI(ins) && !hasCodeMR(ins));
        code = insCodeRM(ins);
        code = AddVexPrefixIfNeeded(ins, code, size);
        // The low opcode bit selects the 16-bit source form for movsx/movzx.
        code = insEncodeRMreg(ins, code) | (int)(size == EA_2BYTE);

#ifdef TARGET_AMD64
        assert((size < EA_4BYTE) || (insIsCMOV(ins)));
        if ((size == EA_8BYTE) || (ins == INS_movsx))
        {
            code = AddRexWPrefix(ins, code);
        }
    }
    else if (ins == INS_movsxd)
    {
        assert(hasCodeRM(ins) && !hasCodeMI(ins) && !hasCodeMR(ins));
        code = insCodeRM(ins);
        code = AddVexPrefixIfNeeded(ins, code, size);
        code = insEncodeRMreg(ins, code);

#endif // TARGET_AMD64
    }
#ifdef FEATURE_HW_INTRINSICS
    else if ((ins == INS_bsf) || (ins == INS_bsr) || (ins == INS_crc32) || (ins == INS_lzcnt) || (ins == INS_popcnt) ||
             (ins == INS_tzcnt))
    {
        assert(hasCodeRM(ins) && !hasCodeMI(ins) && !hasCodeMR(ins));
        code = insCodeRM(ins);
        code = AddVexPrefixIfNeeded(ins, code, size);
        code = insEncodeRMreg(ins, code);
        if ((ins == INS_crc32) && (size > EA_1BYTE))
        {
            code |= 0x0100;
        }

        if (size == EA_2BYTE)
        {
            assert(ins == INS_crc32);
            dst += emitOutputByte(dst, 0x66);
        }
        else if (size == EA_8BYTE)
        {
            code = AddRexWPrefix(ins, code);
        }
    }
#endif // FEATURE_HW_INTRINSICS
    else
    {
        assert(!TakesVexPrefix(ins));
        code = insCodeMR(ins);
        code = insEncodeMRreg(ins, code);

        if (ins != INS_test)
        {
            // Flip the direction bit so reg1 is the destination ('test' has no direction bit).
            code |= 2;
        }

        switch (size)
        {
            case EA_1BYTE:
                // Byte-sized operands require byte-addressable registers.
                noway_assert(RBM_BYTE_REGS & genRegMask(reg1));
                noway_assert(RBM_BYTE_REGS & genRegMask(reg2));
                break;

            case EA_2BYTE:
                // Output a size prefix for a 16-bit operand
                dst += emitOutputByte(dst, 0x66);
                FALLTHROUGH;

            case EA_4BYTE:
                // Set the 'w' bit to get the large version
                code |= 0x1;
                break;

#ifdef TARGET_AMD64
            case EA_8BYTE:
                // TODO-AMD64-CQ: Better way to not emit REX.W when we don't need it
                // Don't need to zero out the high bits explicitly
                if ((ins != INS_xor) || (reg1 != reg2))
                {
                    code = AddRexWPrefix(ins, code);
                }
                else
                {
                    // 'xor reg, reg' zeroes the register; the 32-bit form suffices since
                    // it implicitly clears the upper 32 bits.
                    id->idOpSize(EA_4BYTE);
                }

                // Set the 'w' bit to get the large version
                code |= 0x1;
                break;

#endif // TARGET_AMD64

            default:
                assert(!"unexpected size");
        }
    }

    // ModRM register fields: regFor345Bits goes in the 'reg' field (bits 5:3),
    // regFor012Bits in the 'r/m' field (bits 2:0).
    regNumber regFor012Bits = reg2;
    regNumber regFor345Bits = REG_NA;

    if (IsBMIInstruction(ins))
    {
        // Some BMI instructions encode an opcode extension in the 'reg' field.
        regFor345Bits = getBmiRegNumber(ins);
    }

    if (regFor345Bits == REG_NA)
    {
        regFor345Bits = reg1;
    }

    if (ins == INS_movd)
    {
        assert(isFloatReg(reg1) != isFloatReg(reg2));
        if (isFloatReg(reg2))
        {
            std::swap(regFor012Bits, regFor345Bits);
        }
    }

    unsigned regCode = insEncodeReg345(ins, regFor345Bits, size, &code);
    regCode |= insEncodeReg012(ins, regFor012Bits, size, &code);

    if (TakesVexPrefix(ins))
    {
        // In case of AVX instructions that take 3 operands, we generally want to encode reg1
        // as first source. In this case, reg1 is both a source and a destination.
        // The exception is the "merge" 3-operand case, where we have a move instruction, such
        // as movss, and we want to merge the source with itself.
        //
        // TODO-XArch-CQ: Eventually we need to support 3 operand instruction formats. For
        // now we use the single source as source1 and source2.
        if (IsDstDstSrcAVXInstruction(ins))
        {
            // encode source/dest operand reg in 'vvvv' bits in 1's complement form
            code = insEncodeReg3456(ins, reg1, size, code);
        }
        else if (IsDstSrcSrcAVXInstruction(ins))
        {
            // encode source operand reg in 'vvvv' bits in 1's complement form
            code = insEncodeReg3456(ins, reg2, size, code);
        }
    }

    // Output the REX prefix
    dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code);

    if (code & 0xFF000000)
    {
        // Output the highest word of the opcode
        dst += emitOutputWord(dst, code >> 16);
        code &= 0x0000FFFF;

        if (Is4ByteSSEInstruction(ins))
        {
            // Output 3rd byte of the opcode
            dst += emitOutputByte(dst, code);
            code &= 0xFF00;
        }
    }
    else if (code & 0x00FF0000)
    {
        dst += emitOutputByte(dst, code >> 16);
        code &= 0x0000FFFF;
    }

    // TODO-XArch-CQ: Right now support 4-byte opcode instructions only
    if ((code & 0xFF00) == 0xC000)
    {
        dst += emitOutputWord(dst, code | (regCode << 8));
    }
    else if ((code & 0xFF) == 0x00)
    {
        // This case happens for some SSE/AVX instructions only
        assert(IsAVXInstruction(ins) || Is4ByteSSEInstruction(ins));

        dst += emitOutputByte(dst, (code >> 8) & 0xFF);
        // 0xC0 = ModRM mod bits for register-direct addressing.
        dst += emitOutputByte(dst, (0xC0 | regCode));
    }
    else
    {
        dst += emitOutputWord(dst, code);
        dst += emitOutputByte(dst, (0xC0 | regCode));
    }

    // Does this instruction operate on a GC ref value?
    if (id->idGCref())
    {
        switch (id->idInsFmt())
        {
            case IF_RRD_RRD:
                break;

            case IF_RWR_RRD:

                if (emitSyncThisObjReg != REG_NA && emitIGisInProlog(emitCurIG) && reg2 == (int)REG_ARG_0)
                {
                    // We're relocating "this" in the prolog
                    assert(emitComp->lvaIsOriginalThisArg(0));
                    assert(emitComp->lvaTable[0].lvRegister);
                    assert(emitComp->lvaTable[0].GetRegNum() == reg1);

                    if (emitFullGCinfo)
                    {
                        emitGCregLiveSet(id->idGCref(), genRegMask(reg1), dst, true);
                        break;
                    }
                    else
                    {
                        /* If emitFullGCinfo==false, then we don't use any
                           regPtrDsc's and so explicitly note the location
                           of "this" in GCEncode.cpp
                         */
                    }
                }

                emitGCregLiveUpd(id->idGCref(), reg1, dst);
                break;

            case IF_RRW_RRD:

                switch (id->idIns())
                {
                    /*
                        This must be one of the following cases:

                        xor reg, reg        to assign NULL

                        and r1 , r2         if (ptr1 && ptr2) ...
                        or  r1 , r2         if (ptr1 || ptr2) ...

                        add r1 , r2         to compute a normal byref
                        sub r1 , r2         to compute a strange byref (VC only)
                    */
                    case INS_xor:
                        assert(reg1 == reg2);
                        emitGCregLiveUpd(id->idGCref(), reg1, dst);
                        break;

                    case INS_or:
                    case INS_and:
                        emitGCregDeadUpd(reg1, dst);
                        break;

                    case INS_add:
                    case INS_sub:
                        assert(id->idGCref() == GCT_BYREF);

#if 0
#ifdef DEBUG
                        // Due to elided register moves, we can't have the following assert.
                        // For example, consider:
                        //    t85 = LCL_VAR byref V01 arg1 rdx (last use) REG rdx
                        //        /--* t85 byref
                        //        * STORE_LCL_VAR byref V40 tmp31 rdx REG rdx
                        // Here, V01 is type `long` on entry, then is stored as a byref. But because
                        // the register allocator assigned the same register, no instruction was
                        // generated, and we only (currently) make gcref/byref changes in emitter GC info
                        // when an instruction is generated. We still generate correct GC info, as this
                        // instruction, if writing a GC ref even through reading a long, will go live here.
                        // These situations typically occur due to unsafe casting, such as with Span<T>.

                        regMaskTP regMask;
                        regMask = genRegMask(reg1) | genRegMask(reg2);

                        // r1/r2 could have been a GCREF as GCREF + int=BYREF
                        //                            or BYREF+/-int=BYREF
                        assert(((regMask & emitThisGCrefRegs) && (ins == INS_add)) ||
                               ((regMask & emitThisByrefRegs) && (ins == INS_add || ins == INS_sub)));
#endif // DEBUG
#endif // 0

                        // Mark r1 as holding a byref
                        emitGCregLiveUpd(GCT_BYREF, reg1, dst);
                        break;

                    default:
#ifdef DEBUG
                        emitDispIns(id, false, false, false);
#endif
                        assert(!"unexpected GC reg update instruction");
                }

                break;

            case IF_RRW_RRW:
                // This must be "xchg reg1, reg2"
                assert(id->idIns() == INS_xchg);

                // If we got here, the GC-ness of the registers doesn't match, so we have to "swap" them in the GC
                // register pointer mask.

                GCtype gc1, gc2;

                gc1 = emitRegGCtype(reg1);
                gc2 = emitRegGCtype(reg2);

                if (gc1 != gc2)
                {
                    // Kill the GC-info about the GC registers

                    if (needsGC(gc1))
                    {
                        emitGCregDeadUpd(reg1, dst);
                    }

                    if (needsGC(gc2))
                    {
                        emitGCregDeadUpd(reg2, dst);
                    }

                    // Now, swap the info

                    if (needsGC(gc1))
                    {
                        emitGCregLiveUpd(gc1, reg2, dst);
                    }

                    if (needsGC(gc2))
                    {
                        emitGCregLiveUpd(gc2, reg1, dst);
                    }
                }
                break;

            default:
#ifdef DEBUG
                emitDispIns(id, false, false, false);
#endif
                assert(!"unexpected GC ref instruction format");
        }
    }
    else
    {
        // Not a GC value: conservatively kill the GC-ness of any register this
        // instruction writes.
        if (!emitInsCanOnlyWriteSSE2OrAVXReg(id))
        {
            switch (id->idInsFmt())
            {
                case IF_RRD_CNS:
                    // INS_mulEAX can not be used with any of these formats
                    assert(ins != INS_mulEAX && ins != INS_imulEAX);

                    // For the three operand imul instruction the target
                    // register is encoded in the opcode
                    if (instrIs3opImul(ins))
                    {
                        regNumber tgtReg = inst3opImulReg(ins);
                        emitGCregDeadUpd(tgtReg, dst);
                    }
                    break;

                case IF_RWR_RRD:
                case IF_RRW_RRD:
                case IF_RWR_RRD_RRD:
                    emitGCregDeadUpd(reg1, dst);
                    break;

                default:
                    break;
            }
        }
    }

    return dst;
}

// Emits a three-operand AVX instruction: targetReg = src1 op src2.
// src1 is placed in the VEX 'vvvv' bits; targetReg and src2 go in the ModRM
// byte. Returns the updated destination code pointer.
BYTE* emitter::emitOutputRRR(BYTE* dst, instrDesc* id)
{
    code_t code;

    instruction ins = id->idIns();
    assert(IsAVXInstruction(ins));
    assert(IsThreeOperandAVXInstruction(ins) || isAvxBlendv(ins));
    regNumber targetReg = id->idReg1();
    regNumber src1      = id->idReg2();
    regNumber src2      = id->idReg3();
    emitAttr  size      = id->idOpSize();

    code = insCodeRM(ins);
    code = AddVexPrefixIfNeeded(ins, code, size);
    code = insEncodeRMreg(ins, code);

    if (TakesRexWPrefix(ins, size))
    {
        code = AddRexWPrefix(ins, code);
    }

    // ModRM: targetReg -> 'reg' field, src2 -> 'r/m' field.
    unsigned regCode = insEncodeReg345(ins, targetReg, size, &code);
    regCode |= insEncodeReg012(ins, src2, size, &code);
    // encode source operand reg in 'vvvv' bits in 1's complement form
    code = insEncodeReg3456(ins, src1, size, code);

    // Output the REX prefix
    dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code);

    // Is this a 'big' opcode?
    if (code & 0xFF000000)
    {
        // Output the highest word of the opcode
        dst += emitOutputWord(dst, code >> 16);
        code &= 0x0000FFFF;
    }
    else if (code & 0x00FF0000)
    {
        dst += emitOutputByte(dst, code >> 16);
        code &= 0x0000FFFF;
    }

    // TODO-XArch-CQ: Right now support 4-byte opcode instructions only
    if ((code & 0xFF00) == 0xC000)
    {
        dst += emitOutputWord(dst, code | (regCode << 8));
    }
    else if ((code & 0xFF) == 0x00)
    {
        // This case happens for AVX instructions only
        assert(IsAVXInstruction(ins));

        dst += emitOutputByte(dst, (code >> 8) & 0xFF);
        // 0xC0 = ModRM mod bits for register-direct addressing.
        dst += emitOutputByte(dst, (0xC0 | regCode));
    }
    else
    {
        dst += emitOutputWord(dst, code);
        dst += emitOutputByte(dst, (0xC0 | regCode));
    }

    noway_assert(!id->idGCref());

    // The destination register no longer holds a GC value.
    if (!emitInsCanOnlyWriteSSE2OrAVXReg(id))
    {
        switch (id->idInsFmt())
        {
            case IF_RWR_RRD_RRD:
            case IF_RWR_RRD_RRD_CNS:
            case IF_RWR_RRD_RRD_RRD:
                emitGCregDeadUpd(id->idReg1(), dst);
                break;

            default:
                break;
        }
    }

    return dst;
}

/*****************************************************************************
 *
 *  Output an instruction with a register and constant operands.
*/

// Emits an instruction with a register operand and an immediate constant.
// Picks the shortest legal encoding (sign-extended imm8, accumulator form,
// or full-width immediate), handles reloc'd constants, and updates GC
// register liveness. Returns the updated destination code pointer.
BYTE* emitter::emitOutputRI(BYTE* dst, instrDesc* id)
{
    code_t      code;
    emitAttr    size = id->idOpSize();
    instruction ins  = id->idIns();
    regNumber   reg  = id->idReg1();
    ssize_t     val  = emitGetInsSC(id);
    // mov and test have no sign-extended imm8 form, so never treat their
    // immediates as byte-sized.
    bool valInByte = ((signed char)val == (target_ssize_t)val) && (ins != INS_mov) && (ins != INS_test);

    // BT reg,imm might be useful but it requires special handling of the immediate value
    // (it is always encoded in a byte). Let's not complicate things until this is needed.
    assert(ins != INS_bt);

    if (id->idIsCnsReloc())
    {
        valInByte = false; // relocs can't be placed in a byte
    }

    noway_assert(emitVerifyEncodable(ins, size, reg));

    if (IsSSEOrAVXInstruction(ins))
    {
        // Handle SSE2 instructions of the form "opcode reg, immed8"

        assert(id->idGCref() == GCT_NONE);
        assert(valInByte);

        // The left and right shifts use the same encoding, and are distinguished by the Reg/Opcode field.
        regNumber regOpcode = getSseShiftRegNumber(ins);

        // Get the 'base' opcode.
        code = insCodeMI(ins);
        code = AddVexPrefixIfNeeded(ins, code, size);
        code = insEncodeMIreg(ins, reg, size, code);
        assert(code & 0x00FF0000);
        if (TakesVexPrefix(ins))
        {
            // The 'vvvv' bits encode the destination register, which for this case (RI)
            // is the same as the source.
            code = insEncodeReg3456(ins, reg, size, code);
        }

        unsigned regcode = (insEncodeReg345(ins, regOpcode, size, &code) | insEncodeReg012(ins, reg, size, &code)) << 8;

        // Output the REX prefix
        dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code);

        if (code & 0xFF000000)
        {
            dst += emitOutputWord(dst, code >> 16);
        }
        else if (code & 0xFF0000)
        {
            dst += emitOutputByte(dst, code >> 16);
        }

        dst += emitOutputWord(dst, code | regcode);
        dst += emitOutputByte(dst, val);

        return dst;
    }

    // The 'mov' opcode is special
    if (ins == INS_mov)
    {
        code = insCodeACC(ins);
        assert(code < 0x100);

        code |= 0x08; // Set the 'w' bit
        unsigned regcode = insEncodeReg012(ins, reg, size, &code);
        code |= regcode;

        // This is INS_mov and will not take VEX prefix
        assert(!TakesVexPrefix(ins));

        if (TakesRexWPrefix(ins, size))
        {
            code = AddRexWPrefix(ins, code);
        }

        dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code);

        dst += emitOutputByte(dst, code);
        if (size == EA_4BYTE)
        {
            dst += emitOutputLong(dst, val);
        }
#ifdef TARGET_AMD64
        else
        {
            assert(size == EA_PTRSIZE);
            dst += emitOutputSizeT(dst, val);
        }
#endif

        if (id->idIsCnsReloc())
        {
            emitRecordRelocation((void*)(dst - (unsigned)EA_SIZE(size)), (void*)(size_t)val, IMAGE_REL_BASED_MOFFSET);
        }

        goto DONE;
    }

    // Decide which encoding is the shortest:
    //   useSigned - use the sign-extended imm8 form ('s' bit)
    //   useACC    - use the short accumulator (AL/AX/EAX) form
    bool useSigned, useACC;

    if (reg == REG_EAX && !instrIs3opImul(ins))
    {
        if (size == EA_1BYTE || (ins == INS_test))
        {
            // For al, ACC encoding is always the smallest
            useSigned = false;
            useACC    = true;
        }
        else
        {
            /* For ax/eax, we avoid ACC encoding for small constants as we
             * can emit the small constant and have it sign-extended.
             * For big constants, the ACC encoding is better as we can use
             * the 1 byte opcode
             */

            if (valInByte)
            {
                // avoid using ACC encoding
                useSigned = true;
                useACC    = false;
            }
            else
            {
                useSigned = false;
                useACC    = true;
            }
        }
    }
    else
    {
        useACC = false;

        if (valInByte)
        {
            useSigned = true;
        }
        else
        {
            useSigned = false;
        }
    }

    // "test" has no 's' bit
    if (!HasRegularWideImmediateForm(ins))
    {
        useSigned = false;
    }

    // Get the 'base' opcode
    if (useACC)
    {
        assert(!useSigned);
        code = insCodeACC(ins);
    }
    else
    {
        assert(!useSigned || valInByte);

        // Some instructions (at least 'imul') do not have a
        // r/m, immed form, but do have a dstReg,srcReg,imm8 form.
        if (valInByte && useSigned && insNeedsRRIb(ins))
        {
            code = insEncodeRRIb(ins, reg, size);
        }
        else
        {
            code = insCodeMI(ins);
            code = AddVexPrefixIfNeeded(ins, code, size);
            code = insEncodeMIreg(ins, reg, size, code);
        }
    }

    switch (size)
    {
        case EA_1BYTE:
            break;

        case EA_2BYTE:
            // Output a size prefix for a 16-bit operand
            dst += emitOutputByte(dst, 0x66);
            FALLTHROUGH;

        case EA_4BYTE:
            // Set the 'w' bit to get the large version
            code |= 0x1;
            break;

#ifdef TARGET_AMD64
        case EA_8BYTE:
            /* Set the 'w' bit to get the large version */
            /* and the REX.W bit to get the really large version */

            code = AddRexWPrefix(ins, code);
            code |= 0x1;
            break;
#endif

        default:
            assert(!"unexpected size");
    }

    // Output the REX prefix
    dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code);

    // Does the value fit in a sign-extended byte?
    // Important! Only set the 's' bit when we have a size larger than EA_1BYTE.
    // Note: A sign-extending immediate when (size == EA_1BYTE) is invalid in 64-bit mode.
    if (useSigned && (size > EA_1BYTE))
    {
        // We can just set the 's' bit, and issue an immediate byte

        code |= 0x2; // Set the 's' bit to use a sign-extended immediate byte.
        dst += emitOutputWord(dst, code);
        dst += emitOutputByte(dst, val);
    }
    else
    {
        // Can we use an accumulator (EAX) encoding?
        if (useACC)
        {
            dst += emitOutputByte(dst, code);
        }
        else
        {
            dst += emitOutputWord(dst, code);
        }

        switch (size)
        {
            case EA_1BYTE:
                dst += emitOutputByte(dst, val);
                break;

            case EA_2BYTE:
                dst += emitOutputWord(dst, val);
                break;

            case EA_4BYTE:
                dst += emitOutputLong(dst, val);
                break;

#ifdef TARGET_AMD64
            case EA_8BYTE:
                // 64-bit forms still take a sign-extended 32-bit immediate.
                dst += emitOutputLong(dst, val);
                break;
#endif // TARGET_AMD64

            default:
                break;
        }

        if (id->idIsCnsReloc())
        {
            emitRecordRelocation((void*)(dst - sizeof(INT32)), (void*)(size_t)val, IMAGE_REL_BASED_HIGHLOW);
            assert(size == EA_4BYTE);
        }
    }

DONE:

    // Does this instruction operate on a GC ref value?
    if (id->idGCref())
    {
        switch (id->idInsFmt())
        {
            case IF_RRD_CNS:
                break;

            case IF_RWR_CNS:
                emitGCregLiveUpd(id->idGCref(), id->idReg1(), dst);
                break;

            case IF_RRW_CNS:
                assert(id->idGCref() == GCT_BYREF);

#ifdef DEBUG
                regMaskTP regMask;
                regMask = genRegMask(reg);
                // FIXNOW review the other places and relax the assert there too

                // The reg must currently be holding either a gcref or a byref
                // GCT_GCREF+int = GCT_BYREF, and GCT_BYREF+/-int = GCT_BYREF
                if (emitThisGCrefRegs & regMask)
                {
                    assert(ins == INS_add);
                }
                if (emitThisByrefRegs & regMask)
                {
                    assert(ins == INS_add || ins == INS_sub);
                }
#endif
                // Mark it as holding a GCT_BYREF
                emitGCregLiveUpd(GCT_BYREF, id->idReg1(), dst);
                break;

            default:
#ifdef DEBUG
                emitDispIns(id, false, false, false);
#endif
                assert(!"unexpected GC ref instruction format");
        }

        // mul can never produce a GC ref
        assert(!instrIs3opImul(ins));
        assert(ins != INS_mulEAX && ins != INS_imulEAX);
    }
    else
    {
        switch (id->idInsFmt())
        {
            case IF_RRD_CNS:
                // INS_mulEAX can not be used with any of these formats
                assert(ins != INS_mulEAX && ins != INS_imulEAX);

                // For the three operand imul instruction the target
                // register is encoded in the opcode
                if (instrIs3opImul(ins))
                {
                    regNumber tgtReg = inst3opImulReg(ins);
                    emitGCregDeadUpd(tgtReg, dst);
                }
                break;

            case IF_RRW_CNS:
            case IF_RWR_CNS:
                assert(!instrIs3opImul(ins));
                emitGCregDeadUpd(id->idReg1(), dst);
                break;

            default:
#ifdef DEBUG
                emitDispIns(id, false, false, false);
#endif
                assert(!"unexpected GC ref instruction format");
        }
    }

    return dst;
}

/*****************************************************************************
 *
 *  Output an instruction with a constant operand.
 */

// Emits an instruction whose only operand is an immediate constant
// (short jumps, loop, ret imm16, push imm). Returns the updated destination
// code pointer.
BYTE* emitter::emitOutputIV(BYTE* dst, instrDesc* id)
{
    code_t      code;
    instruction ins       = id->idIns();
    emitAttr    size      = id->idOpSize();
    ssize_t     val       = emitGetInsSC(id);
    bool        valInByte = ((signed char)val == (target_ssize_t)val);

    // We would need to update GC info correctly
    assert(!IsSSEInstruction(ins));
    assert(!IsAVXInstruction(ins));

#ifdef TARGET_AMD64
    // all these opcodes take a sign-extended 4-byte immediate, max
    noway_assert(size < EA_8BYTE || ((int)val == val && !id->idIsCnsReloc()));
#endif

    if (id->idIsCnsReloc())
    {
        valInByte = false; // relocs can't be placed in a byte

        // Of these instructions only the push instruction can have reloc
        assert(ins == INS_push || ins == INS_push_hide);
    }

    switch (ins)
    {
        case INS_jge:
            assert((val >= -128) && (val <= 127));
            dst += emitOutputByte(dst, insCode(ins));
            dst += emitOutputByte(dst, val);
            break;

        case INS_loop:
            assert((val >= -128) && (val <= 127));
            dst += emitOutputByte(dst, insCodeMI(ins));
            dst += emitOutputByte(dst, val);
            break;

        case INS_ret:
            assert(val);
            dst += emitOutputByte(dst, insCodeMI(ins));
            dst += emitOutputWord(dst, val);
            break;

        case INS_push_hide:
        case INS_push:
            code = insCodeMI(ins);

            // Does the operand fit in a byte?
            if (valInByte)
            {
                dst += emitOutputByte(dst, code | 2);
                dst += emitOutputByte(dst, val);
            }
            else
            {
                if (TakesRexWPrefix(ins, size))
                {
                    code = AddRexWPrefix(ins, code);
                    dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code);
                }

                dst += emitOutputByte(dst, code);
                dst += emitOutputLong(dst, val);
                if (id->idIsCnsReloc())
                {
                    emitRecordRelocation((void*)(dst - sizeof(INT32)), (void*)(size_t)val, IMAGE_REL_BASED_HIGHLOW);
                }
            }

            // Did we push a GC ref value?
            if (id->idGCref())
            {
#ifdef DEBUG
                printf("UNDONE: record GCref push [cns]\n");
#endif
            }

            break;

        default:
            assert(!"unexpected instruction");
    }

    return dst;
}

/*****************************************************************************
 *
 *  Output a local jump instruction.
 *  This function also handles non-jumps that have jump-like characteristics, like RIP-relative LEA of a label that
 *  needs to get bound to an actual address and processed by branch shortening.
 */
BYTE* emitter::emitOutputLJ(insGroup* ig, BYTE* dst, instrDesc* i)
{
    unsigned srcOffs;
    unsigned dstOffs;
    BYTE*    srcAddr;
    BYTE*    dstAddr;
    ssize_t  distVal;

    instrDescJmp* id  = (instrDescJmp*)i;
    instruction   ins = id->idIns();
    bool          jmp;                 // is 'ins' an actual jump/jcc (eligible for short/long selection)?
    bool          relAddr = true;      // does the instruction use relative-addressing?

    // SSE/AVX doesn't make any sense here
    assert(!IsSSEInstruction(ins));
    assert(!IsAVXInstruction(ins));

    size_t ssz; // encoding size (bytes) of the short form
    size_t lsz; // encoding size (bytes) of the long form

    switch (ins)
    {
        default:
            ssz = JCC_SIZE_SMALL;
            lsz = JCC_SIZE_LARGE;
            jmp = true;
            break;

        case INS_jmp:
            ssz = JMP_SIZE_SMALL;
            lsz = JMP_SIZE_LARGE;
            jmp = true;
            break;

        case INS_call:
            ssz = lsz = CALL_INST_SIZE;
            jmp       = false;
            break;

        case INS_push_hide:
        case INS_push:
            ssz = lsz = 5;
            jmp       = false;
            relAddr   = false;
            break;

        case INS_mov:
        case INS_lea:
            ssz = lsz = id->idCodeSize();
            jmp       = false;
            relAddr   = false;
            break;
    }

    // Figure out the distance to the target
    srcOffs = emitCurCodeOffs(dst);
    srcAddr = emitOffsetToPtr(srcOffs);

    if (id->idAddr()->iiaHasInstrCount())
    {
        // Target is expressed as an instruction count relative to this one.
        assert(ig != nullptr);
        int      instrCount = id->idAddr()->iiaGetInstrCount();
        unsigned insNum     = emitFindInsNum(ig, id);
        if (instrCount < 0)
        {
            // Backward branches using instruction count must be within the same instruction group.
            assert(insNum + 1 >= (unsigned)(-instrCount));
        }
        dstOffs = ig->igOffs + emitFindOffset(ig, (insNum + 1 + instrCount));
        dstAddr = emitOffsetToPtr(dstOffs);
    }
    else
    {
        // Target is a bound label (instruction group).
        dstOffs = id->idAddr()->iiaIGlabel->igOffs;
        dstAddr = emitOffsetToPtr(dstOffs);
        if (!relAddr)
        {
            srcAddr = nullptr;
        }
    }

    distVal = (ssize_t)(dstAddr - srcAddr);

    if (dstOffs <= srcOffs)
    {
        // This is a backward jump - distance is known at this point
        CLANG_FORMAT_COMMENT_ANCHOR;

#if DEBUG_EMIT
        if (id->idDebugOnlyInfo()->idNum == (unsigned)INTERESTING_JUMP_NUM || INTERESTING_JUMP_NUM == 0)
        {
            size_t blkOffs = id->idjIG->igOffs;

            if (INTERESTING_JUMP_NUM == 0)
            {
                printf("[3] Jump %u:\n", id->idDebugOnlyInfo()->idNum);
            }
            printf("[3] Jump block is at %08X - %02X = %08X\n", blkOffs, emitOffsAdj, blkOffs - emitOffsAdj);
            printf("[3] Jump is at %08X - %02X = %08X\n", srcOffs, emitOffsAdj, srcOffs - emitOffsAdj);
            printf("[3] Label block is at %08X - %02X = %08X\n", dstOffs, emitOffsAdj, dstOffs - emitOffsAdj);
        }
#endif

        // Can we use a short jump?
        if (jmp && distVal - ssz >= (size_t)JMP_DIST_SMALL_MAX_NEG)
        {
            emitSetShortJump(id);
        }
    }
    else
    {
        // This is a forward jump - distance will be an upper limit
        emitFwdJumps = true;

        // The target offset will be closer by at least 'emitOffsAdj', but only if this
        // jump doesn't cross the hot-cold boundary.
        if (!emitJumpCrossHotColdBoundary(srcOffs, dstOffs))
        {
            dstOffs -= emitOffsAdj;
            distVal -= emitOffsAdj;
        }

        // Record the location of the jump for later patching
        id->idjOffs = dstOffs;

        // Are we overflowing the id->idjOffs bitfield?
        if (id->idjOffs != dstOffs)
        {
            IMPL_LIMITATION("Method is too large");
        }

#if DEBUG_EMIT
        if (id->idDebugOnlyInfo()->idNum == (unsigned)INTERESTING_JUMP_NUM || INTERESTING_JUMP_NUM == 0)
        {
            size_t blkOffs = id->idjIG->igOffs;

            if (INTERESTING_JUMP_NUM == 0)
            {
                printf("[4] Jump %u:\n", id->idDebugOnlyInfo()->idNum);
            }
            printf("[4] Jump block is at %08X\n", blkOffs);
            printf("[4] Jump is at %08X\n", srcOffs);
            printf("[4] Label block is at %08X - %02X = %08X\n", dstOffs + emitOffsAdj, emitOffsAdj, dstOffs);
        }
#endif

        // Can we use a short jump?
        if (jmp && distVal - ssz <= (size_t)JMP_DIST_SMALL_MAX_POS)
        {
            emitSetShortJump(id);
        }
    }

    // Adjust the offset to emit relative to the end of the instruction
    if (relAddr)
    {
        distVal -= id->idjShort ? ssz : lsz;
    }

#ifdef DEBUG
    if (0 && emitComp->verbose)
    {
        size_t sz          = id->idjShort ? ssz : lsz;
        int    distValSize = id->idjShort ? 4 : 8;
        printf("; %s jump [%08X/%03u] from %0*X to %0*X: dist = %08XH\n", (dstOffs <= srcOffs) ? "Fwd" : "Bwd",
               emitComp->dspPtr(id), id->idDebugOnlyInfo()->idNum, distValSize, srcOffs + sz, distValSize, dstOffs,
               distVal);
    }
#endif

    // What size jump should we use?
    if (id->idjShort)
    {
        // Short jump
        assert(!id->idjKeepLong);
        assert(emitJumpCrossHotColdBoundary(srcOffs, dstOffs) == false);

        assert(JMP_SIZE_SMALL == JCC_SIZE_SMALL);
        assert(JMP_SIZE_SMALL == 2);

        assert(jmp);

        if (id->idCodeSize() != JMP_SIZE_SMALL)
        {
#if DEBUG_EMIT || defined(DEBUG)
            int offsShrinkage = id->idCodeSize() - JMP_SIZE_SMALL;
            if (INDEBUG(emitComp->verbose ||)(id->idDebugOnlyInfo()->idNum == (unsigned)INTERESTING_JUMP_NUM ||
                                              INTERESTING_JUMP_NUM == 0))
            {
                printf("; NOTE: size of jump [%08p] mis-predicted by %d bytes\n", dspPtr(id), offsShrinkage);
            }
#endif
        }

        dst += emitOutputByte(dst, insCode(ins));

        // For forward jumps, record the address of the distance value
        id->idjTemp.idjAddr = (distVal > 0) ? dst : nullptr;

        dst += emitOutputByte(dst, distVal);
    }
    else
    {
        code_t code;

        // Long  jump
        if (jmp)
        {
            // Map the short jcc/jmp opcode to its long (near) counterpart;
            // the asserts verify the instruction table keeps them at a fixed offset.
            // clang-format off
            assert(INS_jmp + (INS_l_jmp - INS_jmp) == INS_l_jmp);
            assert(INS_jo  + (INS_l_jmp - INS_jmp) == INS_l_jo);
            assert(INS_jb  + (INS_l_jmp - INS_jmp) == INS_l_jb);
            assert(INS_jae + (INS_l_jmp - INS_jmp) == INS_l_jae);
            assert(INS_je  + (INS_l_jmp - INS_jmp) == INS_l_je);
            assert(INS_jne + (INS_l_jmp - INS_jmp) == INS_l_jne);
            assert(INS_jbe + (INS_l_jmp - INS_jmp) == INS_l_jbe);
            assert(INS_ja  + (INS_l_jmp - INS_jmp) == INS_l_ja);
            assert(INS_js  + (INS_l_jmp - INS_jmp) == INS_l_js);
            assert(INS_jns + (INS_l_jmp - INS_jmp) == INS_l_jns);
            assert(INS_jp  + (INS_l_jmp - INS_jmp) == INS_l_jp);
            assert(INS_jnp + (INS_l_jmp - INS_jmp) == INS_l_jnp);
            assert(INS_jl  + (INS_l_jmp - INS_jmp) == INS_l_jl);
            assert(INS_jge + (INS_l_jmp - INS_jmp) == INS_l_jge);
            assert(INS_jle + (INS_l_jmp - INS_jmp) == INS_l_jle);
            assert(INS_jg  + (INS_l_jmp - INS_jmp) == INS_l_jg);
            // clang-format on

            code = insCode((instruction)(ins + (INS_l_jmp - INS_jmp)));
        }
        else if (ins == INS_push || ins == INS_push_hide)
        {
            assert(insCodeMI(INS_push) == 0x68);
            code = 0x68;
        }
        else if (ins == INS_mov)
        {
            // Make it look like IF_SWR_CNS so that emitOutputSV emits the r/m32 for us
            insFormat tmpInsFmt   = id->idInsFmt();
            insGroup* tmpIGlabel  = id->idAddr()->iiaIGlabel;
            bool      tmpDspReloc = id->idIsDspReloc();

            id->idInsFmt(IF_SWR_CNS);
            id->idAddr()->iiaLclVar = ((instrDescLbl*)id)->dstLclVar;
            id->idSetIsDspReloc(false);

            dst = emitOutputSV(dst, id, insCodeMI(ins));

            // Restore id fields with original values
            id->idInsFmt(tmpInsFmt);
            id->idAddr()->iiaIGlabel = tmpIGlabel;
            id->idSetIsDspReloc(tmpDspReloc);

            code = 0xCC;
        }
        else if (ins == INS_lea)
        {
            // Make an instrDesc that looks like IF_RWR_ARD so that emitOutputAM emits the r/m32 for us.
            // We basically are doing what emitIns_R_AI does.
            // TODO-XArch-Cleanup: revisit this.
            instrDescAmd  idAmdStackLocal;
            instrDescAmd* idAmd = &idAmdStackLocal;
            *(instrDesc*)idAmd  = *(instrDesc*)id; // copy all the "core" fields
            memset((BYTE*)idAmd + sizeof(instrDesc), 0,
                   sizeof(instrDescAmd) - sizeof(instrDesc)); // zero out the tail that wasn't copied

            idAmd->idInsFmt(IF_RWR_ARD);
            idAmd->idAddr()->iiaAddrMode.amBaseReg = REG_NA;
            idAmd->idAddr()->iiaAddrMode.amIndxReg = REG_NA;
            emitSetAmdDisp(idAmd, distVal); // set the displacement
            idAmd->idSetIsDspReloc(id->idIsDspReloc());
            assert(emitGetInsAmdAny(idAmd) == distVal); // make sure "disp" is stored properly

            UNATIVE_OFFSET sz = emitInsSizeAM(idAmd, insCodeRM(ins));
            idAmd->idCodeSize(sz);

            code = insCodeRM(ins);
            code |= (insEncodeReg345(ins, id->idReg1(), EA_PTRSIZE, &code) << 8);

            dst = emitOutputAM(dst, idAmd, code, nullptr);

            code = 0xCC;

            // For forward jumps, record the address of the distance value
            // Hard-coded 4 here because we already output the displacement, as the last thing.
            id->idjTemp.idjAddr = (dstOffs > srcOffs) ? (dst - 4) : nullptr;

            // We're done
            return dst;
        }
        else
        {
            // INS_call: near relative call opcode.
            code = 0xE8;
        }

        if (ins != INS_mov)
        {
            dst += emitOutputByte(dst, code);

            if (code & 0xFF00)
            {
                dst += emitOutputByte(dst, code >> 8);
            }
        }

        // For forward jumps, record the address of the distance value
        id->idjTemp.idjAddr = (dstOffs > srcOffs) ? dst : nullptr;

        dst += emitOutputLong(dst, distVal);

#ifndef TARGET_AMD64 // all REL32 on AMD have to go through recordRelocation
        if (emitComp->opts.compReloc)
#endif
        {
            if (!relAddr)
            {
                emitRecordRelocation((void*)(dst - sizeof(INT32)), (void*)distVal, IMAGE_REL_BASED_HIGHLOW);
            }
            else if (emitJumpCrossHotColdBoundary(srcOffs, dstOffs))
            {
                assert(id->idjKeepLong);
                emitRecordRelocation((void*)(dst - sizeof(INT32)), dst + distVal, IMAGE_REL_BASED_REL32);
            }
        }
    }

    // Local calls kill all registers
    if (ins == INS_call && (emitThisGCrefRegs | emitThisByrefRegs))
    {
        emitGCregDeadUpdMask(emitThisGCrefRegs | emitThisByrefRegs, dst);
    }

    return dst;
}

/*****************************************************************************
 *
 *  Append the machine code corresponding to the given instruction descriptor
 *  to the code block at '*dp'; the base of the code block is 'bp', and 'ig'
 *  is the instruction group that contains the instruction. Updates '*dp' to
 *  point past the generated code, and returns the size of the instruction
 *  descriptor in bytes.
 */

#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp)
{
    assert(emitIssuing);

    BYTE*         dst           = *dp;
    size_t        sz            = sizeof(instrDesc);
    instruction   ins           = id->idIns();
    unsigned char callInstrSize = 0;

#ifdef DEBUG
    bool dspOffs = emitComp->opts.dspGCtbls;
#endif // DEBUG

    emitAttr size = id->idOpSize();

    assert(REG_NA == (int)REG_NA);

    assert(ins != INS_imul || size >= EA_4BYTE);                  // Has no 'w' bit
    assert(instrIs3opImul(id->idIns()) == 0 || size >= EA_4BYTE); // Has no 'w' bit

    VARSET_TP GCvars(VarSetOps::UninitVal());

    // What instruction format have we got?
switch (id->idInsFmt()) { code_t code; unsigned regcode; int args; CnsVal cnsVal; BYTE* addr; bool recCall; regMaskTP gcrefRegs; regMaskTP byrefRegs; /********************************************************************/ /* No operands */ /********************************************************************/ case IF_NONE: // the loop alignment pseudo instruction if (ins == INS_align) { sz = sizeof(instrDescAlign); // IG can be marked as not needing alignment after emitting align instruction // In such case, skip outputting alignment. if (ig->endsWithAlignInstr()) { dst = emitOutputAlign(ig, id, dst); } #ifdef DEBUG else { // If the IG is not marked as need alignment, then the code size // should be zero i.e. no padding needed. assert(id->idCodeSize() == 0); } #endif break; } if (ins == INS_nop) { BYTE* dstRW = dst + writeableOffset; dstRW = emitOutputNOP(dstRW, id->idCodeSize()); dst = dstRW - writeableOffset; break; } // the cdq instruction kills the EDX register implicitly if (ins == INS_cdq) { emitGCregDeadUpd(REG_EDX, dst); } assert(id->idGCref() == GCT_NONE); code = insCodeMR(ins); #ifdef TARGET_AMD64 // Support only scalar AVX instructions and hence size is hard coded to 4-byte. code = AddVexPrefixIfNeeded(ins, code, EA_4BYTE); if (((ins == INS_cdq) || (ins == INS_cwde)) && TakesRexWPrefix(ins, id->idOpSize())) { code = AddRexWPrefix(ins, code); } dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code); #endif // Is this a 'big' opcode? 
if (code & 0xFF000000) { // The high word and then the low word dst += emitOutputWord(dst, code >> 16); code &= 0x0000FFFF; dst += emitOutputWord(dst, code); } else if (code & 0x00FF0000) { // The high byte and then the low word dst += emitOutputByte(dst, code >> 16); code &= 0x0000FFFF; dst += emitOutputWord(dst, code); } else if (code & 0xFF00) { // The 2 byte opcode dst += emitOutputWord(dst, code); } else { // The 1 byte opcode dst += emitOutputByte(dst, code); } break; /********************************************************************/ /* Simple constant, local label, method */ /********************************************************************/ case IF_CNS: dst = emitOutputIV(dst, id); sz = emitSizeOfInsDsc(id); break; case IF_LABEL: case IF_RWR_LABEL: case IF_SWR_LABEL: assert(id->idGCref() == GCT_NONE); assert(id->idIsBound()); // TODO-XArch-Cleanup: handle IF_RWR_LABEL in emitOutputLJ() or change it to emitOutputAM()? dst = emitOutputLJ(ig, dst, id); sz = (id->idInsFmt() == IF_SWR_LABEL ? sizeof(instrDescLbl) : sizeof(instrDescJmp)); break; case IF_METHOD: case IF_METHPTR: // Assume we'll be recording this call recCall = true; // Get hold of the argument count and field Handle args = emitGetInsCDinfo(id); // Is this a "fat" call descriptor? if (id->idIsLargeCall()) { instrDescCGCA* idCall = (instrDescCGCA*)id; gcrefRegs = idCall->idcGcrefRegs; byrefRegs = idCall->idcByrefRegs; VarSetOps::Assign(emitComp, GCvars, idCall->idcGCvars); sz = sizeof(instrDescCGCA); } else { assert(!id->idIsLargeDsp()); assert(!id->idIsLargeCns()); gcrefRegs = emitDecodeCallGCregs(id); byrefRegs = 0; VarSetOps::AssignNoCopy(emitComp, GCvars, VarSetOps::MakeEmpty(emitComp)); sz = sizeof(instrDesc); } addr = (BYTE*)id->idAddr()->iiaAddr; assert(addr != nullptr); // Some helpers don't get recorded in GC tables if (id->idIsNoGC()) { recCall = false; } // What kind of a call do we have here? 
if (id->idInsFmt() == IF_METHPTR) { // This is call indirect via a method pointer assert((ins == INS_call) || (ins == INS_tail_i_jmp)); code = insCodeMR(ins); if (id->idIsDspReloc()) { dst += emitOutputWord(dst, code | 0x0500); #ifdef TARGET_AMD64 dst += emitOutputLong(dst, 0); #else dst += emitOutputLong(dst, (int)(ssize_t)addr); #endif emitRecordRelocation((void*)(dst - sizeof(int)), addr, IMAGE_REL_BASED_DISP32); } else { #ifdef TARGET_X86 dst += emitOutputWord(dst, code | 0x0500); #else // TARGET_AMD64 // Amd64: addr fits within 32-bits and can be encoded as a displacement relative to zero. // This addr mode should never be used while generating relocatable ngen code nor if // the addr can be encoded as pc-relative address. noway_assert(!emitComp->opts.compReloc); noway_assert(codeGen->genAddrRelocTypeHint((size_t)addr) != IMAGE_REL_BASED_REL32); noway_assert(static_cast<int>(reinterpret_cast<intptr_t>(addr)) == (ssize_t)addr); // This requires, specifying a SIB byte after ModRM byte. dst += emitOutputWord(dst, code | 0x0400); dst += emitOutputByte(dst, 0x25); #endif // TARGET_AMD64 dst += emitOutputLong(dst, static_cast<int>(reinterpret_cast<intptr_t>(addr))); } goto DONE_CALL; } // Else // This is call direct where we know the target, thus we can // use a direct call; the target to jump to is in iiaAddr. assert(id->idInsFmt() == IF_METHOD); // Output the call opcode followed by the target distance dst += (ins == INS_l_jmp) ? emitOutputByte(dst, insCode(ins)) : emitOutputByte(dst, insCodeMI(ins)); ssize_t offset; #ifdef TARGET_AMD64 // All REL32 on Amd64 go through recordRelocation. Here we will output zero to advance dst. offset = 0; assert(id->idIsDspReloc()); #else // Calculate PC relative displacement. 
// Although you think we should be using sizeof(void*), the x86 and x64 instruction set // only allow a 32-bit offset, so we correctly use sizeof(INT32) offset = addr - (dst + sizeof(INT32)); #endif dst += emitOutputLong(dst, offset); if (id->idIsDspReloc()) { emitRecordRelocation((void*)(dst - sizeof(INT32)), addr, IMAGE_REL_BASED_REL32); } DONE_CALL: /* We update the variable (not register) GC info before the call as the variables cannot be used by the call. Killing variables before the call helps with boundary conditions if the call is CORINFO_HELP_THROW - see bug 50029. If we ever track aliased variables (which could be used by the call), we would have to keep them alive past the call. */ assert(FitsIn<unsigned char>(dst - *dp)); callInstrSize = static_cast<unsigned char>(dst - *dp); // Note the use of address `*dp`, the call instruction address, instead of `dst`, the post-call-instruction // address. emitUpdateLiveGCvars(GCvars, *dp); #ifdef DEBUG // Output any delta in GC variable info, corresponding to the before-call GC var updates done above. if (EMIT_GC_VERBOSE || emitComp->opts.disasmWithGC) { emitDispGCVarDelta(); } #endif // DEBUG // If the method returns a GC ref, mark EAX appropriately if (id->idGCref() == GCT_GCREF) { gcrefRegs |= RBM_EAX; } else if (id->idGCref() == GCT_BYREF) { byrefRegs |= RBM_EAX; } #ifdef UNIX_AMD64_ABI // If is a multi-register return method is called, mark RDX appropriately (for System V AMD64). 
if (id->idIsLargeCall()) { instrDescCGCA* idCall = (instrDescCGCA*)id; if (idCall->idSecondGCref() == GCT_GCREF) { gcrefRegs |= RBM_RDX; } else if (idCall->idSecondGCref() == GCT_BYREF) { byrefRegs |= RBM_RDX; } } #endif // UNIX_AMD64_ABI // If the GC register set has changed, report the new set if (gcrefRegs != emitThisGCrefRegs) { emitUpdateLiveGCregs(GCT_GCREF, gcrefRegs, dst); } if (byrefRegs != emitThisByrefRegs) { emitUpdateLiveGCregs(GCT_BYREF, byrefRegs, dst); } if (recCall || args) { // For callee-pop, all arguments will be popped after the call. // For caller-pop, any GC arguments will go dead after the call. assert(callInstrSize != 0); if (args >= 0) { emitStackPop(dst, /*isCall*/ true, callInstrSize, args); } else { emitStackKillArgs(dst, -args, callInstrSize); } } // Do we need to record a call location for GC purposes? if (!emitFullGCinfo && recCall) { assert(callInstrSize != 0); emitRecordGCcall(dst, callInstrSize); } #ifdef DEBUG if (ins == INS_call) { emitRecordCallSite(emitCurCodeOffs(*dp), id->idDebugOnlyInfo()->idCallSig, (CORINFO_METHOD_HANDLE)id->idDebugOnlyInfo()->idMemCookie); } #endif // DEBUG break; /********************************************************************/ /* One register operand */ /********************************************************************/ case IF_RRD: case IF_RWR: case IF_RRW: dst = emitOutputR(dst, id); sz = SMALL_IDSC_SIZE; break; /********************************************************************/ /* Register and register/constant */ /********************************************************************/ case IF_RRW_SHF: code = insCodeMR(ins); // Emit the VEX prefix if it exists code = AddVexPrefixIfNeeded(ins, code, size); code = insEncodeMRreg(ins, id->idReg1(), size, code); // set the W bit if (size != EA_1BYTE) { code |= 1; } // Emit the REX prefix if it exists if (TakesRexWPrefix(ins, size)) { code = AddRexWPrefix(ins, code); } // Output a size prefix for a 16-bit operand if (size == EA_2BYTE) { dst += 
emitOutputByte(dst, 0x66); } dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code); dst += emitOutputWord(dst, code); dst += emitOutputByte(dst, emitGetInsSC(id)); sz = emitSizeOfInsDsc(id); // Update GC info. assert(!id->idGCref()); emitGCregDeadUpd(id->idReg1(), dst); break; case IF_RRD_RRD: case IF_RWR_RRD: case IF_RRW_RRD: case IF_RRW_RRW: dst = emitOutputRR(dst, id); sz = SMALL_IDSC_SIZE; break; case IF_RRD_CNS: case IF_RWR_CNS: case IF_RRW_CNS: dst = emitOutputRI(dst, id); sz = emitSizeOfInsDsc(id); break; case IF_RWR_RRD_RRD: dst = emitOutputRRR(dst, id); sz = emitSizeOfInsDsc(id); break; case IF_RWR_RRD_RRD_CNS: case IF_RWR_RRD_RRD_RRD: dst = emitOutputRRR(dst, id); sz = emitSizeOfInsDsc(id); dst += emitOutputByte(dst, emitGetInsSC(id)); break; case IF_RRW_RRW_CNS: assert(id->idGCref() == GCT_NONE); // Get the 'base' opcode (it's a big one) // Also, determine which operand goes where in the ModRM byte. regNumber mReg; regNumber rReg; if (hasCodeMR(ins)) { code = insCodeMR(ins); // Emit the VEX prefix if it exists code = AddVexPrefixIfNeeded(ins, code, size); code = insEncodeMRreg(ins, code); mReg = id->idReg1(); rReg = id->idReg2(); } else if (hasCodeMI(ins)) { code = insCodeMI(ins); // Emit the VEX prefix if it exists code = AddVexPrefixIfNeeded(ins, code, size); assert((code & 0xC000) == 0); code |= 0xC000; mReg = id->idReg2(); // The left and right shifts use the same encoding, and are distinguished by the Reg/Opcode field. rReg = getSseShiftRegNumber(ins); } else { code = insCodeRM(ins); // Emit the VEX prefix if it exists code = AddVexPrefixIfNeeded(ins, code, size); code = insEncodeRMreg(ins, code); mReg = id->idReg2(); rReg = id->idReg1(); } assert(code & 0x00FF0000); if (TakesRexWPrefix(ins, size)) { code = AddRexWPrefix(ins, code); } if (TakesVexPrefix(ins)) { if (IsDstDstSrcAVXInstruction(ins)) { // Encode source/dest operand reg in 'vvvv' bits in 1's complement form // This code will have to change when we support 3 operands. 
// For now, we always overload this source with the destination (always reg1). // (Though we will need to handle the few ops that can have the 'vvvv' bits as destination, // e.g. pslldq, when/if we support those instructions with 2 registers.) // (see x64 manual Table 2-9. Instructions with a VEX.vvvv destination) code = insEncodeReg3456(ins, id->idReg1(), size, code); } else if (IsDstSrcSrcAVXInstruction(ins)) { // This is a "merge" move instruction. // Encode source operand reg in 'vvvv' bits in 1's complement form code = insEncodeReg3456(ins, id->idReg2(), size, code); } } regcode = (insEncodeReg345(ins, rReg, size, &code) | insEncodeReg012(ins, mReg, size, &code)); // Output the REX prefix dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code); if (code & 0xFF000000) { // Output the highest word of the opcode dst += emitOutputWord(dst, code >> 16); code &= 0x0000FFFF; if (Is4ByteSSEInstruction(ins)) { // Output 3rd byte of the opcode dst += emitOutputByte(dst, code); code &= 0xFF00; } } else if (code & 0x00FF0000) { dst += emitOutputByte(dst, code >> 16); code &= 0x0000FFFF; } // TODO-XArch-CQ: Right now support 4-byte opcode instructions only if ((code & 0xFF00) == 0xC000) { dst += emitOutputWord(dst, code | (regcode << 8)); } else if ((code & 0xFF) == 0x00) { // This case happens for some SSE/AVX instructions only assert(IsAVXInstruction(ins) || Is4ByteSSEInstruction(ins)); dst += emitOutputByte(dst, (code >> 8) & 0xFF); dst += emitOutputByte(dst, (0xC0 | regcode)); } else { dst += emitOutputWord(dst, code); dst += emitOutputByte(dst, (0xC0 | regcode)); } dst += emitOutputByte(dst, emitGetInsSC(id)); sz = emitSizeOfInsDsc(id); // Kill any GC ref in the destination register if necessary. 
if (!emitInsCanOnlyWriteSSE2OrAVXReg(id)) { emitGCregDeadUpd(id->idReg1(), dst); } break; /********************************************************************/ /* Address mode operand */ /********************************************************************/ case IF_ARD: case IF_AWR: case IF_ARW: dst = emitCodeWithInstructionSize(dst, emitOutputAM(dst, id, insCodeMR(ins)), &callInstrSize); switch (ins) { case INS_call: IND_CALL: // Get hold of the argument count and method handle args = emitGetInsCIargs(id); // Is this a "fat" call descriptor? if (id->idIsLargeCall()) { instrDescCGCA* idCall = (instrDescCGCA*)id; gcrefRegs = idCall->idcGcrefRegs; byrefRegs = idCall->idcByrefRegs; VarSetOps::Assign(emitComp, GCvars, idCall->idcGCvars); sz = sizeof(instrDescCGCA); } else { assert(!id->idIsLargeDsp()); assert(!id->idIsLargeCns()); gcrefRegs = emitDecodeCallGCregs(id); byrefRegs = 0; VarSetOps::AssignNoCopy(emitComp, GCvars, VarSetOps::MakeEmpty(emitComp)); sz = sizeof(instrDesc); } recCall = true; goto DONE_CALL; default: sz = emitSizeOfInsDsc(id); break; } break; case IF_RRW_ARD_CNS: case IF_RWR_ARD_CNS: assert(IsSSEOrAVXInstruction(ins)); emitGetInsAmdCns(id, &cnsVal); code = insCodeRM(ins); // Special case 4-byte AVX instructions if (EncodedBySSE38orSSE3A(ins)) { dst = emitOutputAM(dst, id, code, &cnsVal); } else { code = AddVexPrefixIfNeeded(ins, code, size); regcode = (insEncodeReg345(ins, id->idReg1(), size, &code) << 8); dst = emitOutputAM(dst, id, code | regcode, &cnsVal); } sz = emitSizeOfInsDsc(id); break; case IF_AWR_RRD_CNS: assert(ins == INS_vextracti128 || ins == INS_vextractf128); assert(UseVEXEncoding()); emitGetInsAmdCns(id, &cnsVal); code = insCodeMR(ins); dst = emitOutputAM(dst, id, code, &cnsVal); sz = emitSizeOfInsDsc(id); break; case IF_RRD_ARD: case IF_RWR_ARD: case IF_RRW_ARD: case IF_RWR_RRD_ARD: { code = insCodeRM(ins); if (EncodedBySSE38orSSE3A(ins) || (ins == INS_crc32)) { dst = emitOutputAM(dst, id, code); } else { code = 
AddVexPrefixIfNeeded(ins, code, size); regcode = (insEncodeReg345(ins, id->idReg1(), size, &code) << 8); dst = emitOutputAM(dst, id, code | regcode); } sz = emitSizeOfInsDsc(id); break; } case IF_RWR_ARD_RRD: { assert(IsAVX2GatherInstruction(ins)); code = insCodeRM(ins); dst = emitOutputAM(dst, id, code); sz = emitSizeOfInsDsc(id); break; } case IF_RWR_RRD_ARD_CNS: case IF_RWR_RRD_ARD_RRD: { assert(IsSSEOrAVXInstruction(ins)); emitGetInsAmdCns(id, &cnsVal); code = insCodeRM(ins); if (EncodedBySSE38orSSE3A(ins)) { dst = emitOutputAM(dst, id, code, &cnsVal); } else { code = AddVexPrefixIfNeeded(ins, code, size); regcode = (insEncodeReg345(ins, id->idReg1(), size, &code) << 8); dst = emitOutputAM(dst, id, code | regcode, &cnsVal); } sz = emitSizeOfInsDsc(id); break; } case IF_ARD_RRD: case IF_AWR_RRD: case IF_ARW_RRD: code = insCodeMR(ins); code = AddVexPrefixIfNeeded(ins, code, size); regcode = (insEncodeReg345(ins, id->idReg1(), size, &code) << 8); dst = emitOutputAM(dst, id, code | regcode); sz = emitSizeOfInsDsc(id); break; case IF_AWR_RRD_RRD: { code = insCodeMR(ins); code = AddVexPrefixIfNeeded(ins, code, size); dst = emitOutputAM(dst, id, code); sz = emitSizeOfInsDsc(id); break; } case IF_ARD_CNS: case IF_AWR_CNS: case IF_ARW_CNS: emitGetInsAmdCns(id, &cnsVal); dst = emitOutputAM(dst, id, insCodeMI(ins), &cnsVal); sz = emitSizeOfInsDsc(id); break; case IF_ARW_SHF: emitGetInsAmdCns(id, &cnsVal); dst = emitOutputAM(dst, id, insCodeMR(ins), &cnsVal); sz = emitSizeOfInsDsc(id); break; /********************************************************************/ /* Stack-based operand */ /********************************************************************/ case IF_SRD: case IF_SWR: case IF_SRW: assert(ins != INS_pop_hide); if (ins == INS_pop) { // The offset in "pop [ESP+xxx]" is relative to the new ESP value CLANG_FORMAT_COMMENT_ANCHOR; #if !FEATURE_FIXED_OUT_ARGS emitCurStackLvl -= sizeof(int); #endif dst = emitOutputSV(dst, id, insCodeMR(ins)); #if 
!FEATURE_FIXED_OUT_ARGS emitCurStackLvl += sizeof(int); #endif break; } dst = emitCodeWithInstructionSize(dst, emitOutputSV(dst, id, insCodeMR(ins)), &callInstrSize); if (ins == INS_call) { goto IND_CALL; } break; case IF_SRD_CNS: case IF_SWR_CNS: case IF_SRW_CNS: emitGetInsCns(id, &cnsVal); dst = emitOutputSV(dst, id, insCodeMI(ins), &cnsVal); sz = emitSizeOfInsDsc(id); break; case IF_SRW_SHF: emitGetInsCns(id, &cnsVal); dst = emitOutputSV(dst, id, insCodeMR(ins), &cnsVal); sz = emitSizeOfInsDsc(id); break; case IF_SWR_RRD_CNS: assert(ins == INS_vextracti128 || ins == INS_vextractf128); assert(UseVEXEncoding()); emitGetInsAmdCns(id, &cnsVal); code = insCodeMR(ins); dst = emitOutputSV(dst, id, insCodeMR(ins), &cnsVal); sz = emitSizeOfInsDsc(id); break; case IF_RRW_SRD_CNS: case IF_RWR_SRD_CNS: assert(IsSSEOrAVXInstruction(ins)); emitGetInsCns(id, &cnsVal); code = insCodeRM(ins); // Special case 4-byte AVX instructions if (EncodedBySSE38orSSE3A(ins)) { dst = emitOutputSV(dst, id, code, &cnsVal); } else { code = AddVexPrefixIfNeeded(ins, code, size); // In case of AVX instructions that take 3 operands, encode reg1 as first source. // Note that reg1 is both a source and a destination. // // TODO-XArch-CQ: Eventually we need to support 3 operand instruction formats. For // now we use the single source as source1 and source2. // For this format, moves do not support a third operand, so we only need to handle the binary ops. if (IsDstDstSrcAVXInstruction(ins)) { // encode source operand reg in 'vvvv' bits in 1's complement form code = insEncodeReg3456(ins, id->idReg1(), size, code); } regcode = (insEncodeReg345(ins, id->idReg1(), size, &code) << 8); dst = emitOutputSV(dst, id, code | regcode, &cnsVal); } sz = emitSizeOfInsDsc(id); break; case IF_RRD_SRD: case IF_RWR_SRD: case IF_RRW_SRD: { code = insCodeRM(ins); // 4-byte AVX instructions are special cased inside emitOutputSV // since they do not have space to encode ModRM byte. 
if (EncodedBySSE38orSSE3A(ins) || (ins == INS_crc32)) { dst = emitOutputSV(dst, id, code); } else { code = AddVexPrefixIfNeeded(ins, code, size); if (IsDstDstSrcAVXInstruction(ins)) { // encode source operand reg in 'vvvv' bits in 1's complement form code = insEncodeReg3456(ins, id->idReg1(), size, code); } regcode = (insEncodeReg345(ins, id->idReg1(), size, &code) << 8); dst = emitOutputSV(dst, id, code | regcode); } sz = emitSizeOfInsDsc(id); break; } case IF_RWR_RRD_SRD: { // This should only be called on AVX instructions assert(IsAVXInstruction(ins)); code = insCodeRM(ins); code = AddVexPrefixIfNeeded(ins, code, size); code = insEncodeReg3456(ins, id->idReg2(), size, code); // encode source operand reg in 'vvvv' bits in 1's complement form // 4-byte AVX instructions are special cased inside emitOutputSV // since they do not have space to encode ModRM byte. if (EncodedBySSE38orSSE3A(ins)) { dst = emitOutputSV(dst, id, code); } else { regcode = (insEncodeReg345(ins, id->idReg1(), size, &code) << 8); dst = emitOutputSV(dst, id, code | regcode); } break; } case IF_RWR_RRD_SRD_CNS: case IF_RWR_RRD_SRD_RRD: { // This should only be called on AVX instructions assert(IsAVXInstruction(ins)); emitGetInsCns(id, &cnsVal); code = insCodeRM(ins); code = AddVexPrefixIfNeeded(ins, code, size); code = insEncodeReg3456(ins, id->idReg2(), size, code); // encode source operand reg in 'vvvv' bits in 1's complement form // 4-byte AVX instructions are special cased inside emitOutputSV // since they do not have space to encode ModRM byte. if (EncodedBySSE38orSSE3A(ins)) { dst = emitOutputSV(dst, id, code, &cnsVal); } else { regcode = (insEncodeReg345(ins, id->idReg1(), size, &code) << 8); dst = emitOutputSV(dst, id, code | regcode, &cnsVal); } sz = emitSizeOfInsDsc(id); break; } case IF_SRD_RRD: case IF_SWR_RRD: case IF_SRW_RRD: code = insCodeMR(ins); code = AddVexPrefixIfNeeded(ins, code, size); // In case of AVX instructions that take 3 operands, encode reg1 as first source. 
// Note that reg1 is both a source and a destination. // // TODO-XArch-CQ: Eventually we need to support 3 operand instruction formats. For // now we use the single source as source1 and source2. // For this format, moves do not support a third operand, so we only need to handle the binary ops. if (IsDstDstSrcAVXInstruction(ins)) { // encode source operand reg in 'vvvv' bits in 1's complement form code = insEncodeReg3456(ins, id->idReg1(), size, code); } regcode = (insEncodeReg345(ins, id->idReg1(), size, &code) << 8); dst = emitOutputSV(dst, id, code | regcode); break; /********************************************************************/ /* Direct memory address */ /********************************************************************/ case IF_MRD: case IF_MRW: case IF_MWR: noway_assert(ins != INS_call); dst = emitOutputCV(dst, id, insCodeMR(ins) | 0x0500); sz = emitSizeOfInsDsc(id); break; case IF_MRD_OFF: dst = emitOutputCV(dst, id, insCodeMI(ins)); break; case IF_RRW_MRD_CNS: case IF_RWR_MRD_CNS: assert(IsSSEOrAVXInstruction(ins)); emitGetInsDcmCns(id, &cnsVal); code = insCodeRM(ins); // Special case 4-byte AVX instructions if (EncodedBySSE38orSSE3A(ins)) { dst = emitOutputCV(dst, id, code, &cnsVal); } else { code = AddVexPrefixIfNeeded(ins, code, size); // In case of AVX instructions that take 3 operands, encode reg1 as first source. // Note that reg1 is both a source and a destination. // // TODO-XArch-CQ: Eventually we need to support 3 operand instruction formats. For // now we use the single source as source1 and source2. // For this format, moves do not support a third operand, so we only need to handle the binary ops. 
if (IsDstDstSrcAVXInstruction(ins)) { // encode source operand reg in 'vvvv' bits in 1's complement form code = insEncodeReg3456(ins, id->idReg1(), size, code); } regcode = (insEncodeReg345(ins, id->idReg1(), size, &code) << 8); dst = emitOutputCV(dst, id, code | regcode | 0x0500, &cnsVal); } sz = emitSizeOfInsDsc(id); break; case IF_MWR_RRD_CNS: assert(ins == INS_vextracti128 || ins == INS_vextractf128); assert(UseVEXEncoding()); emitGetInsDcmCns(id, &cnsVal); code = insCodeMR(ins); // only AVX2 vextracti128 and AVX vextractf128 can reach this path, // they do not need VEX.vvvv to encode the register operand dst = emitOutputCV(dst, id, code, &cnsVal); sz = emitSizeOfInsDsc(id); break; case IF_RRD_MRD: case IF_RWR_MRD: case IF_RRW_MRD: { code = insCodeRM(ins); // Special case 4-byte AVX instructions if (EncodedBySSE38orSSE3A(ins) || (ins == INS_crc32)) { dst = emitOutputCV(dst, id, code); } else { code = AddVexPrefixIfNeeded(ins, code, size); if (IsDstDstSrcAVXInstruction(ins)) { // encode source operand reg in 'vvvv' bits in 1's complement form code = insEncodeReg3456(ins, id->idReg1(), size, code); } regcode = (insEncodeReg345(ins, id->idReg1(), size, &code) << 8); dst = emitOutputCV(dst, id, code | regcode | 0x0500); } sz = emitSizeOfInsDsc(id); break; } case IF_RWR_RRD_MRD: { // This should only be called on AVX instructions assert(IsAVXInstruction(ins)); code = insCodeRM(ins); code = AddVexPrefixIfNeeded(ins, code, size); code = insEncodeReg3456(ins, id->idReg2(), size, code); // encode source operand reg in 'vvvv' bits in 1's complement form // Special case 4-byte AVX instructions if (EncodedBySSE38orSSE3A(ins)) { dst = emitOutputCV(dst, id, code); } else { regcode = (insEncodeReg345(ins, id->idReg1(), size, &code) << 8); dst = emitOutputCV(dst, id, code | regcode | 0x0500); } sz = emitSizeOfInsDsc(id); break; } case IF_RWR_RRD_MRD_CNS: case IF_RWR_RRD_MRD_RRD: { // This should only be called on AVX instructions assert(IsAVXInstruction(ins)); 
emitGetInsCns(id, &cnsVal); code = insCodeRM(ins); code = AddVexPrefixIfNeeded(ins, code, size); code = insEncodeReg3456(ins, id->idReg2(), size, code); // encode source operand reg in 'vvvv' bits in 1's complement form // Special case 4-byte AVX instructions if (EncodedBySSE38orSSE3A(ins)) { dst = emitOutputCV(dst, id, code, &cnsVal); } else { regcode = (insEncodeReg345(ins, id->idReg1(), size, &code) << 8); dst = emitOutputCV(dst, id, code | regcode | 0x0500, &cnsVal); } sz = emitSizeOfInsDsc(id); break; } case IF_RWR_MRD_OFF: code = insCode(ins); code = AddVexPrefixIfNeeded(ins, code, size); // In case of AVX instructions that take 3 operands, encode reg1 as first source. // Note that reg1 is both a source and a destination. // // TODO-XArch-CQ: Eventually we need to support 3 operand instruction formats. For // now we use the single source as source1 and source2. // For this format, moves do not support a third operand, so we only need to handle the binary ops. if (IsDstDstSrcAVXInstruction(ins)) { // encode source operand reg in 'vvvv' bits in 1's complement form code = insEncodeReg3456(ins, id->idReg1(), size, code); } regcode = insEncodeReg012(id->idIns(), id->idReg1(), size, &code); dst = emitOutputCV(dst, id, code | 0x30 | regcode); sz = emitSizeOfInsDsc(id); break; case IF_MRD_RRD: case IF_MWR_RRD: case IF_MRW_RRD: code = insCodeMR(ins); code = AddVexPrefixIfNeeded(ins, code, size); // In case of AVX instructions that take 3 operands, encode reg1 as first source. // Note that reg1 is both a source and a destination. // // TODO-XArch-CQ: Eventually we need to support 3 operand instruction formats. For // now we use the single source as source1 and source2. // For this format, moves do not support a third operand, so we only need to handle the binary ops. 
if (IsDstDstSrcAVXInstruction(ins)) { // encode source operand reg in 'vvvv' bits in 1's complement form code = insEncodeReg3456(ins, id->idReg1(), size, code); } regcode = (insEncodeReg345(ins, id->idReg1(), size, &code) << 8); dst = emitOutputCV(dst, id, code | regcode | 0x0500); sz = emitSizeOfInsDsc(id); break; case IF_MRD_CNS: case IF_MWR_CNS: case IF_MRW_CNS: emitGetInsDcmCns(id, &cnsVal); dst = emitOutputCV(dst, id, insCodeMI(ins) | 0x0500, &cnsVal); sz = emitSizeOfInsDsc(id); break; case IF_MRW_SHF: emitGetInsDcmCns(id, &cnsVal); dst = emitOutputCV(dst, id, insCodeMR(ins) | 0x0500, &cnsVal); sz = emitSizeOfInsDsc(id); break; /********************************************************************/ /* oops */ /********************************************************************/ default: #ifdef DEBUG printf("unexpected format %s\n", emitIfName(id->idInsFmt())); assert(!"don't know how to encode this instruction"); #endif break; } // Make sure we set the instruction descriptor size correctly assert(sz == emitSizeOfInsDsc(id)); #if !FEATURE_FIXED_OUT_ARGS bool updateStackLevel = !emitIGisInProlog(ig) && !emitIGisInEpilog(ig); #if defined(FEATURE_EH_FUNCLETS) updateStackLevel = updateStackLevel && !emitIGisInFuncletProlog(ig) && !emitIGisInFuncletEpilog(ig); #endif // FEATURE_EH_FUNCLETS // Make sure we keep the current stack level up to date if (updateStackLevel) { switch (ins) { case INS_push: // Please note: {INS_push_hide,IF_LABEL} is used to push the address of the // finally block for calling it locally for an op_leave. 
emitStackPush(dst, id->idGCref()); break; case INS_pop: emitStackPop(dst, false, /*callInstrSize*/ 0, 1); break; case INS_sub: // Check for "sub ESP, icon" if (ins == INS_sub && id->idInsFmt() == IF_RRW_CNS && id->idReg1() == REG_ESP) { assert((size_t)emitGetInsSC(id) < 0x00000000FFFFFFFFLL); emitStackPushN(dst, (unsigned)(emitGetInsSC(id) / TARGET_POINTER_SIZE)); } break; case INS_add: // Check for "add ESP, icon" if (ins == INS_add && id->idInsFmt() == IF_RRW_CNS && id->idReg1() == REG_ESP) { assert((size_t)emitGetInsSC(id) < 0x00000000FFFFFFFFLL); emitStackPop(dst, /*isCall*/ false, /*callInstrSize*/ 0, (unsigned)(emitGetInsSC(id) / TARGET_POINTER_SIZE)); } break; default: break; } } #endif // !FEATURE_FIXED_OUT_ARGS assert((int)emitCurStackLvl >= 0); // Only epilog "instructions" and some pseudo-instrs // are allowed not to generate any code assert(*dp != dst || emitInstHasNoCode(ins)); #ifdef DEBUG if (emitComp->opts.disAsm || emitComp->verbose) { emitDispIns(id, false, dspOffs, true, emitCurCodeOffs(*dp), *dp, (dst - *dp)); } #endif #if FEATURE_LOOP_ALIGN // Only compensate over-estimated instructions if emitCurIG is before // the last IG that needs alignment. if (emitCurIG->igNum <= emitLastAlignedIgNum) { int diff = id->idCodeSize() - ((UNATIVE_OFFSET)(dst - *dp)); assert(diff >= 0); if (diff != 0) { #ifdef DEBUG // should never over-estimate align instruction assert(id->idIns() != INS_align); JITDUMP("Added over-estimation compensation: %d\n", diff); if (emitComp->opts.disAsm) { emitDispInsAddr(dst); printf("\t\t ;; NOP compensation instructions of %d bytes.\n", diff); } #endif BYTE* dstRW = dst + writeableOffset; dstRW = emitOutputNOP(dstRW, diff); dst = dstRW - writeableOffset; } assert((id->idCodeSize() - ((UNATIVE_OFFSET)(dst - *dp))) == 0); } #endif #ifdef DEBUG if (emitComp->compDebugBreak) { // set JitEmitPrintRefRegs=1 will print out emitThisGCrefRegs and emitThisByrefRegs // at the beginning of this method. 
if (JitConfig.JitEmitPrintRefRegs() != 0) { printf("Before emitOutputInstr for id->idDebugOnlyInfo()->idNum=0x%02x\n", id->idDebugOnlyInfo()->idNum); printf(" emitThisGCrefRegs(0x%p)=", emitComp->dspPtr(&emitThisGCrefRegs)); printRegMaskInt(emitThisGCrefRegs); emitDispRegSet(emitThisGCrefRegs); printf("\n"); printf(" emitThisByrefRegs(0x%p)=", emitComp->dspPtr(&emitThisByrefRegs)); printRegMaskInt(emitThisByrefRegs); emitDispRegSet(emitThisByrefRegs); printf("\n"); } // For example, set JitBreakEmitOutputInstr=a6 will break when this method is called for // emitting instruction a6, (i.e. IN00a6 in jitdump). if ((unsigned)JitConfig.JitBreakEmitOutputInstr() == id->idDebugOnlyInfo()->idNum) { assert(!"JitBreakEmitOutputInstr reached"); } } #endif *dp = dst; #ifdef DEBUG if (ins == INS_mulEAX || ins == INS_imulEAX) { // INS_mulEAX has implicit target of Edx:Eax. Make sure // that we detected this cleared its GC-status. assert(((RBM_EAX | RBM_EDX) & (emitThisGCrefRegs | emitThisByrefRegs)) == 0); } if (instrIs3opImul(ins)) { // The target of the 3-operand imul is implicitly encoded. Make sure // that we detected the implicit register and cleared its GC-status. regMaskTP regMask = genRegMask(inst3opImulReg(ins)); assert((regMask & (emitThisGCrefRegs | emitThisByrefRegs)) == 0); } // Output any delta in GC info. 
if (EMIT_GC_VERBOSE || emitComp->opts.disasmWithGC) { emitDispGCInfoDelta(); } #endif return sz; } #ifdef _PREFAST_ #pragma warning(pop) #endif emitter::insFormat emitter::getMemoryOperation(instrDesc* id) { insFormat result = IF_NONE; instruction ins = id->idIns(); insFormat insFmt = id->idInsFmt(); if (ins == INS_lea) { // an INS_lea instruction doesn't actually read memory insFmt = IF_NONE; } switch (insFmt) { case IF_NONE: case IF_LABEL: case IF_RWR_LABEL: case IF_METHOD: case IF_CNS: case IF_RRD: case IF_RWR: case IF_RRW: case IF_RRD_CNS: case IF_RWR_CNS: case IF_RRW_CNS: case IF_RRW_SHF: case IF_RRD_RRD: case IF_RWR_RRD: case IF_RRW_RRD: case IF_RRW_RRW: case IF_RRW_RRW_CNS: case IF_RWR_RRD_RRD: case IF_RWR_RRD_RRD_CNS: case IF_RWR_RRD_RRD_RRD: // none, or register only result = IF_NONE; break; case IF_ARD: case IF_RRD_ARD: case IF_RWR_ARD: case IF_RRW_ARD: case IF_RWR_ARD_CNS: case IF_RWR_RRD_ARD: case IF_RRW_ARD_CNS: case IF_RWR_ARD_RRD: case IF_RWR_RRD_ARD_CNS: case IF_RWR_RRD_ARD_RRD: case IF_ARD_CNS: case IF_ARD_RRD: // Address [reg+reg*scale+cns] - read result = IF_ARD; break; case IF_AWR: case IF_AWR_RRD: case IF_AWR_CNS: case IF_AWR_RRD_CNS: case IF_AWR_RRD_RRD: // Address [reg+reg*scale+cns] - write result = IF_AWR; break; case IF_ARW: case IF_ARW_RRD: case IF_ARW_CNS: case IF_ARW_SHF: // Address [reg+reg*scale+cns] - read and write result = IF_ARW; break; case IF_MRD: case IF_MRD_CNS: case IF_MRD_OFF: case IF_MRD_RRD: case IF_RRD_MRD: case IF_RRW_MRD: case IF_RWR_MRD: case IF_RWR_MRD_CNS: case IF_RWR_MRD_OFF: case IF_RWR_RRD_MRD: case IF_RRW_MRD_CNS: case IF_RWR_RRD_MRD_CNS: case IF_RWR_RRD_MRD_RRD: case IF_METHPTR: // Address [cns] - read result = IF_MRD; break; case IF_MWR: case IF_MWR_CNS: case IF_MWR_RRD: case IF_MWR_RRD_CNS: // Address [cns] - write result = IF_MWR; break; case IF_MRW: case IF_MRW_CNS: case IF_MRW_RRD: case IF_MRW_SHF: // Address [cns] - read and write result = IF_MWR; break; case IF_SRD: case IF_SRD_CNS: case IF_SRD_RRD: case 
IF_RRD_SRD: case IF_RRW_SRD: case IF_RWR_SRD: case IF_RWR_SRD_CNS: case IF_RWR_RRD_SRD: case IF_RRW_SRD_CNS: case IF_RWR_RRD_SRD_CNS: case IF_RWR_RRD_SRD_RRD: // Stack [RSP] - read result = IF_SRD; break; case IF_SWR: case IF_SWR_CNS: case IF_SWR_RRD: case IF_SWR_RRD_CNS: case IF_SWR_LABEL: // Stack [RSP] - write result = IF_SWR; break; case IF_SRW: case IF_SRW_CNS: case IF_SRW_RRD: case IF_SRW_SHF: // Stack [RSP] - read and write result = IF_SWR; break; default: result = IF_NONE; break; } return result; } #if defined(DEBUG) || defined(LATE_DISASM) //---------------------------------------------------------------------------------------- // getInsExecutionCharacteristics: // Returns the current instruction execution characteristics // // Arguments: // id - The current instruction descriptor to be evaluated // // Return Value: // A struct containing the current instruction execution characteristics // // Notes: // The instruction latencies and throughput values returned by this function // are for the Intel Skylake-X processor and are from either: // 1. Agner.org - https://www.agner.org/optimize/instruction_tables.pdf // 2. 
uops.info - https://uops.info/table.html // emitter::insExecutionCharacteristics emitter::getInsExecutionCharacteristics(instrDesc* id) { insExecutionCharacteristics result; instruction ins = id->idIns(); insFormat insFmt = id->idInsFmt(); emitAttr opSize = id->idOpSize(); insFormat memFmt = getMemoryOperation(id); unsigned memAccessKind; result.insThroughput = PERFSCORE_THROUGHPUT_ILLEGAL; result.insLatency = PERFSCORE_LATENCY_ILLEGAL; // Model the memory latency switch (memFmt) { // Model a read from stack location, possible def to use latency from L0 cache case IF_SRD: result.insLatency = PERFSCORE_LATENCY_RD_STACK; memAccessKind = PERFSCORE_MEMORY_READ; break; case IF_SWR: result.insLatency = PERFSCORE_LATENCY_WR_STACK; memAccessKind = PERFSCORE_MEMORY_WRITE; break; case IF_SRW: result.insLatency = PERFSCORE_LATENCY_RD_WR_STACK; memAccessKind = PERFSCORE_MEMORY_READ_WRITE; break; // Model a read from a constant location, possible def to use latency from L0 cache case IF_MRD: result.insLatency = PERFSCORE_LATENCY_RD_CONST_ADDR; memAccessKind = PERFSCORE_MEMORY_READ; break; case IF_MWR: result.insLatency = PERFSCORE_LATENCY_WR_CONST_ADDR; memAccessKind = PERFSCORE_MEMORY_WRITE; break; case IF_MRW: result.insLatency = PERFSCORE_LATENCY_RD_WR_CONST_ADDR; memAccessKind = PERFSCORE_MEMORY_READ_WRITE; break; // Model a read from memory location, possible def to use latency from L0 or L1 cache case IF_ARD: result.insLatency = PERFSCORE_LATENCY_RD_GENERAL; memAccessKind = PERFSCORE_MEMORY_READ; break; case IF_AWR: result.insLatency = PERFSCORE_LATENCY_WR_GENERAL; memAccessKind = PERFSCORE_MEMORY_WRITE; break; case IF_ARW: result.insLatency = PERFSCORE_LATENCY_RD_WR_GENERAL; memAccessKind = PERFSCORE_MEMORY_READ_WRITE; break; case IF_NONE: result.insLatency = PERFSCORE_LATENCY_ZERO; memAccessKind = PERFSCORE_MEMORY_NONE; break; default: assert(!"Unhandled insFmt for switch (memFmt)"); result.insLatency = PERFSCORE_LATENCY_ZERO; memAccessKind = PERFSCORE_MEMORY_NONE; 
break; } result.insMemoryAccessKind = memAccessKind; switch (ins) { case INS_align: #if FEATURE_LOOP_ALIGN if ((id->idCodeSize() == 0) || ((instrDescAlign*)id)->isPlacedAfterJmp) { // Either we're not going to generate 'align' instruction, or the 'align' // instruction is placed immediately after unconditional jmp. // In both cases, don't count for PerfScore. result.insThroughput = PERFSCORE_THROUGHPUT_ZERO; result.insLatency = PERFSCORE_LATENCY_ZERO; break; } #endif FALLTHROUGH; case INS_nop: case INS_int3: assert(memFmt == IF_NONE); result.insThroughput = PERFSCORE_THROUGHPUT_4X; result.insLatency = PERFSCORE_LATENCY_ZERO; break; case INS_push: case INS_push_hide: result.insThroughput = PERFSCORE_THROUGHPUT_1C; if (insFmt == IF_RRD) // push reg { // For pushes (stack writes) we assume that the full latency will be covered result.insLatency = PERFSCORE_LATENCY_ZERO; } break; case INS_pop: case INS_pop_hide: if (insFmt == IF_RWR) // pop reg { result.insThroughput = PERFSCORE_THROUGHPUT_2X; // For pops (stack reads) we assume that the full latency will be covered result.insLatency = PERFSCORE_LATENCY_ZERO; } else { result.insThroughput = PERFSCORE_THROUGHPUT_1C; } break; case INS_inc: case INS_dec: case INS_neg: case INS_not: if (memFmt == IF_NONE) { // ins reg result.insThroughput = PERFSCORE_THROUGHPUT_4X; result.insLatency = PERFSCORE_LATENCY_1C; } else { // ins mem result.insThroughput = PERFSCORE_THROUGHPUT_1C; // no additional R/W latency } break; #ifdef TARGET_AMD64 case INS_movsxd: #endif case INS_mov: case INS_movsx: case INS_movzx: case INS_cwde: case INS_cmp: case INS_test: if (memFmt == IF_NONE) { result.insThroughput = PERFSCORE_THROUGHPUT_4X; } else if (memAccessKind == PERFSCORE_MEMORY_READ) { result.insThroughput = PERFSCORE_THROUGHPUT_2X; if (ins == INS_cmp || ins == INS_test) { result.insLatency += PERFSCORE_LATENCY_1C; } else if (ins == INS_movsx #ifdef TARGET_AMD64 || ins == INS_movsxd #endif ) { result.insLatency += PERFSCORE_LATENCY_2C; } } 
else // writes { assert(memAccessKind == PERFSCORE_MEMORY_WRITE); assert(ins == INS_mov); result.insThroughput = PERFSCORE_THROUGHPUT_1C; } break; case INS_adc: case INS_sbb: if (memAccessKind != PERFSCORE_MEMORY_READ_WRITE) { result.insThroughput = PERFSCORE_THROUGHPUT_2X; result.insLatency += PERFSCORE_LATENCY_1C; } else { result.insThroughput = PERFSCORE_THROUGHPUT_1C; // no additional R/W latency } break; case INS_add: case INS_sub: case INS_and: case INS_or: case INS_xor: if (memFmt == IF_NONE) { result.insThroughput = PERFSCORE_THROUGHPUT_4X; result.insLatency = PERFSCORE_LATENCY_1C; } else if (memAccessKind == PERFSCORE_MEMORY_READ_WRITE) { result.insThroughput = PERFSCORE_THROUGHPUT_1C; // no additional R/W latency } else { result.insThroughput = PERFSCORE_THROUGHPUT_2X; result.insLatency += PERFSCORE_LATENCY_1C; } break; case INS_lea: // uops.info result.insThroughput = PERFSCORE_THROUGHPUT_2X; // one or two components result.insLatency = PERFSCORE_LATENCY_1C; if (insFmt == IF_RWR_LABEL) { // RIP relative addressing // // - throughput is only 1 per cycle // result.insThroughput = PERFSCORE_THROUGHPUT_1C; } else if (insFmt != IF_RWR_SRD) { if (id->idAddr()->iiaAddrMode.amIndxReg != REG_NA) { regNumber baseReg = id->idAddr()->iiaAddrMode.amBaseReg; if (baseReg != REG_NA) { ssize_t dsp = emitGetInsAmdAny(id); if ((dsp != 0) || baseRegisterRequiresDisplacement(baseReg)) { // three components // // - throughput is only 1 per cycle // result.insThroughput = PERFSCORE_THROUGHPUT_1C; if (baseRegisterRequiresDisplacement(baseReg) || id->idIsDspReloc()) { // Increased Latency for these cases // - see https://reviews.llvm.org/D32277 // result.insLatency = PERFSCORE_LATENCY_3C; } } } } } break; case INS_imul_AX: case INS_imul_BX: case INS_imul_CX: case INS_imul_DX: case INS_imul_BP: case INS_imul_SI: case INS_imul_DI: #ifdef TARGET_AMD64 case INS_imul_08: case INS_imul_09: case INS_imul_10: case INS_imul_11: case INS_imul_12: case INS_imul_13: case INS_imul_14: case 
INS_imul_15: #endif // TARGET_AMD64 case INS_imul: result.insThroughput = PERFSCORE_THROUGHPUT_1C; result.insLatency += PERFSCORE_LATENCY_3C; break; case INS_mulEAX: case INS_imulEAX: // uops.info: mul/imul rdx:rax,reg latency is 3 only if the low half of the result is needed, but in that // case codegen uses imul reg,reg instruction form (except for unsigned overflow checks, which are rare) result.insThroughput = PERFSCORE_THROUGHPUT_1C; result.insLatency += PERFSCORE_LATENCY_4C; break; case INS_div: // The integer divide instructions have long latencies if (opSize == EA_8BYTE) { result.insThroughput = PERFSCORE_THROUGHPUT_52C; result.insLatency = PERFSCORE_LATENCY_62C; } else { assert(opSize == EA_4BYTE); result.insThroughput = PERFSCORE_THROUGHPUT_6C; result.insLatency = PERFSCORE_LATENCY_26C; } break; case INS_idiv: // The integer divide instructions have long latenies if (opSize == EA_8BYTE) { result.insThroughput = PERFSCORE_THROUGHPUT_57C; result.insLatency = PERFSCORE_LATENCY_69C; } else { assert(opSize == EA_4BYTE); result.insThroughput = PERFSCORE_THROUGHPUT_6C; result.insLatency = PERFSCORE_LATENCY_26C; } break; case INS_cdq: result.insThroughput = PERFSCORE_THROUGHPUT_2X; result.insLatency = PERFSCORE_LATENCY_1C; break; case INS_shl: case INS_shr: case INS_sar: case INS_ror: case INS_rol: switch (insFmt) { case IF_RRW_CNS: // ins reg, cns result.insThroughput = PERFSCORE_THROUGHPUT_2X; result.insLatency = PERFSCORE_LATENCY_1C; break; case IF_MRW_CNS: case IF_SRW_CNS: case IF_ARW_CNS: // ins [mem], cns result.insThroughput = PERFSCORE_THROUGHPUT_2C; result.insLatency += PERFSCORE_LATENCY_1C; break; case IF_RRW: // ins reg, cl result.insThroughput = PERFSCORE_THROUGHPUT_2C; result.insLatency = PERFSCORE_LATENCY_2C; break; case IF_MRW: case IF_SRW: case IF_ARW: // ins [mem], cl result.insThroughput = PERFSCORE_THROUGHPUT_4C; result.insLatency += PERFSCORE_LATENCY_2C; break; default: // unhandled instruction insFmt combination 
perfScoreUnhandledInstruction(id, &result); break; } break; case INS_shl_1: case INS_shr_1: case INS_sar_1: result.insLatency += PERFSCORE_LATENCY_1C; switch (insFmt) { case IF_RRW: // ins reg, 1 result.insThroughput = PERFSCORE_THROUGHPUT_2X; break; case IF_MRW: case IF_SRW: case IF_ARW: // ins [mem], 1 result.insThroughput = PERFSCORE_THROUGHPUT_2C; break; default: // unhandled instruction insFmt combination perfScoreUnhandledInstruction(id, &result); break; } break; case INS_ror_1: case INS_rol_1: result.insThroughput = PERFSCORE_THROUGHPUT_1C; result.insLatency += PERFSCORE_LATENCY_1C; break; case INS_shl_N: case INS_shr_N: case INS_sar_N: case INS_ror_N: case INS_rol_N: result.insLatency += PERFSCORE_LATENCY_1C; switch (insFmt) { case IF_RRW_SHF: // ins reg, cns result.insThroughput = PERFSCORE_THROUGHPUT_2X; break; case IF_MRW_SHF: case IF_SRW_SHF: case IF_ARW_SHF: // ins [mem], cns result.insThroughput = PERFSCORE_THROUGHPUT_2C; break; default: // unhandled instruction insFmt combination perfScoreUnhandledInstruction(id, &result); break; } break; case INS_rcr: case INS_rcl: result.insThroughput = PERFSCORE_THROUGHPUT_6C; result.insLatency += PERFSCORE_LATENCY_6C; break; case INS_rcr_1: case INS_rcl_1: // uops.info result.insThroughput = PERFSCORE_THROUGHPUT_1C; result.insLatency += PERFSCORE_LATENCY_2C; break; case INS_shld: case INS_shrd: result.insLatency += PERFSCORE_LATENCY_3C; if (insFmt == IF_RRW_RRW_CNS) { // ins reg, reg, cns result.insThroughput = PERFSCORE_THROUGHPUT_1C; } else { assert(memAccessKind == PERFSCORE_MEMORY_WRITE); // _SHF form never emitted result.insThroughput = PERFSCORE_THROUGHPUT_2C; } break; case INS_bt: result.insLatency += PERFSCORE_LATENCY_1C; if ((insFmt == IF_RRD_RRD) || (insFmt == IF_RRD_CNS)) { result.insThroughput = PERFSCORE_THROUGHPUT_2X; } else { result.insThroughput = PERFSCORE_THROUGHPUT_1C; } break; case INS_seto: case INS_setno: case INS_setb: case INS_setae: case INS_sete: case INS_setne: case INS_setbe: case 
INS_seta: case INS_sets: case INS_setns: case INS_setp: case INS_setnp: case INS_setl: case INS_setge: case INS_setle: case INS_setg: result.insLatency += PERFSCORE_LATENCY_1C; if (insFmt == IF_RRD) { result.insThroughput = PERFSCORE_THROUGHPUT_2X; } else { result.insThroughput = PERFSCORE_THROUGHPUT_1C; } break; case INS_jo: case INS_jno: case INS_jb: case INS_jae: case INS_je: case INS_jne: case INS_jbe: case INS_ja: case INS_js: case INS_jns: case INS_jp: case INS_jnp: case INS_jl: case INS_jge: case INS_jle: case INS_jg: // conditional branch result.insThroughput = PERFSCORE_THROUGHPUT_2X; result.insLatency = PERFSCORE_LATENCY_BRANCH_COND; break; case INS_jmp: case INS_l_jmp: // branch to a constant address result.insThroughput = PERFSCORE_THROUGHPUT_2C; result.insLatency = PERFSCORE_LATENCY_BRANCH_DIRECT; break; case INS_tail_i_jmp: case INS_i_jmp: // branch to register result.insThroughput = PERFSCORE_THROUGHPUT_2C; result.insLatency = PERFSCORE_LATENCY_BRANCH_INDIRECT; break; case INS_call: // uops.info result.insLatency = PERFSCORE_LATENCY_ZERO; switch (insFmt) { case IF_LABEL: result.insThroughput = PERFSCORE_THROUGHPUT_1C; break; case IF_METHOD: result.insThroughput = PERFSCORE_THROUGHPUT_1C; break; case IF_METHPTR: result.insThroughput = PERFSCORE_THROUGHPUT_3C; break; case IF_SRD: result.insThroughput = PERFSCORE_THROUGHPUT_3C; break; case IF_ARD: result.insThroughput = PERFSCORE_THROUGHPUT_3C; break; default: // unhandled instruction, insFmt combination perfScoreUnhandledInstruction(id, &result); break; } break; case INS_ret: if (insFmt == IF_CNS) { result.insThroughput = PERFSCORE_THROUGHPUT_2C; } else { assert(insFmt == IF_NONE); result.insThroughput = PERFSCORE_THROUGHPUT_1C; } break; case INS_lock: result.insThroughput = PERFSCORE_THROUGHPUT_13C; break; case INS_xadd: // uops.info result.insThroughput = PERFSCORE_THROUGHPUT_1C; result.insLatency += PERFSCORE_LATENCY_1C; break; case INS_cmpxchg: result.insThroughput = PERFSCORE_THROUGHPUT_5C; break; 
case INS_xchg: // uops.info result.insThroughput = PERFSCORE_THROUGHPUT_1C; if (memFmt == IF_NONE) { result.insLatency = PERFSCORE_LATENCY_1C; } else { result.insLatency = PERFSCORE_LATENCY_23C; } break; #ifdef TARGET_X86 case INS_fld: case INS_fstp: result.insThroughput = PERFSCORE_THROUGHPUT_2X; if (memAccessKind == PERFSCORE_MEMORY_NONE) { result.insLatency = PERFSCORE_LATENCY_1C; } break; #endif // TARGET_X86 #ifdef TARGET_AMD64 case INS_movsq: case INS_stosq: #endif // TARGET_AMD64 case INS_movsd: case INS_stosd: // uops.info result.insThroughput = PERFSCORE_THROUGHPUT_1C; break; #ifdef TARGET_AMD64 case INS_r_movsq: case INS_r_stosq: #endif // TARGET_AMD64 case INS_r_movsd: case INS_r_movsb: case INS_r_stosd: case INS_r_stosb: // Actually variable sized: rep stosd, used to zero frame slots // uops.info result.insThroughput = PERFSCORE_THROUGHPUT_25C; break; case INS_movd: case INS_movq: // only MOVQ xmm, xmm is different (emitted by Sse2.MoveScalar, should use MOVDQU instead) if (memAccessKind == PERFSCORE_MEMORY_NONE) { // movd r32, xmm or xmm, r32 result.insThroughput = PERFSCORE_THROUGHPUT_1C; result.insLatency = PERFSCORE_LATENCY_3C; } else if (memAccessKind == PERFSCORE_MEMORY_READ) { // movd xmm, m32 result.insThroughput = PERFSCORE_THROUGHPUT_2X; result.insLatency += PERFSCORE_LATENCY_2C; } else { // movd m32, xmm assert(memAccessKind == PERFSCORE_MEMORY_WRITE); result.insThroughput = PERFSCORE_THROUGHPUT_1C; result.insLatency += PERFSCORE_LATENCY_2C; } break; case INS_movdqa: case INS_movdqu: case INS_movaps: case INS_movups: case INS_movapd: case INS_movupd: if (memAccessKind == PERFSCORE_MEMORY_NONE) { // ins reg, reg result.insThroughput = PERFSCORE_THROUGHPUT_4X; result.insLatency = PERFSCORE_LATENCY_ZERO; } else if (memAccessKind == PERFSCORE_MEMORY_READ) { // ins reg, mem result.insThroughput = PERFSCORE_THROUGHPUT_2X; result.insLatency += opSize == EA_32BYTE ? 
PERFSCORE_LATENCY_3C : PERFSCORE_LATENCY_2C; } else { // ins mem, reg assert(memAccessKind == PERFSCORE_MEMORY_WRITE); result.insThroughput = PERFSCORE_THROUGHPUT_1C; result.insLatency += PERFSCORE_LATENCY_2C; } break; case INS_movhps: case INS_movhpd: case INS_movlps: case INS_movlpd: result.insThroughput = PERFSCORE_THROUGHPUT_1C; if (memAccessKind == PERFSCORE_MEMORY_READ) { result.insLatency += PERFSCORE_LATENCY_3C; } else { assert(memAccessKind == PERFSCORE_MEMORY_WRITE); result.insLatency += PERFSCORE_LATENCY_2C; } break; case INS_movhlps: case INS_movlhps: result.insThroughput = PERFSCORE_THROUGHPUT_1C; result.insLatency = PERFSCORE_LATENCY_1C; break; case INS_movntdq: case INS_movnti: case INS_movntps: case INS_movntpd: assert(memAccessKind == PERFSCORE_MEMORY_WRITE); result.insThroughput = PERFSCORE_THROUGHPUT_1C; result.insLatency = PERFSCORE_LATENCY_400C; // Intel microcode issue with these instuctions break; case INS_maskmovdqu: result.insThroughput = PERFSCORE_THROUGHPUT_6C; result.insLatency = PERFSCORE_LATENCY_400C; // Intel microcode issue with these instuctions break; case INS_movntdqa: assert(memAccessKind == PERFSCORE_MEMORY_READ); result.insThroughput = PERFSCORE_THROUGHPUT_2X; result.insLatency += opSize == EA_32BYTE ? PERFSCORE_LATENCY_3C : PERFSCORE_LATENCY_2C; break; case INS_vzeroupper: result.insThroughput = PERFSCORE_THROUGHPUT_1C; // insLatency is zero and is set when we Model the memory latency break; case INS_movss: case INS_movsdsse2: case INS_movddup: if (memAccessKind == PERFSCORE_MEMORY_NONE) { result.insThroughput = PERFSCORE_THROUGHPUT_1C; result.insLatency = PERFSCORE_LATENCY_1C; } else if (memAccessKind == PERFSCORE_MEMORY_READ) { result.insThroughput = PERFSCORE_THROUGHPUT_2X; result.insLatency += opSize == EA_32BYTE ? 
PERFSCORE_LATENCY_3C : PERFSCORE_LATENCY_2C; } else { result.insThroughput = PERFSCORE_THROUGHPUT_1C; result.insLatency += PERFSCORE_LATENCY_2C; } break; case INS_lddqu: result.insThroughput = PERFSCORE_THROUGHPUT_2X; result.insLatency += opSize == EA_32BYTE ? PERFSCORE_LATENCY_3C : PERFSCORE_LATENCY_2C; break; case INS_comiss: case INS_comisd: case INS_ucomiss: case INS_ucomisd: result.insThroughput = PERFSCORE_THROUGHPUT_1C; result.insLatency += PERFSCORE_LATENCY_3C; break; case INS_addsd: case INS_addss: case INS_addpd: case INS_addps: case INS_subsd: case INS_subss: case INS_subpd: case INS_subps: case INS_cvttps2dq: case INS_cvtps2dq: case INS_cvtdq2ps: result.insThroughput = PERFSCORE_THROUGHPUT_2X; result.insLatency += PERFSCORE_LATENCY_4C; break; case INS_haddps: case INS_haddpd: result.insThroughput = PERFSCORE_THROUGHPUT_2C; result.insLatency += PERFSCORE_LATENCY_6C; break; case INS_mulss: case INS_mulsd: case INS_mulps: case INS_mulpd: result.insThroughput = PERFSCORE_THROUGHPUT_2X; result.insLatency += PERFSCORE_LATENCY_4C; break; case INS_divss: case INS_divps: result.insThroughput = PERFSCORE_THROUGHPUT_3C; result.insLatency += PERFSCORE_LATENCY_11C; break; case INS_divsd: case INS_divpd: result.insThroughput = PERFSCORE_THROUGHPUT_4C; result.insLatency += PERFSCORE_LATENCY_13C; break; case INS_sqrtss: case INS_sqrtps: result.insThroughput = PERFSCORE_THROUGHPUT_3C; result.insLatency += PERFSCORE_LATENCY_12C; break; case INS_sqrtsd: case INS_sqrtpd: result.insThroughput = PERFSCORE_THROUGHPUT_4C; result.insLatency += PERFSCORE_LATENCY_13C; break; case INS_rcpps: case INS_rcpss: case INS_rsqrtss: case INS_rsqrtps: result.insThroughput = PERFSCORE_THROUGHPUT_1C; result.insLatency += PERFSCORE_LATENCY_4C; break; case INS_roundpd: case INS_roundps: case INS_roundsd: case INS_roundss: result.insThroughput = PERFSCORE_THROUGHPUT_1C; result.insLatency += PERFSCORE_LATENCY_8C; break; case INS_cvttsd2si: case INS_cvtsd2si: case INS_cvtsi2sd: case INS_cvtsi2ss: 
result.insThroughput = PERFSCORE_THROUGHPUT_1C; result.insLatency += PERFSCORE_LATENCY_7C; break; case INS_cvttss2si: case INS_cvtss2si: result.insThroughput = PERFSCORE_THROUGHPUT_1C; result.insLatency += opSize == EA_8BYTE ? PERFSCORE_LATENCY_8C : PERFSCORE_LATENCY_7C; break; case INS_cvtss2sd: result.insThroughput = PERFSCORE_THROUGHPUT_1C; result.insLatency += PERFSCORE_LATENCY_5C; break; case INS_paddb: case INS_psubb: case INS_paddw: case INS_psubw: case INS_paddd: case INS_psubd: case INS_paddq: case INS_psubq: case INS_paddsb: case INS_psubsb: case INS_paddsw: case INS_psubsw: case INS_paddusb: case INS_psubusb: case INS_paddusw: case INS_psubusw: case INS_pand: case INS_pandn: case INS_por: case INS_pxor: case INS_andpd: case INS_andps: case INS_andnpd: case INS_andnps: case INS_orpd: case INS_orps: case INS_xorpd: case INS_xorps: case INS_blendps: case INS_blendpd: case INS_vpblendd: result.insLatency += PERFSCORE_LATENCY_1C; if (memAccessKind == PERFSCORE_MEMORY_NONE) { result.insThroughput = PERFSCORE_THROUGHPUT_3X; } else { result.insThroughput = PERFSCORE_THROUGHPUT_2X; } break; case INS_andn: case INS_pcmpeqb: case INS_pcmpeqw: case INS_pcmpeqd: case INS_pcmpeqq: case INS_pcmpgtb: case INS_pcmpgtw: case INS_pcmpgtd: case INS_pavgb: case INS_pavgw: case INS_pminub: case INS_pminsb: case INS_pminuw: case INS_pminsw: case INS_pminud: case INS_pminsd: case INS_pmaxub: case INS_pmaxsb: case INS_pmaxuw: case INS_pmaxsw: case INS_pmaxsd: case INS_pmaxud: case INS_pabsb: case INS_pabsw: case INS_pabsd: case INS_psignb: case INS_psignw: case INS_psignd: case INS_vpsravd: case INS_blendvps: case INS_blendvpd: case INS_pblendvb: case INS_vpsllvd: case INS_vpsllvq: case INS_vpsrlvd: case INS_vpsrlvq: result.insThroughput = PERFSCORE_THROUGHPUT_2X; result.insLatency += PERFSCORE_LATENCY_1C; break; case INS_pslld: case INS_psllw: case INS_psllq: case INS_psrlw: case INS_psrld: case INS_psrlq: case INS_psrad: case INS_psraw: if (insFmt == IF_RWR_CNS) { 
result.insLatency = PERFSCORE_LATENCY_1C; result.insThroughput = PERFSCORE_THROUGHPUT_2X; } else { result.insLatency += opSize == EA_32BYTE ? PERFSCORE_LATENCY_4C : PERFSCORE_LATENCY_2C; result.insThroughput = PERFSCORE_THROUGHPUT_1C; } break; case INS_blsi: case INS_blsmsk: case INS_blsr: case INS_bzhi: case INS_rorx: result.insLatency += PERFSCORE_LATENCY_1C; result.insThroughput = PERFSCORE_THROUGHPUT_2X; break; case INS_bextr: result.insLatency += PERFSCORE_LATENCY_2C; result.insThroughput = PERFSCORE_THROUGHPUT_2X; break; case INS_packuswb: case INS_packusdw: case INS_packsswb: case INS_packssdw: case INS_unpcklps: case INS_unpckhps: case INS_unpcklpd: case INS_unpckhpd: case INS_punpckldq: case INS_punpcklwd: case INS_punpcklbw: case INS_punpckhdq: case INS_punpckhwd: case INS_punpckhbw: case INS_punpcklqdq: case INS_punpckhqdq: case INS_pshufb: case INS_pshufd: case INS_pshuflw: case INS_pshufhw: case INS_shufps: case INS_shufpd: case INS_pblendw: case INS_movsldup: case INS_movshdup: case INS_insertps: case INS_palignr: case INS_vpermilps: case INS_vpermilpd: case INS_vpermilpsvar: case INS_vpermilpdvar: case INS_pslldq: case INS_psrldq: result.insThroughput = PERFSCORE_THROUGHPUT_1C; result.insLatency += PERFSCORE_LATENCY_1C; break; case INS_vblendvps: case INS_vblendvpd: case INS_vpblendvb: result.insThroughput = PERFSCORE_THROUGHPUT_1C; result.insLatency += PERFSCORE_LATENCY_2C; break; case INS_bswap: if (opSize == EA_8BYTE) { result.insThroughput = PERFSCORE_THROUGHPUT_1C; result.insLatency = PERFSCORE_LATENCY_2C; } else { assert(opSize == EA_4BYTE); result.insThroughput = PERFSCORE_THROUGHPUT_2X; result.insLatency = PERFSCORE_LATENCY_1C; } break; case INS_pmovmskb: case INS_movmskpd: case INS_movmskps: result.insThroughput = PERFSCORE_THROUGHPUT_1C; if (opSize == EA_32BYTE) { result.insLatency += ins == INS_pmovmskb ? 
PERFSCORE_LATENCY_4C : PERFSCORE_LATENCY_5C; } else { result.insLatency += PERFSCORE_LATENCY_3C; } break; case INS_bsf: case INS_bsr: case INS_lzcnt: case INS_tzcnt: case INS_popcnt: case INS_crc32: case INS_pdep: case INS_pext: case INS_pcmpgtq: case INS_psadbw: case INS_vpermps: case INS_vpermpd: case INS_vpermd: case INS_vpermq: case INS_vperm2i128: case INS_vperm2f128: case INS_vextractf128: case INS_vextracti128: case INS_vinsertf128: case INS_vinserti128: result.insThroughput = PERFSCORE_THROUGHPUT_1C; result.insLatency += PERFSCORE_LATENCY_3C; break; case INS_pextrb: case INS_pextrd: case INS_pextrw: case INS_pextrq: case INS_pextrw_sse41: case INS_addsubps: case INS_addsubpd: result.insThroughput = PERFSCORE_THROUGHPUT_1C; result.insLatency += PERFSCORE_LATENCY_4C; break; case INS_pmovsxbw: case INS_pmovsxbd: case INS_pmovsxbq: case INS_pmovsxwd: case INS_pmovsxwq: case INS_pmovsxdq: case INS_pmovzxbw: case INS_pmovzxbd: case INS_pmovzxbq: case INS_pmovzxwd: case INS_pmovzxwq: case INS_pmovzxdq: result.insThroughput = PERFSCORE_THROUGHPUT_1C; result.insLatency += opSize == EA_32BYTE ? PERFSCORE_LATENCY_3C : PERFSCORE_LATENCY_1C; break; case INS_phaddw: case INS_phaddd: case INS_phaddsw: case INS_phsubw: case INS_phsubsw: case INS_phsubd: result.insThroughput = PERFSCORE_THROUGHPUT_2C; result.insLatency += PERFSCORE_LATENCY_3C; break; case INS_cmpps: case INS_cmppd: case INS_cmpss: case INS_cmpsd: result.insThroughput = PERFSCORE_THROUGHPUT_2X; result.insLatency = PERFSCORE_LATENCY_4C; break; case INS_mulx: case INS_maxps: case INS_maxpd: case INS_maxss: case INS_maxsd: case INS_minps: case INS_minpd: case INS_minss: case INS_minsd: case INS_phminposuw: case INS_extractps: result.insThroughput = PERFSCORE_THROUGHPUT_1C; result.insLatency += PERFSCORE_LATENCY_4C; break; case INS_ptest: result.insThroughput = PERFSCORE_THROUGHPUT_1C; result.insLatency += opSize == EA_32BYTE ? 
PERFSCORE_LATENCY_6C : PERFSCORE_LATENCY_4C; break; case INS_mpsadbw: result.insThroughput = PERFSCORE_THROUGHPUT_2C; result.insLatency += PERFSCORE_LATENCY_4C; break; case INS_pmullw: case INS_pmulhw: case INS_pmulhuw: case INS_pmulhrsw: case INS_pmuldq: case INS_pmuludq: case INS_pmaddwd: case INS_pmaddubsw: result.insThroughput = PERFSCORE_THROUGHPUT_2X; result.insLatency += PERFSCORE_LATENCY_5C; break; case INS_cvtsd2ss: case INS_cvtps2pd: case INS_cvtpd2dq: case INS_cvtdq2pd: case INS_cvtpd2ps: case INS_cvttpd2dq: result.insThroughput = PERFSCORE_THROUGHPUT_1C; result.insLatency += opSize == EA_32BYTE ? PERFSCORE_LATENCY_7C : PERFSCORE_LATENCY_5C; break; case INS_vtestps: case INS_vtestpd: result.insThroughput = PERFSCORE_THROUGHPUT_1C; result.insLatency += opSize == EA_32BYTE ? PERFSCORE_LATENCY_5C : PERFSCORE_LATENCY_3C; break; case INS_hsubps: case INS_hsubpd: result.insThroughput = PERFSCORE_THROUGHPUT_2C; result.insLatency += PERFSCORE_LATENCY_6C; break; case INS_pclmulqdq: result.insThroughput = PERFSCORE_THROUGHPUT_1C; result.insLatency += PERFSCORE_LATENCY_7C; break; case INS_pmulld: result.insThroughput = PERFSCORE_THROUGHPUT_1C; result.insLatency += PERFSCORE_LATENCY_10C; break; case INS_vpbroadcastb: case INS_vpbroadcastw: case INS_vpbroadcastd: case INS_vpbroadcastq: case INS_vbroadcasti128: case INS_vbroadcastf128: case INS_vbroadcastss: case INS_vbroadcastsd: if (memAccessKind == PERFSCORE_MEMORY_NONE) { result.insThroughput = PERFSCORE_THROUGHPUT_1C; result.insLatency = opSize == EA_32BYTE ? PERFSCORE_LATENCY_3C : PERFSCORE_LATENCY_1C; } else { result.insThroughput = PERFSCORE_THROUGHPUT_2X; result.insLatency += opSize == EA_32BYTE ? 
PERFSCORE_LATENCY_3C : PERFSCORE_LATENCY_2C; if (ins == INS_vpbroadcastb || ins == INS_vpbroadcastw) { result.insLatency += PERFSCORE_LATENCY_1C; } } break; case INS_pinsrb: case INS_pinsrw: case INS_pinsrd: case INS_pinsrq: if (memAccessKind == PERFSCORE_MEMORY_NONE) { result.insThroughput = PERFSCORE_THROUGHPUT_2C; result.insLatency = PERFSCORE_LATENCY_4C; } else { result.insThroughput = PERFSCORE_THROUGHPUT_1C; result.insLatency += PERFSCORE_LATENCY_3C; } break; case INS_dppd: result.insThroughput = PERFSCORE_THROUGHPUT_1C; result.insLatency = PERFSCORE_LATENCY_9C; break; case INS_dpps: result.insThroughput = PERFSCORE_THROUGHPUT_2C; result.insLatency = PERFSCORE_LATENCY_13C; break; case INS_vfmadd132pd: case INS_vfmadd213pd: case INS_vfmadd231pd: case INS_vfmadd132ps: case INS_vfmadd213ps: case INS_vfmadd231ps: case INS_vfmadd132sd: case INS_vfmadd213sd: case INS_vfmadd231sd: case INS_vfmadd132ss: case INS_vfmadd213ss: case INS_vfmadd231ss: case INS_vfmaddsub132pd: case INS_vfmaddsub213pd: case INS_vfmaddsub231pd: case INS_vfmaddsub132ps: case INS_vfmaddsub213ps: case INS_vfmaddsub231ps: case INS_vfmsubadd132pd: case INS_vfmsubadd213pd: case INS_vfmsubadd231pd: case INS_vfmsubadd132ps: case INS_vfmsubadd213ps: case INS_vfmsubadd231ps: case INS_vfmsub132pd: case INS_vfmsub213pd: case INS_vfmsub231pd: case INS_vfmsub132ps: case INS_vfmsub213ps: case INS_vfmsub231ps: case INS_vfmsub132sd: case INS_vfmsub213sd: case INS_vfmsub231sd: case INS_vfmsub132ss: case INS_vfmsub213ss: case INS_vfmsub231ss: case INS_vfnmadd132pd: case INS_vfnmadd213pd: case INS_vfnmadd231pd: case INS_vfnmadd132ps: case INS_vfnmadd213ps: case INS_vfnmadd231ps: case INS_vfnmadd132sd: case INS_vfnmadd213sd: case INS_vfnmadd231sd: case INS_vfnmadd132ss: case INS_vfnmadd213ss: case INS_vfnmadd231ss: case INS_vfnmsub132pd: case INS_vfnmsub213pd: case INS_vfnmsub231pd: case INS_vfnmsub132ps: case INS_vfnmsub213ps: case INS_vfnmsub231ps: case INS_vfnmsub132sd: case INS_vfnmsub213sd: case 
INS_vfnmsub231sd: case INS_vfnmsub132ss: case INS_vfnmsub213ss: case INS_vfnmsub231ss: case INS_vpdpbusd: // will be populated when the HW becomes publicly available case INS_vpdpwssd: // will be populated when the HW becomes publicly available case INS_vpdpbusds: // will be populated when the HW becomes publicly available case INS_vpdpwssds: // will be populated when the HW becomes publicly available // uops.info result.insThroughput = PERFSCORE_THROUGHPUT_2X; result.insLatency += PERFSCORE_LATENCY_4C; break; case INS_vmaskmovpd: case INS_vmaskmovps: case INS_vpmaskmovd: case INS_vpmaskmovq: if (memAccessKind == PERFSCORE_MEMORY_READ) { result.insThroughput = PERFSCORE_THROUGHPUT_2X; result.insLatency += opSize == EA_32BYTE ? PERFSCORE_LATENCY_4C : PERFSCORE_LATENCY_3C; } else { assert(memAccessKind == PERFSCORE_MEMORY_WRITE); result.insThroughput = PERFSCORE_THROUGHPUT_1C; result.insLatency += PERFSCORE_LATENCY_12C; } break; case INS_vpgatherdd: case INS_vgatherdps: result.insThroughput = PERFSCORE_THROUGHPUT_4C; result.insLatency += opSize == EA_32BYTE ? PERFSCORE_LATENCY_13C : PERFSCORE_LATENCY_11C; break; case INS_vpgatherdq: case INS_vpgatherqd: case INS_vpgatherqq: case INS_vgatherdpd: case INS_vgatherqps: case INS_vgatherqpd: result.insThroughput = PERFSCORE_THROUGHPUT_4C; result.insLatency += opSize == EA_32BYTE ? 
PERFSCORE_LATENCY_11C : PERFSCORE_LATENCY_9C; break; case INS_aesdec: case INS_aesdeclast: case INS_aesenc: case INS_aesenclast: result.insThroughput = PERFSCORE_THROUGHPUT_1C; result.insLatency += PERFSCORE_LATENCY_4C; break; case INS_aesimc: result.insThroughput = PERFSCORE_THROUGHPUT_2C; result.insLatency += PERFSCORE_LATENCY_8C; break; case INS_aeskeygenassist: result.insThroughput = PERFSCORE_THROUGHPUT_13C; result.insLatency += PERFSCORE_LATENCY_7C; break; case INS_lfence: result.insThroughput = PERFSCORE_THROUGHPUT_4C; break; case INS_sfence: result.insThroughput = PERFSCORE_THROUGHPUT_6C; break; case INS_mfence: result.insThroughput = PERFSCORE_THROUGHPUT_33C; break; case INS_prefetcht0: case INS_prefetcht1: case INS_prefetcht2: case INS_prefetchnta: result.insThroughput = PERFSCORE_THROUGHPUT_2X; break; case INS_pause: { result.insLatency = PERFSCORE_LATENCY_140C; result.insThroughput = PERFSCORE_THROUGHPUT_140C; break; } default: // unhandled instruction insFmt combination perfScoreUnhandledInstruction(id, &result); break; } return result; } #endif // defined(DEBUG) || defined(LATE_DISASM) /*****************************************************************************/ /*****************************************************************************/ #endif // defined(TARGET_XARCH)
-1
dotnet/runtime
66,234
Remove compiler warning suppression
Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
AaronRobinsonMSFT
2022-03-05T03:45:49Z
2022-03-09T00:57:12Z
220e67755ae6323a01011b83070dff6f84b02519
853e494abd1c1e12d28a76829b4680af7f694afa
Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154 All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address. ~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~ Upstream PR: https://github.com/libunwind/libunwind/pull/333 /cc @GrabYourPitchforks
./src/coreclr/utilcode/configuration.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // -------------------------------------------------------------------------------------------------- // configuration.cpp // // // Access and update configuration values, falling back on legacy CLRConfig methods where necessary. // // -------------------------------------------------------------------------------------------------- #include "stdafx.h" #include "clrconfig.h" #include "configuration.h" LPCWSTR *knobNames = nullptr; LPCWSTR *knobValues = nullptr; int numberOfKnobs = 0; void Configuration::InitializeConfigurationKnobs(int numberOfConfigs, LPCWSTR *names, LPCWSTR *values) { numberOfKnobs = numberOfConfigs; // Neither should be null, or both should be null _ASSERT(!((names == nullptr) ^ (values == nullptr))); knobNames = names; knobValues = values; } static LPCWSTR GetConfigurationValue(LPCWSTR name) { _ASSERT(name != nullptr); if (name == nullptr || knobNames == nullptr || knobValues == nullptr) { return nullptr; } for (int i = 0; i < numberOfKnobs; ++i) { _ASSERT(knobNames[i] != nullptr); if (wcscmp(name, knobNames[i]) == 0) { return knobValues[i]; } } return nullptr; } DWORD Configuration::GetKnobDWORDValue(LPCWSTR name, const CLRConfig::ConfigDWORDInfo& dwordInfo) { bool returnedDefaultValue; DWORD legacyValue = CLRConfig::GetConfigValue(dwordInfo, &returnedDefaultValue); if (!returnedDefaultValue) { return legacyValue; } LPCWSTR knobValue = GetConfigurationValue(name); if (knobValue != nullptr) { return wcstoul(knobValue, nullptr, 0); } return legacyValue; } DWORD Configuration::GetKnobDWORDValue(LPCWSTR name, DWORD defaultValue) { LPCWSTR knobValue = GetConfigurationValue(name); if (knobValue != nullptr) { return wcstoul(knobValue, nullptr, 0); } return defaultValue; } ULONGLONG Configuration::GetKnobULONGLONGValue(LPCWSTR name, ULONGLONG defaultValue) { LPCWSTR knobValue = GetConfigurationValue(name); if 
(knobValue != nullptr) { return _wcstoui64(knobValue, nullptr, 0); } return defaultValue; } LPCWSTR Configuration::GetKnobStringValue(LPCWSTR name, const CLRConfig::ConfigStringInfo& stringInfo) { LPCWSTR value = CLRConfig::GetConfigValue(stringInfo); if (value == nullptr) { value = GetConfigurationValue(name); } return value; } LPCWSTR Configuration::GetKnobStringValue(LPCWSTR name) { return GetConfigurationValue(name); } bool Configuration::GetKnobBooleanValue(LPCWSTR name, const CLRConfig::ConfigDWORDInfo& dwordInfo) { bool returnedDefaultValue; DWORD legacyValue = CLRConfig::GetConfigValue(dwordInfo, &returnedDefaultValue); if (!returnedDefaultValue) { return (legacyValue != 0); } LPCWSTR knobValue = GetConfigurationValue(name); if (knobValue != nullptr) { return (wcscmp(knobValue, W("true")) == 0); } return (legacyValue != 0); } bool Configuration::GetKnobBooleanValue(LPCWSTR name, bool defaultValue) { LPCWSTR knobValue = GetConfigurationValue(name); if (knobValue != nullptr) { return (wcscmp(knobValue, W("true")) == 0); } return defaultValue; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // -------------------------------------------------------------------------------------------------- // configuration.cpp // // // Access and update configuration values, falling back on legacy CLRConfig methods where necessary. // // -------------------------------------------------------------------------------------------------- #include "stdafx.h" #include "clrconfig.h" #include "configuration.h" LPCWSTR *knobNames = nullptr; LPCWSTR *knobValues = nullptr; int numberOfKnobs = 0; void Configuration::InitializeConfigurationKnobs(int numberOfConfigs, LPCWSTR *names, LPCWSTR *values) { numberOfKnobs = numberOfConfigs; // Neither should be null, or both should be null _ASSERT(!((names == nullptr) ^ (values == nullptr))); knobNames = names; knobValues = values; } static LPCWSTR GetConfigurationValue(LPCWSTR name) { _ASSERT(name != nullptr); if (name == nullptr || knobNames == nullptr || knobValues == nullptr) { return nullptr; } for (int i = 0; i < numberOfKnobs; ++i) { _ASSERT(knobNames[i] != nullptr); if (wcscmp(name, knobNames[i]) == 0) { return knobValues[i]; } } return nullptr; } DWORD Configuration::GetKnobDWORDValue(LPCWSTR name, const CLRConfig::ConfigDWORDInfo& dwordInfo) { bool returnedDefaultValue; DWORD legacyValue = CLRConfig::GetConfigValue(dwordInfo, &returnedDefaultValue); if (!returnedDefaultValue) { return legacyValue; } LPCWSTR knobValue = GetConfigurationValue(name); if (knobValue != nullptr) { return wcstoul(knobValue, nullptr, 0); } return legacyValue; } DWORD Configuration::GetKnobDWORDValue(LPCWSTR name, DWORD defaultValue) { LPCWSTR knobValue = GetConfigurationValue(name); if (knobValue != nullptr) { return wcstoul(knobValue, nullptr, 0); } return defaultValue; } ULONGLONG Configuration::GetKnobULONGLONGValue(LPCWSTR name, ULONGLONG defaultValue) { LPCWSTR knobValue = GetConfigurationValue(name); if 
(knobValue != nullptr) { return _wcstoui64(knobValue, nullptr, 0); } return defaultValue; } LPCWSTR Configuration::GetKnobStringValue(LPCWSTR name, const CLRConfig::ConfigStringInfo& stringInfo) { LPCWSTR value = CLRConfig::GetConfigValue(stringInfo); if (value == nullptr) { value = GetConfigurationValue(name); } return value; } LPCWSTR Configuration::GetKnobStringValue(LPCWSTR name) { return GetConfigurationValue(name); } bool Configuration::GetKnobBooleanValue(LPCWSTR name, const CLRConfig::ConfigDWORDInfo& dwordInfo) { bool returnedDefaultValue; DWORD legacyValue = CLRConfig::GetConfigValue(dwordInfo, &returnedDefaultValue); if (!returnedDefaultValue) { return (legacyValue != 0); } LPCWSTR knobValue = GetConfigurationValue(name); if (knobValue != nullptr) { return (wcscmp(knobValue, W("true")) == 0); } return (legacyValue != 0); } bool Configuration::GetKnobBooleanValue(LPCWSTR name, bool defaultValue) { LPCWSTR knobValue = GetConfigurationValue(name); if (knobValue != nullptr) { return (wcscmp(knobValue, W("true")) == 0); } return defaultValue; }
-1